| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k |
gh_patches_debug_338 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-1865 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Read the Docs build failing
https://readthedocs.org/projects/gcloud-python/builds/4108022/
https://readthedocs.org/projects/gcloud-python/builds/4108027/
</issue>
<code>
[start of setup.py]
1 import os
2 import sys
3
4 from setuptools import setup
5 from setuptools import find_packages
6
7 here = os.path.abspath(os.path.dirname(__file__))
8
9
10 with open(os.path.join(here, 'README.rst')) as f:
11 README = f.read()
12
13
14 REQUIREMENTS = [
15 'httplib2 >= 0.9.1',
16 'googleapis-common-protos',
17 'oauth2client >= 2.0.1',
18 'protobuf >= 3.0.0b2, != 3.0.0.b2.post1',
19 'six',
20 ]
21
22 GRPC_EXTRAS = [
23 'grpcio >= 0.14.0',
24 'google-gax >= 0.12.0',
25 'gax-google-pubsub-v1',
26 ]
27
28 if sys.version_info[:2] == (2, 7):
29 REQUIREMENTS.extend(GRPC_EXTRAS)
30
31 setup(
32 name='gcloud',
33 version='0.15.0',
34 description='API Client library for Google Cloud',
35 author='Google Cloud Platform',
36 author_email='[email protected]',
37 long_description=README,
38 scripts=[],
39 url='https://github.com/GoogleCloudPlatform/gcloud-python',
40 packages=find_packages(),
41 license='Apache 2.0',
42 platforms='Posix; MacOS X; Windows',
43 include_package_data=True,
44 zip_safe=False,
45 install_requires=REQUIREMENTS,
46 extras_require={'grpc': GRPC_EXTRAS},
47 classifiers=[
48 'Development Status :: 4 - Beta',
49 'Intended Audience :: Developers',
50 'License :: OSI Approved :: Apache Software License',
51 'Operating System :: OS Independent',
52 'Programming Language :: Python :: 2',
53 'Programming Language :: Python :: 2.6',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.4',
57 'Programming Language :: Python :: 3.5',
58 'Topic :: Internet',
59 ]
60 )
61
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@
'gax-google-pubsub-v1',
]
-if sys.version_info[:2] == (2, 7):
+if sys.version_info[:2] == (2, 7) and 'READTHEDOCS' not in os.environ:
REQUIREMENTS.extend(GRPC_EXTRAS)
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,7 +25,7 @@\n 'gax-google-pubsub-v1',\n ]\n \n-if sys.version_info[:2] == (2, 7):\n+if sys.version_info[:2] == (2, 7) and 'READTHEDOCS' not in os.environ:\n REQUIREMENTS.extend(GRPC_EXTRAS)\n \n setup(\n", "issue": "Read the Docs build failing\nhttps://readthedocs.org/projects/gcloud-python/builds/4108022/\nhttps://readthedocs.org/projects/gcloud-python/builds/4108027/\n\n", "before_files": [{"content": "import os\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\n\nREQUIREMENTS = [\n 'httplib2 >= 0.9.1',\n 'googleapis-common-protos',\n 'oauth2client >= 2.0.1',\n 'protobuf >= 3.0.0b2, != 3.0.0.b2.post1',\n 'six',\n]\n\nGRPC_EXTRAS = [\n 'grpcio >= 0.14.0',\n 'google-gax >= 0.12.0',\n 'gax-google-pubsub-v1',\n]\n\nif sys.version_info[:2] == (2, 7):\n REQUIREMENTS.extend(GRPC_EXTRAS)\n\nsetup(\n name='gcloud',\n version='0.15.0',\n description='API Client library for Google Cloud',\n author='Google Cloud Platform',\n author_email='[email protected]',\n long_description=README,\n scripts=[],\n url='https://github.com/GoogleCloudPlatform/gcloud-python',\n packages=find_packages(),\n license='Apache 2.0',\n platforms='Posix; MacOS X; Windows',\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={'grpc': GRPC_EXTRAS},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet',\n ]\n)\n", "path": "setup.py"}]} | 1,136 | 101 |
gh_patches_debug_60829 | rasdani/github-patches | git_diff | microsoft__AzureTRE-1754 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release version 0.3
## Description
As a TRE developer
I want to release the current code base as version 0.3
So that people can use a more stable version going forward
## Acceptance criteria
- [ ] All core apps are bumped to 0.3
- [ ] All bundles are bumped to 0.3
- [ ] A tag is created
- [ ] A release is created
</issue>
<code>
[start of api_app/_version.py]
1 __version__ = "0.2.28"
2
[end of api_app/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api_app/_version.py b/api_app/_version.py
--- a/api_app/_version.py
+++ b/api_app/_version.py
@@ -1 +1 @@
-__version__ = "0.2.28"
+__version__ = "0.3.0"
| {"golden_diff": "diff --git a/api_app/_version.py b/api_app/_version.py\n--- a/api_app/_version.py\n+++ b/api_app/_version.py\n@@ -1 +1 @@\n-__version__ = \"0.2.28\"\n+__version__ = \"0.3.0\"\n", "issue": "Release version 0.3\n## Description\r\n\r\nAs a TRE developer \r\nI want to release current code base as version 0.3\r\nSo that people can use a more stable version going forward\r\n\r\n## Acceptance criteria\r\n\r\n- [ ] All core apps are bumped to 0.3\r\n- [ ] All bundles are bumped to 0.3\r\n- [ ] A tag is created\r\n- [ ] A release is created\r\n\n", "before_files": [{"content": "__version__ = \"0.2.28\"\n", "path": "api_app/_version.py"}]} | 640 | 64 |
gh_patches_debug_10176 | rasdani/github-patches | git_diff | conan-io__conan-7364 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
graph --table=file.html does not show outdated packages
They were previously shown in colors. I guess adding a column "outdated" will resolve this issue.
</issue>
<code>
[start of conans/assets/templates/search_table_html.py]
1
2 content = """
3 <!DOCTYPE html>
4 <html lang="en">
5 <head>
6 <title>Conan | {{ search.reference }}</title>
7 <link rel="stylesheet" type="text/css" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"/>
8 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.21/css/dataTables.bootstrap.min.css"/>
9 <style>
10 tr td {
11 white-space:nowrap;
12 }
13 </style>
14 </head>
15 <body>
16 <div class="container-fluid">
17 <h1>{{ search.reference }}</h1>
18 <div class="info">
19 <p>
20 Depending on your package_id_mode, any combination of settings, options and requirements
21 can give you a different packageID. Take into account that your configuration might be
22 different from the one used to generate the packages.
23 </p>
24 </div>
25
26 <table id="results" class="table table-striped table-bordered" style="width:100%">
27 <thead>
28 {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}
29 {%- set headers2rows = headers.row(n_rows=2) %}
30 <tr>
31 {%- for category, subheaders in headers2rows %}
32 <th class="text-center" rowspan="{% if subheaders|length == 1 and not subheaders[0] %}2{% else %}1{% endif %}" colspan="{{ subheaders|length }}">
33 {{ category }}
34 </th>
35 {%- endfor %}
36 </tr>
37 <tr>
38 {%- for category, subheaders in headers2rows %}
39 {%- if subheaders|length != 1 or subheaders[0] != '' %}
40 {%- for subheader in subheaders %}
41 <th>{{ subheader|default(category, true) }}</th>
42 {%- endfor %}
43 {%- endif %}
44 {%- endfor %}
45 </tr>
46 </thead>
47 <tbody>
48 {%- for package in results.packages() %}
49 <tr>
50 {%- for item in package.row(headers) %}
51 <td>{{ item if item != None else ''}}</td>
52 {%- endfor %}
53 </tr>
54 {%- endfor %}
55 </tbody>
56 <tfoot>
57 <tr>
58 {%- for header in headers.row(n_rows=1) %}
59 <th>{{ header }}</th>
60 {%- endfor %}
61 </tr>
62 </tfoot>
63 </table>
64
65 <script type="text/javascript" src="https://code.jquery.com/jquery-3.3.1.js"></script>
66 <script type="text/javascript" src="https://cdn.datatables.net/v/dt/dt-1.10.20/datatables.min.js"></script>
67 <script type="text/javascript" src="https://cdn.datatables.net/1.10.21/js/dataTables.bootstrap.min.js"></script>
68 <script>
69 $(document).ready(function() {
70 // Setup - add a text input to each footer cell
71 $('#results tfoot th').each( function () {
72 var title = $(this).text();
73 $(this).html( '<input type="text" class="form-control filter-input" placeholder="Filter '+title+'" style="width:100%"/>' );
74 });
75
76 var table = $('#results').DataTable( {
77 "dom": "lrtip",
78 "lengthMenu": [[10, 25, 50, -1], [10, 25, 50, "All"]],
79 "pageLength": 10,
80 "columnDefs": [
81 { className: "cell_border_right", "targets": [ {{ headers.keys|length + headers.settings|length -1 }}, {{ headers.keys|length + headers.settings|length + headers.options|length -1 }} ] },
82 { className: "cell_border_right monospaced", "targets": [{{ headers.keys|length -1 }}, ]}
83 ]
84 });
85
86 // Apply the search
87 table.columns().every( function () {
88 var that = this;
89
90 $( 'input', this.footer() ).on( 'keyup change clear', function () {
91 if ( that.search() !== this.value ) {
92 that
93 .search( this.value )
94 .draw();
95 }
96 } );
97 } );
98 });
99 </script>
100 </div>
101 </body>
102 </html>
103 """
104
[end of conans/assets/templates/search_table_html.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/assets/templates/search_table_html.py b/conans/assets/templates/search_table_html.py
--- a/conans/assets/templates/search_table_html.py
+++ b/conans/assets/templates/search_table_html.py
@@ -25,7 +25,7 @@
<table id="results" class="table table-striped table-bordered" style="width:100%">
<thead>
- {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}
+ {%- set headers = results.get_headers(keys=['remote', 'package_id', 'outdated']) %}
{%- set headers2rows = headers.row(n_rows=2) %}
<tr>
{%- for category, subheaders in headers2rows %}
| {"golden_diff": "diff --git a/conans/assets/templates/search_table_html.py b/conans/assets/templates/search_table_html.py\n--- a/conans/assets/templates/search_table_html.py\n+++ b/conans/assets/templates/search_table_html.py\n@@ -25,7 +25,7 @@\n \n <table id=\"results\" class=\"table table-striped table-bordered\" style=\"width:100%\">\n <thead>\n- {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}\n+ {%- set headers = results.get_headers(keys=['remote', 'package_id', 'outdated']) %}\n {%- set headers2rows = headers.row(n_rows=2) %}\n <tr>\n {%- for category, subheaders in headers2rows %}\n", "issue": "graph --table=file.html does not show outdated packages\nThey were previously shown in colors. I guess adding a column \"outdated\" will resolve this issue.\n", "before_files": [{"content": "\ncontent = \"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Conan | {{ search.reference }}</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\"/>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdn.datatables.net/1.10.21/css/dataTables.bootstrap.min.css\"/>\n <style>\n tr td {\n white-space:nowrap;\n }\n </style>\n </head>\n <body>\n <div class=\"container-fluid\">\n <h1>{{ search.reference }}</h1>\n <div class=\"info\">\n <p>\n Depending on your package_id_mode, any combination of settings, options and requirements\n can give you a different packageID. Take into account that your configuration might be\n different from the one used to generate the packages.\n </p>\n </div>\n\n <table id=\"results\" class=\"table table-striped table-bordered\" style=\"width:100%\">\n <thead>\n {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}\n {%- set headers2rows = headers.row(n_rows=2) %}\n <tr>\n {%- for category, subheaders in headers2rows %}\n <th class=\"text-center\" rowspan=\"{% if subheaders|length == 1 and not subheaders[0] %}2{% else %}1{% endif %}\" colspan=\"{{ subheaders|length }}\">\n {{ category }}\n </th>\n {%- endfor %}\n </tr>\n <tr>\n {%- for category, subheaders in headers2rows %}\n {%- if subheaders|length != 1 or subheaders[0] != '' %}\n {%- for subheader in subheaders %}\n <th>{{ subheader|default(category, true) }}</th>\n {%- endfor %}\n {%- endif %}\n {%- endfor %}\n </tr>\n </thead>\n <tbody>\n {%- for package in results.packages() %}\n <tr>\n {%- for item in package.row(headers) %}\n <td>{{ item if item != None else ''}}</td>\n {%- endfor %}\n </tr>\n {%- endfor %}\n </tbody>\n <tfoot>\n <tr>\n {%- for header in headers.row(n_rows=1) %}\n <th>{{ header }}</th>\n {%- endfor %}\n </tr>\n </tfoot>\n </table>\n\n <script type=\"text/javascript\" src=\"https://code.jquery.com/jquery-3.3.1.js\"></script>\n <script type=\"text/javascript\" src=\"https://cdn.datatables.net/v/dt/dt-1.10.20/datatables.min.js\"></script>\n <script type=\"text/javascript\" src=\"https://cdn.datatables.net/1.10.21/js/dataTables.bootstrap.min.js\"></script>\n <script>\n $(document).ready(function() {\n // Setup - add a text input to each footer cell\n $('#results tfoot th').each( function () {\n var title = $(this).text();\n $(this).html( '<input type=\"text\" class=\"form-control filter-input\" placeholder=\"Filter '+title+'\" style=\"width:100%\"/>' );\n });\n\n var table = $('#results').DataTable( {\n \"dom\": \"lrtip\",\n \"lengthMenu\": [[10, 25, 50, -1], [10, 25, 50, \"All\"]],\n \"pageLength\": 10,\n \"columnDefs\": [\n { className: \"cell_border_right\", \"targets\": 
[ {{ headers.keys|length + headers.settings|length -1 }}, {{ headers.keys|length + headers.settings|length + headers.options|length -1 }} ] },\n { className: \"cell_border_right monospaced\", \"targets\": [{{ headers.keys|length -1 }}, ]}\n ]\n });\n\n // Apply the search\n table.columns().every( function () {\n var that = this;\n\n $( 'input', this.footer() ).on( 'keyup change clear', function () {\n if ( that.search() !== this.value ) {\n that\n .search( this.value )\n .draw();\n }\n } );\n } );\n });\n </script>\n </div>\n </body>\n</html>\n\"\"\"\n", "path": "conans/assets/templates/search_table_html.py"}]} | 1,742 | 160 |
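
The patch adds `outdated` to the keys passed to `get_headers`, so the HTML table gains the column that the colored terminal output used to convey. A toy Jinja2 rendering of such a header row; the data shapes are invented, not Conan's real objects:

```python
# Toy rendering of a header row that includes the extra "outdated" column.
from jinja2 import Template

template = Template(
    "<tr>{% for header in headers %}<th>{{ header }}</th>{% endfor %}</tr>"
)
print(template.render(headers=["remote", "package_id", "outdated"]))
# -> <tr><th>remote</th><th>package_id</th><th>outdated</th></tr>
```
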
gh_patches_debug_18833 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1548 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG][HOTFIX] Add deprecation warning for condition parameter names
In 0.7 the drift checks were updated to add Cramer's V.
Thus the previous parameter condition names ("max_psi...", "max_earthmovers...") were renamed.
Previous parameters should still be re-added with deprecation warnings, for all relevant checks and conditions.
</issue>
<code>
[start of deepchecks/vision/deprecation_warnings.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages."""
12 import warnings
13
14 # Added in version 0.6.2, deprecates max_num_categories in all drift checks
15 warnings.filterwarnings(
16 action='always',
17 message=r'.*max_num_categories.*',
18 category=DeprecationWarning,
19 module=r'deepchecks.*'
20 )
21
22 # Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks
23 # Should be removed in 0.8
24 warnings.filterwarnings(
25 action='once',
26 message=r'deepchecks.vision.checks.performance is deprecated.*',
27 category=DeprecationWarning,
28 module=r'deepchecks.*'
29 )
30
31 warnings.filterwarnings(
32 action='once',
33 message=r'deepchecks.vision.checks.methodology is deprecated.*',
34 category=DeprecationWarning,
35 module=r'deepchecks.tabular.checks.methodology.*'
36 )
37
38 warnings.filterwarnings(
39 action='once',
40 message=r'deepchecks.vision.checks.distribution is deprecated.*',
41 category=DeprecationWarning,
42 module=r'deepchecks.*'
43 )
44
45 warnings.filterwarnings(
46 action='always',
47 message=r'the integrity_validation suite is deprecated.*',
48 category=DeprecationWarning,
49 module=r'deepchecks.*'
50 )
51
[end of deepchecks/vision/deprecation_warnings.py]
[start of deepchecks/tabular/deprecation_warnings.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages."""
12 import warnings
13
14 # Added in version 0.6.2, deprecates max_num_categories in all drift checks
15 warnings.filterwarnings(
16 action='always',
17 message=r'.*max_num_categories.*',
18 category=DeprecationWarning,
19 module=r'deepchecks.*'
20 )
21
22 # Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks
23 # Should be removed in 0.8
24 warnings.filterwarnings(
25 action='once',
26 message=r'deepchecks\.tabular\.checks\.performance is deprecated.*',
27 category=DeprecationWarning,
28 module=r'deepchecks.*'
29 )
30
31 warnings.filterwarnings(
32 action='once',
33 message=r'deepchecks.tabular.checks.overview is deprecated.*',
34 category=DeprecationWarning,
35 module=r'deepchecks.*'
36 )
37
38 warnings.filterwarnings(
39 action='once',
40 message=r'deepchecks.tabular.checks.methodology is deprecated.*',
41 category=DeprecationWarning,
42 module=r'deepchecks.tabular.checks.methodology.*'
43 )
44
45 warnings.filterwarnings(
46 action='once',
47 message=r'deepchecks.tabular.checks.distribution is deprecated.*',
48 category=DeprecationWarning,
49 module=r'deepchecks.*'
50 )
51
52 warnings.filterwarnings(
53 action='always',
54 message=r'the single_dataset_integrity suite is deprecated.*',
55 category=DeprecationWarning,
56 module=r'deepchecks.*'
57 )
58
59 warnings.filterwarnings(
60 action='always',
61 message=r'the train_test_leakage suite is deprecated.*',
62 category=DeprecationWarning,
63 module=r'deepchecks.*'
64 )
65
[end of deepchecks/tabular/deprecation_warnings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepchecks/tabular/deprecation_warnings.py b/deepchecks/tabular/deprecation_warnings.py
--- a/deepchecks/tabular/deprecation_warnings.py
+++ b/deepchecks/tabular/deprecation_warnings.py
@@ -62,3 +62,20 @@
category=DeprecationWarning,
module=r'deepchecks.*'
)
+
+# Added in 0.7 Warning filters for drift conditions
+# Should be removed in 0.8
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_psi_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_earth_movers_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
diff --git a/deepchecks/vision/deprecation_warnings.py b/deepchecks/vision/deprecation_warnings.py
--- a/deepchecks/vision/deprecation_warnings.py
+++ b/deepchecks/vision/deprecation_warnings.py
@@ -48,3 +48,20 @@
category=DeprecationWarning,
module=r'deepchecks.*'
)
+
+# Added in 0.7 Warning filters for drift conditions
+# Should be removed in 0.8
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_psi_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_earth_movers_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
| {"golden_diff": "diff --git a/deepchecks/tabular/deprecation_warnings.py b/deepchecks/tabular/deprecation_warnings.py\n--- a/deepchecks/tabular/deprecation_warnings.py\n+++ b/deepchecks/tabular/deprecation_warnings.py\n@@ -62,3 +62,20 @@\n category=DeprecationWarning,\n module=r'deepchecks.*'\n )\n+\n+# Added in 0.7 Warning filters for drift conditions\n+# Should be removed in 0.8\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_psi_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_earth_movers_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\ndiff --git a/deepchecks/vision/deprecation_warnings.py b/deepchecks/vision/deprecation_warnings.py\n--- a/deepchecks/vision/deprecation_warnings.py\n+++ b/deepchecks/vision/deprecation_warnings.py\n@@ -48,3 +48,20 @@\n category=DeprecationWarning,\n module=r'deepchecks.*'\n )\n+\n+# Added in 0.7 Warning filters for drift conditions\n+# Should be removed in 0.8\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_psi_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_earth_movers_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\n", "issue": "[BUG][HOTFIX] Add deprecation warning for condition parameter names\nIn 0.7 the drift checks were updated to add Cramer's V.\r\nThus the previous parameter condition names (\"max_psi... \"max_earthmovers...\") were renamed.\r\nPrevious parameters should still be re-added with deprecation warnings, for all relevant checks and conditions.\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages.\"\"\"\nimport warnings\n\n# Added in version 0.6.2, deprecates max_num_categories in all drift checks\nwarnings.filterwarnings(\n action='always',\n message=r'.*max_num_categories.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\n# Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks\n# Should be removed in 0.8\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.vision.checks.performance is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.vision.checks.methodology is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.tabular.checks.methodology.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.vision.checks.distribution is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='always',\n message=r'the integrity_validation suite is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n", "path": "deepchecks/vision/deprecation_warnings.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages.\"\"\"\nimport warnings\n\n# Added in version 0.6.2, deprecates max_num_categories in all drift checks\nwarnings.filterwarnings(\n action='always',\n message=r'.*max_num_categories.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\n# Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks\n# Should be removed in 0.8\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks\\.tabular\\.checks\\.performance is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.tabular.checks.overview is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.tabular.checks.methodology is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.tabular.checks.methodology.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.tabular.checks.distribution is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='always',\n message=r'the single_dataset_integrity suite is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='always',\n message=r'the train_test_leakage suite is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n", "path": "deepchecks/tabular/deprecation_warnings.py"}]} | 1,730 | 403 |
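
The filters only control whether the warnings are displayed; the warnings themselves come from deprecated-parameter shims in the drift conditions. A sketch of such a shim, with illustrative names rather than deepchecks' exact signatures:

```python
# Illustrative deprecated-parameter shim that the new filters surface.
import warnings

def add_drift_condition(max_allowed_drift_score=0.2,
                        max_allowed_psi_score=None):
    if max_allowed_psi_score is not None:
        warnings.warn(
            "max_allowed_psi_score is deprecated, use "
            "max_allowed_drift_score instead",
            DeprecationWarning,
        )
        max_allowed_drift_score = max_allowed_psi_score
    return max_allowed_drift_score
```
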
gh_patches_debug_20513 | rasdani/github-patches | git_diff | getsentry__sentry-1896 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'
https://beta.getsentry.com/sentry/sentry/group/56738068/
```
AttributeError: 'int' object has no attribute 'lower'
(11 additional frame(s) were not displayed)
...
File "sentry/interfaces/user.py", line 99, in to_email_html
return render_to_string('sentry/partial/interfaces/user_email.html', context)
File "sentry/web/helpers.py", line 122, in render_to_string
return loader.render_to_string(template, context)
File "templatetag_sugar/node.py", line 19, in render
return self.function(context, *args, **kwargs)
File "sentry/templatetags/sentry_helpers.py", line 314, in gravatar_url
return get_gravatar_url(email, size, default)
File "sentry/utils/avatar.py", line 18, in get_gravatar_url
hashlib.md5(email.lower()).hexdigest())
Stacktrace (most recent call last):
Error processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'
File "sentry/partial/interfaces/user_email.html", line 7
<tr>
{% if user_email %}
<td style="width:84px;padding-top:10px;vertical-align:top">
<img src="{% gravatar_url user_email size 64 %}">
</td>
{% endif %}
<td>
```
</issue>
<code>
[start of src/sentry/interfaces/user.py]
1 """
2 sentry.interfaces.user
3 ~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import
9
10 __all__ = ('User',)
11
12 from sentry.interfaces.base import Interface
13 from sentry.utils.safe import trim, trim_dict
14 from sentry.web.helpers import render_to_string
15 from ipaddr import IPAddress
16
17
18 def validate_ip(value, required=True):
19 if not required and not value:
20 return
21
22 # will raise a ValueError
23 IPAddress(value)
24 return value
25
26
27 class User(Interface):
28 """
29 An interface which describes the authenticated User for a request.
30
31 You should provide **at least** either an `id` (a unique identifier for
32 an authenticated user) or `ip_address` (their IP address).
33
34 All other attributes are optional.
35
36 >>> {
37 >>> "id": "unique_id",
38 >>> "username": "my_user",
39 >>> "email": "[email protected]"
40 >>> "ip_address": "127.0.0.1",
41 >>> "optional": "value"
42 >>> }
43 """
44 @classmethod
45 def to_python(cls, data):
46 data = data.copy()
47
48 extra_data = data.pop('data', data)
49 if not isinstance(extra_data, dict):
50 extra_data = {}
51
52 kwargs = {
53 'id': trim(data.pop('id', None), 128),
54 'email': trim(data.pop('email', None), 128),
55 'username': trim(data.pop('username', None), 128),
56 'ip_address': validate_ip(data.pop('ip_address', None), False),
57 }
58
59 kwargs['data'] = trim_dict(extra_data)
60 return cls(**kwargs)
61
62 def get_api_context(self, is_public=False):
63 return {
64 'id': self.id,
65 'email': self.email,
66 'username': self.username,
67 'ipAddress': self.ip_address,
68 'data': self.data,
69 }
70
71 def get_path(self):
72 return 'sentry.interfaces.User'
73
74 def get_hash(self):
75 return []
76
77 def to_email_html(self, event, **kwargs):
78 context = {
79 'user_id': self.id,
80 'user_email': self.email,
81 'user_username': self.username,
82 'user_ip_address': self.ip_address,
83 'user_data': self.data,
84 }
85 return render_to_string('sentry/partial/interfaces/user_email.html', context)
86
[end of src/sentry/interfaces/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/interfaces/user.py b/src/sentry/interfaces/user.py
--- a/src/sentry/interfaces/user.py
+++ b/src/sentry/interfaces/user.py
@@ -15,6 +15,19 @@
from ipaddr import IPAddress
+def validate_email(value, required=True):
+ if not required and not value:
+ return
+
+ if not isinstance(value, basestring):
+ raise TypeError('object of type %r is not an email address' % type(value).__name__)
+
+ # safe to assume an email address at least has a @ in it.
+ if '@' not in value:
+ raise ValueError('malformed email address')
+ return value
+
+
def validate_ip(value, required=True):
if not required and not value:
return
@@ -51,7 +64,7 @@
kwargs = {
'id': trim(data.pop('id', None), 128),
- 'email': trim(data.pop('email', None), 128),
+ 'email': trim(validate_email(data.pop('email', None), False), 128),
'username': trim(data.pop('username', None), 128),
'ip_address': validate_ip(data.pop('ip_address', None), False),
}
| {"golden_diff": "diff --git a/src/sentry/interfaces/user.py b/src/sentry/interfaces/user.py\n--- a/src/sentry/interfaces/user.py\n+++ b/src/sentry/interfaces/user.py\n@@ -15,6 +15,19 @@\n from ipaddr import IPAddress\n \n \n+def validate_email(value, required=True):\n+ if not required and not value:\n+ return\n+\n+ if not isinstance(value, basestring):\n+ raise TypeError('object of type %r is not an email address' % type(value).__name__)\n+\n+ # safe to assume an email address at least has a @ in it.\n+ if '@' not in value:\n+ raise ValueError('malformed email address')\n+ return value\n+\n+\n def validate_ip(value, required=True):\n if not required and not value:\n return\n@@ -51,7 +64,7 @@\n \n kwargs = {\n 'id': trim(data.pop('id', None), 128),\n- 'email': trim(data.pop('email', None), 128),\n+ 'email': trim(validate_email(data.pop('email', None), False), 128),\n 'username': trim(data.pop('username', None), 128),\n 'ip_address': validate_ip(data.pop('ip_address', None), False),\n }\n", "issue": "Error processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'\nhttps://beta.getsentry.com/sentry/sentry/group/56738068/\n\n```\nAttributeError: 'int' object has no attribute 'lower'\n(11 additional frame(s) were not displayed)\n...\n File \"sentry/interfaces/user.py\", line 99, in to_email_html\n return render_to_string('sentry/partial/interfaces/user_email.html', context)\n File \"sentry/web/helpers.py\", line 122, in render_to_string\n return loader.render_to_string(template, context)\n File \"templatetag_sugar/node.py\", line 19, in render\n return self.function(context, *args, **kwargs)\n File \"sentry/templatetags/sentry_helpers.py\", line 314, in gravatar_url\n return get_gravatar_url(email, size, default)\n File \"sentry/utils/avatar.py\", line 18, in get_gravatar_url\n hashlib.md5(email.lower()).hexdigest())\n\nStacktrace (most recent call last):\n\nError processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'\n\nFile \"sentry/partial/interfaces/user_email.html\", line 7\n\n <tr>\n {% if user_email %}\n <td style=\"width:84px;padding-top:10px;vertical-align:top\">\n <img src=\"{% gravatar_url user_email size 64 %}\">\n </td>\n {% endif %}\n <td>\n```\n\n", "before_files": [{"content": "\"\"\"\nsentry.interfaces.user\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import\n\n__all__ = ('User',)\n\nfrom sentry.interfaces.base import Interface\nfrom sentry.utils.safe import trim, trim_dict\nfrom sentry.web.helpers import render_to_string\nfrom ipaddr import IPAddress\n\n\ndef validate_ip(value, required=True):\n if not required and not value:\n return\n\n # will raise a ValueError\n IPAddress(value)\n return value\n\n\nclass User(Interface):\n \"\"\"\n An interface which describes the authenticated User for a request.\n\n You should provide **at least** either an `id` (a unique identifier for\n an authenticated user) or `ip_address` (their IP address).\n\n All other attributes are optional.\n\n >>> {\n >>> \"id\": \"unique_id\",\n >>> \"username\": \"my_user\",\n >>> \"email\": \"[email protected]\"\n >>> \"ip_address\": \"127.0.0.1\",\n >>> \"optional\": \"value\"\n >>> }\n \"\"\"\n @classmethod\n def to_python(cls, data):\n data = data.copy()\n\n extra_data = data.pop('data', data)\n if not isinstance(extra_data, dict):\n extra_data = {}\n\n kwargs = {\n 'id': trim(data.pop('id', None), 
128),\n 'email': trim(data.pop('email', None), 128),\n 'username': trim(data.pop('username', None), 128),\n 'ip_address': validate_ip(data.pop('ip_address', None), False),\n }\n\n kwargs['data'] = trim_dict(extra_data)\n return cls(**kwargs)\n\n def get_api_context(self, is_public=False):\n return {\n 'id': self.id,\n 'email': self.email,\n 'username': self.username,\n 'ipAddress': self.ip_address,\n 'data': self.data,\n }\n\n def get_path(self):\n return 'sentry.interfaces.User'\n\n def get_hash(self):\n return []\n\n def to_email_html(self, event, **kwargs):\n context = {\n 'user_id': self.id,\n 'user_email': self.email,\n 'user_username': self.username,\n 'user_ip_address': self.ip_address,\n 'user_data': self.data,\n }\n return render_to_string('sentry/partial/interfaces/user_email.html', context)\n", "path": "src/sentry/interfaces/user.py"}]} | 1,610 | 285 |
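
The root cause was an integer landing in the `email` field and reaching `email.lower()` in the gravatar helper; the patch rejects non-string values when the interface is parsed. A Python 3 rendition of the added validator, exercised standalone (the original targets Python 2, hence `basestring` in the diff):

```python
def validate_email(value, required=True):
    if not required and not value:
        return None
    if not isinstance(value, str):  # `basestring` in the Python 2 original
        raise TypeError(
            "object of type %r is not an email address" % type(value).__name__
        )
    if "@" not in value:  # safe to assume an address at least has a @ in it
        raise ValueError("malformed email address")
    return value

assert validate_email("[email protected]") == "[email protected]"
assert validate_email(None, required=False) is None
```
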
gh_patches_debug_35633 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix several configuration issues
Fix the pending issues in #563 as pointed out by @mauriciovasquezbernal.
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # FIXME find a better way to avoid all those "Expression has type "Any"" errors
16 # type: ignore
17
18 """
19 Simple configuration manager
20
21 This is a configuration manager for OpenTelemetry. It reads configuration
22 values from environment variables prefixed with
23 ``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.
24 The first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase
25 character.
26
27 For example, these environment variables will be read:
28
29 1. ``OPENTELEMETRY_PYTHON_SOMETHING``
30 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
31 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
32
33 These won't:
34
35 1. ``OPENTELEMETRY_PYTH_SOMETHING``
36 2. ``OPENTELEMETRY_PYTHON_something``
37 3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``
38 4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
39
40 The values stored in the environment variables can be found in an instance of
41 ``opentelemetry.configuration.Configuration``. This class can be instantiated
42 freely because instantiating it returns a singleton.
43
44 For example, if the environment variable
45 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
46 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
47
48 Non defined attributes will always return ``None``. This is intended to make it
49 easier to use the ``Configuration`` object in actual code, because it won't be
50 necessary to check for the attribute to be defined first.
51
52 Environment variables used by OpenTelemetry
53 -------------------------------------------
54
55 1. OPENTELEMETRY_PYTHON_METER_PROVIDER
56 2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
57
58 The value of these environment variables should be the name of the entry point
59 that points to the class that implements either provider. This OpenTelemetry
60 API package provides one entry point for each, which can be found in the
61 setup.py file::
62
63 entry_points={
64 ...
65 "opentelemetry_meter_provider": [
66 "default_meter_provider = "
67 "opentelemetry.metrics:DefaultMeterProvider"
68 ],
69 "opentelemetry_tracer_provider": [
70 "default_tracer_provider = "
71 "opentelemetry.trace:DefaultTracerProvider"
72 ],
73 }
74
75 To use the meter provider above, then the
76 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
77 "default_meter_provider" (this is not actually necessary since the
78 OpenTelemetry API provided providers are the default ones used if no
79 configuration is found in the environment variables).
80 """
81
82 from os import environ
83 from re import fullmatch
84
85
86 class Configuration:
87 _instance = None
88
89 __slots__ = []
90
91 def __new__(cls) -> "Configuration":
92 if Configuration._instance is None:
93
94 for key, value in environ.items():
95
96 match = fullmatch("OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)", key)
97
98 if match is not None:
99
100 key = match.group(1).lower()
101
102 setattr(Configuration, "_{}".format(key), value)
103 setattr(
104 Configuration,
105 key,
106 property(
107 fget=lambda cls, key=key: getattr(
108 cls, "_{}".format(key)
109 )
110 ),
111 )
112
113 Configuration.__slots__.append(key)
114
115 Configuration.__slots__ = tuple(Configuration.__slots__)
116
117 Configuration._instance = object.__new__(cls)
118
119 return cls._instance
120
121 def __getattr__(self, name):
122 return None
123
[end of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -19,27 +19,27 @@
Simple configuration manager
This is a configuration manager for OpenTelemetry. It reads configuration
-values from environment variables prefixed with
-``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.
-The first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase
-character.
+values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
+characters are only alphanumeric characters and unserscores, except for the
+first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
For example, these environment variables will be read:
1. ``OPENTELEMETRY_PYTHON_SOMETHING``
2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
These won't:
1. ``OPENTELEMETRY_PYTH_SOMETHING``
-2. ``OPENTELEMETRY_PYTHON_something``
-3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``
-4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
+2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
+3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
The values stored in the environment variables can be found in an instance of
``opentelemetry.configuration.Configuration``. This class can be instantiated
-freely because instantiating it returns a singleton.
+freely because instantiating it returns always the same object.
For example, if the environment variable
``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
@@ -93,11 +93,13 @@
for key, value in environ.items():
- match = fullmatch("OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)", key)
+ match = fullmatch(
+ r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
+ )
if match is not None:
- key = match.group(1).lower()
+ key = match.group(1)
setattr(Configuration, "_{}".format(key), value)
setattr(
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -19,27 +19,27 @@\n Simple configuration manager\n \n This is a configuration manager for OpenTelemetry. It reads configuration\n-values from environment variables prefixed with\n-``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.\n-The first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase\n-character.\n+values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\n+characters are only alphanumeric characters and unserscores, except for the\n+first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n \n For example, these environment variables will be read:\n \n 1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n \n These won't:\n \n 1. ``OPENTELEMETRY_PYTH_SOMETHING``\n-2. ``OPENTELEMETRY_PYTHON_something``\n-3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``\n-4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n+2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n+3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n \n The values stored in the environment variables can be found in an instance of\n ``opentelemetry.configuration.Configuration``. This class can be instantiated\n-freely because instantiating it returns a singleton.\n+freely because instantiating it returns always the same object.\n \n For example, if the environment variable\n ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n@@ -93,11 +93,13 @@\n \n for key, value in environ.items():\n \n- match = fullmatch(\"OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)\", key)\n+ match = fullmatch(\n+ r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n+ )\n \n if match is not None:\n \n- key = match.group(1).lower()\n+ key = match.group(1)\n \n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n", "issue": "Fix several configuration issues\nFix the pending issues in #563 as pointed out by @mauriciovasquezbernal. \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with\n``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.\nThe first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase\ncharacter.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. 
``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_something``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns a singleton.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n\"default_meter_provider\" (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\"OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)\", key)\n\n if match is not None:\n\n key = match.group(1).lower()\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}]} | 1,775 | 639 |
gh_patches_debug_61676 | rasdani/github-patches | git_diff | joke2k__faker-1710 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lower bound for typing_extensions?
* Faker version: 14.1.0
* OS: OpenSUSE Leap 15.3
For old Python versions (before Python 3.8), *faker* requires the *typing_extensions* with at least version 3.10.0.2. Due to some other dependencies inside my project, I am required to use version 3.7.4.3.
### Steps to reproduce
1. `pip install faker`
### Expected behavior
*faker* works with `typing_extensions==3.7.4.3` as well, to allow better integration with larger systems that are stuck at such older versions for some reason.
### Actual behavior
*faker* declares `typing_extensions>=3.10.0.2` (#1557), although I could see no issue when using it with `typing_extensions==3.7.4.3`.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from pathlib import Path
4
5 from setuptools import find_packages, setup
6
7 here = Path(__file__).resolve().parent
8 README = (here / "README.rst").read_text(encoding="utf-8")
9 VERSION = (here / "VERSION").read_text(encoding="utf-8").strip()
10
11 excluded_packages = ["docs", "tests", "tests.*"]
12
13
14 # this module can be zip-safe if the zipimporter implements iter_modules or if
15 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
16 try:
17 import pkgutil
18 import zipimport
19
20 zip_safe = (
21 hasattr(zipimport.zipimporter, "iter_modules")
22 or zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
23 )
24 except AttributeError:
25 zip_safe = False
26
27 setup(
28 name="Faker",
29 version=VERSION,
30 description="Faker is a Python package that generates fake data for you.",
31 long_description=README,
32 entry_points={
33 "console_scripts": ["faker=faker.cli:execute_from_command_line"],
34 "pytest11": ["faker = faker.contrib.pytest.plugin"],
35 },
36 classifiers=[
37 # See https://pypi.org/pypi?%3Aaction=list_classifiers
38 "Development Status :: 5 - Production/Stable",
39 "Environment :: Console",
40 "Intended Audience :: Developers",
41 "Programming Language :: Python",
42 "Programming Language :: Python :: 3",
43 "Programming Language :: Python :: 3 :: Only",
44 "Programming Language :: Python :: 3.6",
45 "Programming Language :: Python :: 3.7",
46 "Programming Language :: Python :: 3.8",
47 "Programming Language :: Python :: 3.9",
48 "Programming Language :: Python :: Implementation :: CPython",
49 "Programming Language :: Python :: Implementation :: PyPy",
50 "Topic :: Software Development :: Libraries :: Python Modules",
51 "Topic :: Software Development :: Testing",
52 "Topic :: Utilities",
53 "License :: OSI Approved :: MIT License",
54 ],
55 keywords="faker fixtures data test mock generator",
56 author="joke2k",
57 author_email="[email protected]",
58 url="https://github.com/joke2k/faker",
59 project_urls={
60 "Bug Tracker": "https://github.com/joke2k/faker/issues",
61 "Changes": "https://github.com/joke2k/faker/blob/master/CHANGELOG.md",
62 "Documentation": "http://faker.rtfd.org/",
63 "Source Code": "https://github.com/joke2k/faker",
64 },
65 license="MIT License",
66 packages=find_packages(exclude=excluded_packages),
67 package_data={
68 "faker": ["py.typed"],
69 },
70 platforms=["any"],
71 zip_safe=zip_safe,
72 python_requires=">=3.6",
73 install_requires=[
74 "python-dateutil>=2.4",
75 "typing-extensions>=3.10.0.2;python_version<'3.8'",
76 ],
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,6 +72,6 @@
python_requires=">=3.6",
install_requires=[
"python-dateutil>=2.4",
- "typing-extensions>=3.10.0.2;python_version<'3.8'",
+ "typing-extensions>=3.7.4.3;python_version<'3.8'",
],
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,6 +72,6 @@\n python_requires=\">=3.6\",\n install_requires=[\n \"python-dateutil>=2.4\",\n- \"typing-extensions>=3.10.0.2;python_version<'3.8'\",\n+ \"typing-extensions>=3.7.4.3;python_version<'3.8'\",\n ],\n )\n", "issue": "Lower bound for typing_extensions?\n* Faker version: 14.1.0\r\n* OS: OpenSUSE Leap 15.3\r\n\r\nFor old Python versions (before Python 3.8), *faker* requires the *typing_extensions* with at least version 3.10.0.2. Due to some other dependencies inside my project, I am required to use version 3.7.4.3.\r\n\r\n### Steps to reproduce\r\n\r\n1. `pip install faker`\r\n\r\n### Expected behavior\r\n\r\n*faker* works with `typing_extensions==3.7.4.3` as well, to allow better integration with larger systems which are stuck at such older versions for some reasons.\r\n\r\n### Actual behavior\r\n\r\n*faker* declares `typing_extensions>=3.10.0.2` (#1557), although I could see no issue when using it with `typing_extensions==3.7.4.3`.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nhere = Path(__file__).resolve().parent\nREADME = (here / \"README.rst\").read_text(encoding=\"utf-8\")\nVERSION = (here / \"VERSION\").read_text(encoding=\"utf-8\").strip()\n\nexcluded_packages = [\"docs\", \"tests\", \"tests.*\"]\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n\n zip_safe = (\n hasattr(zipimport.zipimporter, \"iter_modules\")\n or zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\n )\nexcept AttributeError:\n zip_safe = False\n\nsetup(\n name=\"Faker\",\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n \"console_scripts\": [\"faker=faker.cli:execute_from_command_line\"],\n \"pytest11\": [\"faker = faker.contrib.pytest.plugin\"],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: MIT License\",\n ],\n keywords=\"faker fixtures data test mock generator\",\n author=\"joke2k\",\n author_email=\"[email protected]\",\n url=\"https://github.com/joke2k/faker\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/joke2k/faker/issues\",\n \"Changes\": \"https://github.com/joke2k/faker/blob/master/CHANGELOG.md\",\n \"Documentation\": \"http://faker.rtfd.org/\",\n \"Source Code\": \"https://github.com/joke2k/faker\",\n },\n license=\"MIT License\",\n packages=find_packages(exclude=excluded_packages),\n package_data={\n \"faker\": [\"py.typed\"],\n },\n platforms=[\"any\"],\n zip_safe=zip_safe,\n 
python_requires=\">=3.6\",\n install_requires=[\n \"python-dateutil>=2.4\",\n \"typing-extensions>=3.10.0.2;python_version<'3.8'\",\n ],\n)\n", "path": "setup.py"}]} | 1,520 | 104 |
gh_patches_debug_14376 | rasdani/github-patches | git_diff | DataDog__dd-agent-495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
apache check doesn't support basic auth
If I want basic auth, I'd assume that all I have to do is make my config look like:
```
apache_status_url: http://user:[email protected]:80/server-status?auto
```
But actually that just doesn't work. We need to do some special incantations to make `urllib2` do basic auth.
</issue>
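
For context on the "special incantations" (an editorial aside, not part of the original report): urllib2 does not pull credentials out of a `user:pass@` URL, so the usual workaround is to attach the `Authorization` header by hand. A minimal sketch, assuming Python 2 since that is what this codebase targets; the function and argument names are illustrative, not the check's real config keys:

```python
import base64
import urllib2


def open_with_basic_auth(url, user, password):
    # urllib2 ignores user:pass@ embedded in the URL, so build the
    # HTTP Basic credentials and header manually.
    req = urllib2.Request(url)
    token = base64.b64encode('%s:%s' % (user, password))
    req.add_header('Authorization', 'Basic %s' % token)
    return urllib2.urlopen(req)
```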
<code>
[start of checks.d/apache.py]
1 import urllib2
2
3 from util import headers
4 from checks import AgentCheck
5
6 class Apache(AgentCheck):
7 """Tracks basic connection/requests/workers metrics
8
9 See http://httpd.apache.org/docs/2.2/mod/mod_status.html for more details
10 """
11 GAUGES = {
12 'IdleWorkers': 'apache.performance.idle_workers',
13 'BusyWorkers': 'apache.performance.busy_workers',
14 'CPULoad': 'apache.performance.cpu_load',
15 'Uptime': 'apache.performance.uptime',
16 'Total kBytes': 'apache.net.bytes',
17 'Total Accesses': 'apache.net.hits',
18 }
19
20 RATES = {
21 'Total kBytes': 'apache.net.bytes_per_s',
22 'Total Accesses': 'apache.net.request_per_s'
23 }
24
25 def __init__(self, name, init_config, agentConfig, instances=None):
26 AgentCheck.__init__(self, name, init_config, agentConfig, instances)
27 self.assumed_url = {}
28
29 def check(self, instance):
30 if 'apache_status_url' not in instance:
31 raise Exception("Missing 'apache_status_url' in Apache config")
32
33 url = self.assumed_url.get(instance['apache_status_url'], instance['apache_status_url'])
34
35 tags = instance.get('tags', [])
36 req = urllib2.Request(url, None,
37 headers(self.agentConfig))
38 request = urllib2.urlopen(req)
39 response = request.read()
40
41 metric_count = 0
42 # Loop through and extract the numerical values
43 for line in response.split('\n'):
44 values = line.split(': ')
45 if len(values) == 2: # match
46 metric, value = values
47 try:
48 value = float(value)
49 except ValueError:
50 continue
51
52 # Special case: kBytes => bytes
53 if metric == 'Total kBytes':
54 value = value * 1024
55
56 # Send metric as a gauge, if applicable
57 if metric in self.GAUGES:
58 metric_count += 1
59 metric_name = self.GAUGES[metric]
60 self.gauge(metric_name, value, tags=tags)
61
62 # Send metric as a rate, if applicable
63 if metric in self.RATES:
64 metric_count += 1
65 metric_name = self.RATES[metric]
66 self.rate(metric_name, value, tags=tags)
67
68 if metric_count == 0:
69 if self.assumed_url.get(instance['apache_status_url'], None) is None and url[-5:] != '?auto':
70 self.assumed_url[instance['apache_status_url']]= '%s?auto' % url
71 self.warning("Assuming url was not correct. Trying to add ?auto suffix to the url")
72 self.check(instance)
73 else:
74 raise Exception("No metrics were fetched for this instance. Make sure that %s is the proper url." % instance['apache_status_url'])
75
76
77 @staticmethod
78 def parse_agent_config(agentConfig):
79 if not agentConfig.get('apache_status_url'):
80 return False
81
82 return {
83 'instances': [{'apache_status_url': agentConfig.get('apache_status_url')}]
84 }
85
[end of checks.d/apache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/apache.py b/checks.d/apache.py
--- a/checks.d/apache.py
+++ b/checks.d/apache.py
@@ -1,4 +1,5 @@
import urllib2
+import base64
from util import headers
from checks import AgentCheck
@@ -35,6 +36,10 @@
tags = instance.get('tags', [])
req = urllib2.Request(url, None,
headers(self.agentConfig))
+ if 'apache_user' in instance and 'apache_password' in instance:
+ auth_str = '%s:%s' % (instance['apache_user'], instance['apache_password'])
+ encoded_auth_str = base64.encodestring(auth_str)
+ req.add_header("Authorization", "Basic %s" % encoded_auth_str)
request = urllib2.urlopen(req)
response = request.read()
| {"golden_diff": "diff --git a/checks.d/apache.py b/checks.d/apache.py\n--- a/checks.d/apache.py\n+++ b/checks.d/apache.py\n@@ -1,4 +1,5 @@\n import urllib2\n+import base64\n \n from util import headers\n from checks import AgentCheck\n@@ -35,6 +36,10 @@\n tags = instance.get('tags', [])\n req = urllib2.Request(url, None,\n headers(self.agentConfig))\n+ if 'apache_user' in instance and 'apache_password' in instance:\n+ auth_str = '%s:%s' % (instance['apache_user'], instance['apache_password'])\n+ encoded_auth_str = base64.encodestring(auth_str)\n+ req.add_header(\"Authorization\", \"Basic %s\" % encoded_auth_str)\n request = urllib2.urlopen(req)\n response = request.read()\n", "issue": "apache check doesn't support basic auth\nIf I want basic auth, I'd assume that all I have to do is make my config look like:\n\n```\napache_status_url: http://user:[email protected]:80/server-status?auto\n```\n\nBut actually that just doesn't work. We need to do some special incantations to make `urllib2` do basic auth.\n\n", "before_files": [{"content": "import urllib2\n\nfrom util import headers\nfrom checks import AgentCheck\n\nclass Apache(AgentCheck):\n \"\"\"Tracks basic connection/requests/workers metrics\n\n See http://httpd.apache.org/docs/2.2/mod/mod_status.html for more details\n \"\"\"\n GAUGES = {\n 'IdleWorkers': 'apache.performance.idle_workers',\n 'BusyWorkers': 'apache.performance.busy_workers',\n 'CPULoad': 'apache.performance.cpu_load',\n 'Uptime': 'apache.performance.uptime',\n 'Total kBytes': 'apache.net.bytes',\n 'Total Accesses': 'apache.net.hits',\n }\n\n RATES = {\n 'Total kBytes': 'apache.net.bytes_per_s',\n 'Total Accesses': 'apache.net.request_per_s'\n }\n\n def __init__(self, name, init_config, agentConfig, instances=None):\n AgentCheck.__init__(self, name, init_config, agentConfig, instances)\n self.assumed_url = {}\n\n def check(self, instance):\n if 'apache_status_url' not in instance:\n raise Exception(\"Missing 'apache_status_url' in Apache config\")\n\n url = self.assumed_url.get(instance['apache_status_url'], instance['apache_status_url'])\n\n tags = instance.get('tags', [])\n req = urllib2.Request(url, None,\n headers(self.agentConfig))\n request = urllib2.urlopen(req)\n response = request.read()\n\n metric_count = 0\n # Loop through and extract the numerical values\n for line in response.split('\\n'):\n values = line.split(': ')\n if len(values) == 2: # match\n metric, value = values\n try:\n value = float(value)\n except ValueError:\n continue\n\n # Special case: kBytes => bytes\n if metric == 'Total kBytes':\n value = value * 1024\n\n # Send metric as a gauge, if applicable\n if metric in self.GAUGES:\n metric_count += 1\n metric_name = self.GAUGES[metric]\n self.gauge(metric_name, value, tags=tags)\n\n # Send metric as a rate, if applicable\n if metric in self.RATES:\n metric_count += 1\n metric_name = self.RATES[metric]\n self.rate(metric_name, value, tags=tags)\n\n if metric_count == 0:\n if self.assumed_url.get(instance['apache_status_url'], None) is None and url[-5:] != '?auto':\n self.assumed_url[instance['apache_status_url']]= '%s?auto' % url\n self.warning(\"Assuming url was not correct. Trying to add ?auto suffix to the url\")\n self.check(instance)\n else:\n raise Exception(\"No metrics were fetched for this instance. 
Make sure that %s is the proper url.\" % instance['apache_status_url'])\n\n\n @staticmethod\n def parse_agent_config(agentConfig):\n if not agentConfig.get('apache_status_url'):\n return False\n\n return {\n 'instances': [{'apache_status_url': agentConfig.get('apache_status_url')}]\n }\n", "path": "checks.d/apache.py"}]} | 1,465 | 188 |
gh_patches_debug_4502 | rasdani/github-patches | git_diff | deis__deis-1495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CoreOS on Google Compute Engine
Could you add Compute Engine to contrib?
http://googlecloudplatform.blogspot.cz/2014/05/official-coreos-images-are-now-available-on-google-compute-engine.html?m=1
</issue>
<code>
[start of client/setup.py]
1 #!/usr/bin/env python
2
3 """Install the Deis command-line client."""
4
5
6 try:
7 from setuptools import setup
8 USE_SETUPTOOLS = True
9 except ImportError:
10 from distutils.core import setup
11 USE_SETUPTOOLS = False
12
13 try:
14 LONG_DESCRIPTION = open('README.rst').read()
15 except IOError:
16 LONG_DESCRIPTION = 'Deis command-line client'
17
18 try:
19 APACHE_LICENSE = open('LICENSE').read()
20 except IOError:
21 APACHE_LICENSE = 'See http://www.apache.org/licenses/LICENSE-2.0'
22
23 KWARGS = {}
24 if USE_SETUPTOOLS:
25 KWARGS = {'entry_points': {'console_scripts': ['deis = deis:main']}}
26 else:
27 KWARGS = {'scripts': ['deis']}
28
29
30 setup(name='deis',
31 version='0.11.0',
32 license=APACHE_LICENSE,
33 description='Command-line Client for Deis, the open PaaS',
34 author='OpDemand',
35 author_email='[email protected]',
36 url='https://github.com/deis/deis',
37 keywords=[
38 'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',
39 'aws', 'ec2', 'rackspace', 'digitalocean'
40 ],
41 classifiers=[
42 'Development Status :: 4 - Beta',
43 'Environment :: Console',
44 'Intended Audience :: Developers',
45 'Intended Audience :: Information Technology',
46 'Intended Audience :: System Administrators',
47 'License :: OSI Approved :: Apache Software License',
48 'Operating System :: OS Independent',
49 'Programming Language :: Python',
50 'Programming Language :: Python :: 2.7',
51 'Topic :: Internet',
52 'Topic :: System :: Systems Administration',
53 ],
54 py_modules=['deis'],
55 data_files=[
56 ('.', ['README.rst']),
57 ],
58 long_description=LONG_DESCRIPTION,
59 install_requires=[
60 'docopt==0.6.1', 'python-dateutil==2.2',
61 'PyYAML==3.10', 'requests==2.2.1', 'urllib3==1.8.2'
62 ],
63 zip_safe=True,
64 **KWARGS)
65
[end of client/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/client/setup.py b/client/setup.py
--- a/client/setup.py
+++ b/client/setup.py
@@ -36,7 +36,7 @@
url='https://github.com/deis/deis',
keywords=[
'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',
- 'aws', 'ec2', 'rackspace', 'digitalocean'
+ 'aws', 'ec2', 'rackspace', 'digitalocean', 'gce'
],
classifiers=[
'Development Status :: 4 - Beta',
| {"golden_diff": "diff --git a/client/setup.py b/client/setup.py\n--- a/client/setup.py\n+++ b/client/setup.py\n@@ -36,7 +36,7 @@\n url='https://github.com/deis/deis',\n keywords=[\n 'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',\n- 'aws', 'ec2', 'rackspace', 'digitalocean'\n+ 'aws', 'ec2', 'rackspace', 'digitalocean', 'gce'\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n", "issue": "CoreOS on Google Compute Engine\nCould you add Compute Engine to contrib?\n\nhttp://googlecloudplatform.blogspot.cz/2014/05/official-coreos-images-are-now-available-on-google-compute-engine.html?m=1\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"Install the Deis command-line client.\"\"\"\n\n\ntry:\n from setuptools import setup\n USE_SETUPTOOLS = True\nexcept ImportError:\n from distutils.core import setup\n USE_SETUPTOOLS = False\n\ntry:\n LONG_DESCRIPTION = open('README.rst').read()\nexcept IOError:\n LONG_DESCRIPTION = 'Deis command-line client'\n\ntry:\n APACHE_LICENSE = open('LICENSE').read()\nexcept IOError:\n APACHE_LICENSE = 'See http://www.apache.org/licenses/LICENSE-2.0'\n\nKWARGS = {}\nif USE_SETUPTOOLS:\n KWARGS = {'entry_points': {'console_scripts': ['deis = deis:main']}}\nelse:\n KWARGS = {'scripts': ['deis']}\n\n\nsetup(name='deis',\n version='0.11.0',\n license=APACHE_LICENSE,\n description='Command-line Client for Deis, the open PaaS',\n author='OpDemand',\n author_email='[email protected]',\n url='https://github.com/deis/deis',\n keywords=[\n 'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',\n 'aws', 'ec2', 'rackspace', 'digitalocean'\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet',\n 'Topic :: System :: Systems Administration',\n ],\n py_modules=['deis'],\n data_files=[\n ('.', ['README.rst']),\n ],\n long_description=LONG_DESCRIPTION,\n install_requires=[\n 'docopt==0.6.1', 'python-dateutil==2.2',\n 'PyYAML==3.10', 'requests==2.2.1', 'urllib3==1.8.2'\n ],\n zip_safe=True,\n **KWARGS)\n", "path": "client/setup.py"}]} | 1,187 | 135 |
gh_patches_debug_5489 | rasdani/github-patches | git_diff | pwndbg__pwndbg-2005 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request: RVA-based breakpoints
**Is your feature request related to a problem? Please describe.**
I am missing a command that would allow creating a breakpoint relative to some image - either the main program or a library - for example at offset 0x1337 from libc base.
**Describe the solution you'd like**
A new command that would enable this, for example
````
bp libc.so+0x1337
````
**Describe alternatives you've considered**
The alternative is to manually find the image bases, which is tedious.
</issue>
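
One way to approximate the request today (an editorial aside, not part of the original report) is a GDB convenience function that resolves an image base from the process mappings, enabling `break *$imgbase("libc")+0x1337`. The sketch below is illustrative: the `$imgbase` name is made up, and a real pwndbg implementation would reuse its own vmmap machinery instead of parsing /proc directly.

```python
import gdb


class ImageBase(gdb.Function):
    """$imgbase("pattern"): base address of the first mapping whose path matches."""

    def __init__(self):
        super(ImageBase, self).__init__("imgbase")

    def invoke(self, pattern):
        needle = pattern.string()
        pid = gdb.selected_inferior().pid
        with open("/proc/%d/maps" % pid) as maps:
            for line in maps:
                if needle in line:
                    # Lines look like "7f..000-7f..fff r-xp ... /lib/libc.so.6".
                    return int(line.split("-")[0], 16)
        raise ValueError("no mapping matching %r" % needle)


ImageBase()
```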
<code>
[start of pwndbg/gdblib/functions.py]
1 """
2 Put all functions defined for gdb in here.
3
4 This file might be changed into a module in the future.
5 """
6
7 from __future__ import annotations
8
9 import functools
10 from typing import Any
11 from typing import Callable
12 from typing import List
13
14 import gdb
15
16 import pwndbg.gdblib.elf
17 import pwndbg.gdblib.proc
18
19 functions: List[_GdbFunction] = []
20
21
22 def GdbFunction(only_when_running: bool = False) -> Callable[..., Any]:
23 return functools.partial(_GdbFunction, only_when_running=only_when_running)
24
25
26 class _GdbFunction(gdb.Function):
27 def __init__(self, func: Callable[..., Any], only_when_running: bool) -> None:
28 self.name = func.__name__
29 self.func = func
30 self.only_when_running = only_when_running
31
32 functions.append(self)
33
34 super().__init__(self.name)
35
36 functools.update_wrapper(self, func)
37 self.__doc__ = func.__doc__
38
39 def invoke(self, *args: gdb.Value) -> Any:
40 if self.only_when_running and not pwndbg.gdblib.proc.alive:
41 # Returning empty string is a workaround that we can't stop e.g. `break *$rebase(offset)`
42 # Thx to that, gdb will print out 'evaluation of this expression requires the target program to be active'
43 return ""
44
45 return self.func(*args)
46
47 def __call__(self, *args: gdb.Value) -> Any:
48 return self.invoke(*args)
49
50
51 @GdbFunction(only_when_running=True)
52 def rebase(addr: gdb.Value | int) -> int:
53 """Return rebased address."""
54 base = pwndbg.gdblib.elf.exe().address
55 return base + int(addr)
56
[end of pwndbg/gdblib/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/gdblib/functions.py b/pwndbg/gdblib/functions.py
--- a/pwndbg/gdblib/functions.py
+++ b/pwndbg/gdblib/functions.py
@@ -53,3 +53,16 @@
"""Return rebased address."""
base = pwndbg.gdblib.elf.exe().address
return base + int(addr)
+
+
+@GdbFunction(only_when_running=True)
+def base(name_pattern: gdb.Value | str) -> int:
+ if isinstance(name_pattern, gdb.Value):
+ name = name_pattern.string()
+ else:
+ name = name_pattern
+
+ for p in pwndbg.gdblib.vmmap.get():
+ if name in p.objfile:
+ return p.vaddr
+ raise ValueError(f"No mapping named {name}")
| {"golden_diff": "diff --git a/pwndbg/gdblib/functions.py b/pwndbg/gdblib/functions.py\n--- a/pwndbg/gdblib/functions.py\n+++ b/pwndbg/gdblib/functions.py\n@@ -53,3 +53,16 @@\n \"\"\"Return rebased address.\"\"\"\n base = pwndbg.gdblib.elf.exe().address\n return base + int(addr)\n+\n+\n+@GdbFunction(only_when_running=True)\n+def base(name_pattern: gdb.Value | str) -> int:\n+ if isinstance(name_pattern, gdb.Value):\n+ name = name_pattern.string()\n+ else:\n+ name = name_pattern\n+\n+ for p in pwndbg.gdblib.vmmap.get():\n+ if name in p.objfile:\n+ return p.vaddr\n+ raise ValueError(f\"No mapping named {name}\")\n", "issue": "Feature request: RVA-based breakpoints\n**Is your feature request related to a problem? Please describe.**\r\nI am missing a command that would allow creating a breakpoint relative to some image - either main program or a library - for example at offset 0x1337 from libc base.\r\n\r\n**Describe the solution you'd like**\r\nA new command that would enable this, for example\r\n\r\n````\r\nbp libc.so+0x1337\r\n````\r\n\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative is to manually find the image bases which is tedious.\r\n\n", "before_files": [{"content": "\"\"\"\nPut all functions defined for gdb in here.\n\nThis file might be changed into a module in the future.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nfrom typing import Any\nfrom typing import Callable\nfrom typing import List\n\nimport gdb\n\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.proc\n\nfunctions: List[_GdbFunction] = []\n\n\ndef GdbFunction(only_when_running: bool = False) -> Callable[..., Any]:\n return functools.partial(_GdbFunction, only_when_running=only_when_running)\n\n\nclass _GdbFunction(gdb.Function):\n def __init__(self, func: Callable[..., Any], only_when_running: bool) -> None:\n self.name = func.__name__\n self.func = func\n self.only_when_running = only_when_running\n\n functions.append(self)\n\n super().__init__(self.name)\n\n functools.update_wrapper(self, func)\n self.__doc__ = func.__doc__\n\n def invoke(self, *args: gdb.Value) -> Any:\n if self.only_when_running and not pwndbg.gdblib.proc.alive:\n # Returning empty string is a workaround that we can't stop e.g. `break *$rebase(offset)`\n # Thx to that, gdb will print out 'evaluation of this expression requires the target program to be active'\n return \"\"\n\n return self.func(*args)\n\n def __call__(self, *args: gdb.Value) -> Any:\n return self.invoke(*args)\n\n\n@GdbFunction(only_when_running=True)\ndef rebase(addr: gdb.Value | int) -> int:\n \"\"\"Return rebased address.\"\"\"\n base = pwndbg.gdblib.elf.exe().address\n return base + int(addr)\n", "path": "pwndbg/gdblib/functions.py"}]} | 1,146 | 186 |
gh_patches_debug_34969 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature Request: !Sub equivalent of W1001
*cfn-lint version: 0.58.2*
I believe that `!Sub` parameters should be checked to see if they depend on conditional resources the same way W1001 checks this for `!Ref` (e.g. `SubCondParam.Value` should produce a warning). I suspect this is possible since E1019 checks for non-existent parameters within a `!Sub`.
```
---
AWSTemplateFormatVersion: 2010-09-09
Conditions:
CreateContainerRepository: !Equals ["yes", "no"]
Resources:
Repository:
Type: AWS::ECR::Repository
Condition: CreateContainerRepository
SubCondParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Sub ${Repository}
RefCondParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Ref Repository
SubFakeParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Sub ${Fake}
RefFakeParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Ref Fake
```
* SubCondParam.Value shows no error or warning, I believe it should show a warning
* RefCondParam.Value shows W1001
* SubFakeParam.Value shows E1019
* RefFakeParam.Value shows E1012
</issue>
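
To illustrate the feasibility argument above (an editorial aside, not part of the original report): since E1019 already resolves `${...}` names inside `!Sub`, extracting the resource names a Sub string depends on is straightforward. A standalone sketch with an illustrative regex, not cfn-lint's actual helper:

```python
import re


def sub_resource_refs(sub_string):
    # Skip ${!Literal} escapes, drop AWS:: pseudo parameters, and reduce
    # GetAtt-style "${Repository.Arn}" to the bare resource name.
    names = re.findall(r"\$\{([^!}][^}]*)\}", sub_string)
    return [n.split(".")[0] for n in names if "::" not in n]


print(sub_resource_refs("${Repository} and ${Repository.Arn} in ${AWS::Region}"))
# ['Repository', 'Repository']
```

Each extracted name could then be fed through the same `is_resource_available` scenario check that W1001 already applies to `!Ref` values.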
<code>
[start of src/cfnlint/rules/functions/RelationshipConditions.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.helpers import PSEUDOPARAMS
6 from cfnlint.rules import CloudFormationLintRule, RuleMatch
7
8
9 class RelationshipConditions(CloudFormationLintRule):
10 """Check if Ref/GetAtt values are available via conditions"""
11
12 id = "W1001"
13 shortdesc = "Ref/GetAtt to resource that is available when conditions are applied"
14 description = (
15 "Check the Conditions that affect a Ref/GetAtt to make sure "
16 "the resource being related to is available when there is a resource "
17 "condition."
18 )
19 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html"
20 tags = ["conditions", "resources", "relationships", "ref", "getatt"]
21
22 def match(self, cfn):
23 """Check CloudFormation Ref/GetAtt for Conditions"""
24
25 matches = []
26
27 # Start with Ref checks
28 ref_objs = cfn.search_deep_keys(searchText="Ref", includeGlobals=False)
29 for ref_obj in ref_objs:
30 value = ref_obj[-1]
31 if value not in PSEUDOPARAMS:
32 scenarios = cfn.is_resource_available(ref_obj, value)
33 for scenario in scenarios:
34 # pylint: disable=consider-using-f-string
35 scenario_text = " and ".join(
36 [
37 'when condition "%s" is %s' % (k, v)
38 for (k, v) in scenario.items()
39 ]
40 )
41 message = (
42 'Ref to resource "{0}" that may not be available {1} at {2}'
43 )
44 matches.append(
45 RuleMatch(
46 ref_obj[:-1],
47 message.format(
48 value, scenario_text, "/".join(map(str, ref_obj[:-1]))
49 ),
50 )
51 )
52
53 # The do GetAtt
54 getatt_objs = cfn.search_deep_keys(
55 searchText="Fn::GetAtt", includeGlobals=False
56 )
57 for getatt_obj in getatt_objs:
58 value_obj = getatt_obj[-1]
59 value = None
60 if isinstance(value_obj, list):
61 value = value_obj[0]
62 elif isinstance(value_obj, str):
63 value = value_obj.split(".")[0]
64 if value:
65 if value not in PSEUDOPARAMS:
66 scenarios = cfn.is_resource_available(getatt_obj, value)
67 for scenario in scenarios:
68 scenario_text = " and ".join(
69 [
70 f'when condition "{k}" is {v}'
71 for (k, v) in scenario.items()
72 ]
73 )
74 message = 'GetAtt to resource "{0}" that may not be available {1} at {2}'
75 matches.append(
76 RuleMatch(
77 getatt_obj[:-1],
78 message.format(
79 value,
80 scenario_text,
81 "/".join(map(str, getatt_obj[:-1])),
82 ),
83 )
84 )
85
86 return matches
87
[end of src/cfnlint/rules/functions/RelationshipConditions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/functions/RelationshipConditions.py b/src/cfnlint/rules/functions/RelationshipConditions.py
--- a/src/cfnlint/rules/functions/RelationshipConditions.py
+++ b/src/cfnlint/rules/functions/RelationshipConditions.py
@@ -17,7 +17,7 @@
"condition."
)
source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html"
- tags = ["conditions", "resources", "relationships", "ref", "getatt"]
+ tags = ["conditions", "resources", "relationships", "ref", "getatt", "sub"]
def match(self, cfn):
"""Check CloudFormation Ref/GetAtt for Conditions"""
@@ -83,4 +83,46 @@
)
)
+ # The do Sub
+ sub_objs = cfn.search_deep_keys(searchText="Fn::Sub", includeGlobals=False)
+ for sub_obj in sub_objs:
+ sub_string = sub_obj[-1]
+ # Filter out bad types of sub_strings.
+ # Lists have two be two items and it can be just a string
+ if not isinstance(sub_string, (list, str)):
+ continue
+ if isinstance(sub_string, str):
+ sub_string = [sub_string, {}]
+ if len(sub_string) != 2:
+ continue
+ sub_params = sub_string[1]
+ string_params = cfn.get_sub_parameters(sub_string[0])
+
+ for string_param in string_params:
+ if string_param not in sub_params:
+ # deal with GetAtts by dropping everything after the .
+ string_param = string_param.split(".")[0]
+ if string_param in cfn.template.get("Resources", {}):
+ scenarios = cfn.is_resource_available(
+ sub_obj[:-1], string_param
+ )
+ for scenario in scenarios:
+ scenario_text = " and ".join(
+ [
+ f'when condition "{k}" is {v}'
+ for (k, v) in scenario.items()
+ ]
+ )
+ message = 'Fn::Sub to resource "{0}" that may not be available {1} at {2}'
+ matches.append(
+ RuleMatch(
+ sub_obj[:-1],
+ message.format(
+ string_param,
+ scenario_text,
+ "/".join(map(str, sub_obj[:-1])),
+ ),
+ )
+ )
+
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/RelationshipConditions.py b/src/cfnlint/rules/functions/RelationshipConditions.py\n--- a/src/cfnlint/rules/functions/RelationshipConditions.py\n+++ b/src/cfnlint/rules/functions/RelationshipConditions.py\n@@ -17,7 +17,7 @@\n \"condition.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html\"\n- tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\"]\n+ tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\", \"sub\"]\n \n def match(self, cfn):\n \"\"\"Check CloudFormation Ref/GetAtt for Conditions\"\"\"\n@@ -83,4 +83,46 @@\n )\n )\n \n+ # The do Sub\n+ sub_objs = cfn.search_deep_keys(searchText=\"Fn::Sub\", includeGlobals=False)\n+ for sub_obj in sub_objs:\n+ sub_string = sub_obj[-1]\n+ # Filter out bad types of sub_strings.\n+ # Lists have two be two items and it can be just a string\n+ if not isinstance(sub_string, (list, str)):\n+ continue\n+ if isinstance(sub_string, str):\n+ sub_string = [sub_string, {}]\n+ if len(sub_string) != 2:\n+ continue\n+ sub_params = sub_string[1]\n+ string_params = cfn.get_sub_parameters(sub_string[0])\n+\n+ for string_param in string_params:\n+ if string_param not in sub_params:\n+ # deal with GetAtts by dropping everything after the .\n+ string_param = string_param.split(\".\")[0]\n+ if string_param in cfn.template.get(\"Resources\", {}):\n+ scenarios = cfn.is_resource_available(\n+ sub_obj[:-1], string_param\n+ )\n+ for scenario in scenarios:\n+ scenario_text = \" and \".join(\n+ [\n+ f'when condition \"{k}\" is {v}'\n+ for (k, v) in scenario.items()\n+ ]\n+ )\n+ message = 'Fn::Sub to resource \"{0}\" that may not be available {1} at {2}'\n+ matches.append(\n+ RuleMatch(\n+ sub_obj[:-1],\n+ message.format(\n+ string_param,\n+ scenario_text,\n+ \"/\".join(map(str, sub_obj[:-1])),\n+ ),\n+ )\n+ )\n+\n return matches\n", "issue": "Feature Request: !Sub equivalent of W1001\n*cfn-lint version: 0.58.2*\r\n\r\nI believe that `!Sub` parameters should be checked to see if they depend on conditional resources the same way W1001 checks this for `!Ref` (e.g. `SubCondParam.Value` should produce a warning). I suspect this is possible since E1019 checks for non-existent parameters within a `!Sub`.\r\n\r\n```\r\n---\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nConditions:\r\n CreateContainerRepository: !Equals [\"yes\", \"no\"]\r\n\r\nResources:\r\n Repository:\r\n Type: AWS::ECR::Repository\r\n Condition: CreateContainerRepository\r\n\r\n SubCondParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Sub ${Repository}\r\n\r\n RefCondParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Ref Repository\r\n\r\n SubFakeParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Sub ${Fake}\r\n\r\n RefFakeParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Ref Fake\r\n```\r\n\r\n* SubCondParam.Value shows no error or warning, I believe it should show a warning\r\n* RefCondParam.Value shows W1001\r\n* SubFakeParam.Value shows E1019\r\n* RefFakeParam.Value shows E1012\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.helpers import PSEUDOPARAMS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RelationshipConditions(CloudFormationLintRule):\n \"\"\"Check if Ref/GetAtt values are available via conditions\"\"\"\n\n id = \"W1001\"\n shortdesc = \"Ref/GetAtt to resource that is available when conditions are applied\"\n description = (\n \"Check the Conditions that affect a Ref/GetAtt to make sure \"\n \"the resource being related to is available when there is a resource \"\n \"condition.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html\"\n tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\"]\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Ref/GetAtt for Conditions\"\"\"\n\n matches = []\n\n # Start with Ref checks\n ref_objs = cfn.search_deep_keys(searchText=\"Ref\", includeGlobals=False)\n for ref_obj in ref_objs:\n value = ref_obj[-1]\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(ref_obj, value)\n for scenario in scenarios:\n # pylint: disable=consider-using-f-string\n scenario_text = \" and \".join(\n [\n 'when condition \"%s\" is %s' % (k, v)\n for (k, v) in scenario.items()\n ]\n )\n message = (\n 'Ref to resource \"{0}\" that may not be available {1} at {2}'\n )\n matches.append(\n RuleMatch(\n ref_obj[:-1],\n message.format(\n value, scenario_text, \"/\".join(map(str, ref_obj[:-1]))\n ),\n )\n )\n\n # The do GetAtt\n getatt_objs = cfn.search_deep_keys(\n searchText=\"Fn::GetAtt\", includeGlobals=False\n )\n for getatt_obj in getatt_objs:\n value_obj = getatt_obj[-1]\n value = None\n if isinstance(value_obj, list):\n value = value_obj[0]\n elif isinstance(value_obj, str):\n value = value_obj.split(\".\")[0]\n if value:\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(getatt_obj, value)\n for scenario in scenarios:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in scenario.items()\n ]\n )\n message = 'GetAtt to resource \"{0}\" that may not be available {1} at {2}'\n matches.append(\n RuleMatch(\n getatt_obj[:-1],\n message.format(\n value,\n scenario_text,\n \"/\".join(map(str, getatt_obj[:-1])),\n ),\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/functions/RelationshipConditions.py"}]} | 1,699 | 549 |
gh_patches_debug_16419 | rasdani/github-patches | git_diff | pantsbuild__pants-16001 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`./pants run` crashes when `remote_cache_eager_fetch = false`
**Describe the bug**
After setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:
```
Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }
```
**Pants version**
`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`
**OS**
Linux
**Additional info**
Some BuildSense links where we've seen the failure:
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
</issue>
<code>
[start of src/python/pants/base/exceptions.py]
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6
7 class TargetDefinitionException(Exception):
8 """Indicates an invalid target definition.
9
10 :API: public
11 """
12
13 def __init__(self, target, msg):
14 """
15 :param target: the target in question
16 :param string msg: a description of the target misconfiguration
17 """
18 super().__init__(f"Invalid target {target}: {msg}")
19
20
21 class BuildConfigurationError(Exception):
22 """Indicates an error in a pants installation's configuration."""
23
24
25 class BackendConfigurationError(BuildConfigurationError):
26 """Indicates a plugin backend with a missing or malformed register module."""
27
28
29 class MappingError(Exception):
30 """Indicates an error mapping addressable objects."""
31
[end of src/python/pants/base/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py
--- a/src/python/pants/base/exceptions.py
+++ b/src/python/pants/base/exceptions.py
@@ -3,6 +3,11 @@
from __future__ import annotations
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pants.engine.internals.native_engine import PyFailure
+
class TargetDefinitionException(Exception):
"""Indicates an invalid target definition.
@@ -28,3 +33,15 @@
class MappingError(Exception):
"""Indicates an error mapping addressable objects."""
+
+
+class NativeEngineFailure(Exception):
+ """A wrapper around a `Failure` instance.
+
+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with
+ additional fields. See https://github.com/PyO3/pyo3/issues/295
+ """
+
+ def __init__(self, msg: str, failure: PyFailure) -> None:
+ super().__init__(msg)
+ self.failure = failure
| {"golden_diff": "diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py\n--- a/src/python/pants/base/exceptions.py\n+++ b/src/python/pants/base/exceptions.py\n@@ -3,6 +3,11 @@\n \n from __future__ import annotations\n \n+from typing import TYPE_CHECKING\n+\n+if TYPE_CHECKING:\n+ from pants.engine.internals.native_engine import PyFailure\n+\n \n class TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n@@ -28,3 +33,15 @@\n \n class MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n+\n+\n+class NativeEngineFailure(Exception):\n+ \"\"\"A wrapper around a `Failure` instance.\n+\n+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with\n+ additional fields. See https://github.com/PyO3/pyo3/issues/295\n+ \"\"\"\n+\n+ def __init__(self, msg: str, failure: PyFailure) -> None:\n+ super().__init__(msg)\n+ self.failure = failure\n", "issue": "`./pants run` crashes when `remote_cache_eager_fetch = false`\n**Describe the bug**\r\n\r\nAfter setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:\r\n```\r\n Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }\r\n```\r\n\r\n**Pants version**\r\n\r\n`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`\r\n\r\n**OS**\r\n\r\nLinux\r\n\r\n**Additional info**\r\n\r\nSome BuildSense links where we've seen the failure:\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\n\nclass TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n\n :API: public\n \"\"\"\n\n def __init__(self, target, msg):\n \"\"\"\n :param target: the target in question\n :param string msg: a description of the target misconfiguration\n \"\"\"\n super().__init__(f\"Invalid target {target}: {msg}\")\n\n\nclass BuildConfigurationError(Exception):\n \"\"\"Indicates an error in a pants installation's configuration.\"\"\"\n\n\nclass BackendConfigurationError(BuildConfigurationError):\n \"\"\"Indicates a plugin backend with a missing or malformed register module.\"\"\"\n\n\nclass MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n", "path": "src/python/pants/base/exceptions.py"}]} | 1,222 | 244 |
gh_patches_debug_40698 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Latest requirements.txt seems to be over-specifying dependencies
This is the latest version of the requirements.txt file:
```
black>=19.3b0
darglint
hypothesis>=4.4.0
interrogate
ipykernel
isort>=4.3.18
jupyter_client
lxml
natsort
nbsphinx>=0.4.2
pandas-flavor
pandas-vet
pre-commit
pyspark
pytest-azurepipelines
pytest-cov
pytest>=3.4.2
scikit-learn
seaborn
setuptools>=38.5.2
sphinxcontrib-fulltoc==1.2.0
unyt
xarray
```
Those are injected in the setup.py as mandatory dependencies, but a lot in there looks like docs, dev, or optional dependencies. Does pyjanitor really require all that at run time?
</issue>
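
For reference (an editorial aside, not part of the original report): the conventional fix for this pattern is to keep only runtime packages in `install_requires` and move tooling into `extras_require`. A sketch using packages from the list above; the runtime/dev split shown is illustrative, not a vetted classification of pyjanitor's actual needs:

```python
from setuptools import find_packages, setup

setup(
    name="pyjanitor",
    packages=find_packages(),
    # Runtime-only dependencies.
    install_requires=["pandas-flavor", "natsort", "lxml"],
    # Optional tooling, installed via e.g. `pip install pyjanitor[dev]`.
    extras_require={
        "dev": ["black>=19.3b0", "pre-commit", "isort>=4.3.18"],
        "test": ["pytest>=3.4.2", "pytest-cov", "hypothesis>=4.4.0"],
        "docs": ["nbsphinx>=0.4.2", "sphinxcontrib-fulltoc==1.2.0"],
        "spark": ["pyspark"],
    },
)
```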
<code>
[start of setup.py]
1 """Setup script."""
2
3 import re
4 from pathlib import Path
5
6 from setuptools import find_packages, setup
7
8
9 def requirements():
10 """Reader for requirements.txt"""
11 with open("requirements.txt", "r+") as f:
12 return f.read()
13
14
15 def generate_long_description() -> str:
16 """
17 Extra chunks from README for PyPI description.
18
19 Target chunks must be contained within `.. pypi-doc` pair comments,
20 so there must be an even number of comments in README.
21
22 :returns: Extracted description from README.
23 :raises Exception: if odd number of ``.. pypi-doc`` comments
24 in README.
25 """
26 # Read the contents of README file
27 this_directory = Path(__file__).parent
28 with open(this_directory / "README.rst", encoding="utf-8") as f:
29 readme = f.read()
30
31 # Find pypi-doc comments in README
32 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
33 if len(indices) % 2 != 0:
34 raise Exception("Odd number of `.. pypi-doc` comments in README")
35
36 # Loop through pairs of comments and save text between pairs
37 long_description = ""
38 for i in range(0, len(indices), 2):
39 start_index = indices[i] + 11
40 end_index = indices[i + 1]
41 long_description += readme[start_index:end_index]
42 return long_description
43
44
45 extra_spark = ["pyspark"]
46 extra_biology = ["biopython"]
47 extra_chemistry = ["rdkit"]
48 extra_engineering = ["unyt"]
49 extra_all = extra_biology + extra_engineering + extra_spark
50
51 setup(
52 name="pyjanitor",
53 version="0.20.10",
54 description="Tools for cleaning pandas DataFrames",
55 author="Eric J. Ma",
56 author_email="[email protected]",
57 url="https://github.com/ericmjl/pyjanitor",
58 license="MIT",
59 # packages=["janitor", "janitor.xarray", "janitor.spark"],
60 packages=find_packages(),
61 install_requires=requirements(),
62 extras_require={
63 "all": extra_all,
64 "biology": extra_biology,
65 # "chemistry": extra_chemistry, should be inserted once rdkit
66 # fixes https://github.com/rdkit/rdkit/issues/1812
67 "engineering": extra_engineering,
68 "spark": extra_spark,
69 },
70 python_requires=">=3.6",
71 long_description=generate_long_description(),
72 long_description_content_type="text/x-rst",
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,82 @@
"""Setup script."""
-
+import codecs
+import os
import re
from pathlib import Path
+from pprint import pprint
from setuptools import find_packages, setup
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+def read(*parts):
+ # intentionally *not* adding an encoding option to open
+ return codecs.open(os.path.join(HERE, *parts), "r").read()
+
-def requirements():
- """Reader for requirements.txt"""
- with open("requirements.txt", "r+") as f:
- return f.read()
+def read_requirements(*parts):
+ """
+ Return requirements from parts.
+
+ Given a requirements.txt (or similar style file),
+ returns a list of requirements.
+ Assumes anything after a single '#' on a line is a comment, and ignores
+ empty lines.
+
+ :param parts: list of filenames which contain the installation "parts",
+ i.e. submodule-specific installation requirements
+ :returns: A compiled list of requirements.
+ """
+ requirements = []
+ for line in read(*parts).splitlines():
+ new_line = re.sub( # noqa: PD005
+ r"(\s*)?#.*$", # the space immediately before the
+ # hash mark, the hash mark, and
+ # anything that follows it
+ "", # replace with a blank string
+ line,
+ )
+ new_line = re.sub( # noqa: PD005
+ r"-r.*$", # link to another requirement file
+ "", # replace with a blank string
+ new_line,
+ )
+ new_line = re.sub( # noqa: PD005
+ r"-e \..*$", # link to editable install
+ "", # replace with a blank string
+ new_line,
+ )
+ # print(line, "-->", new_line)
+ if new_line: # i.e. we have a non-zero-length string
+ requirements.append(new_line)
+ return requirements
+
+
+# pull from requirements.IN, requirements.TXT is generated from this
+INSTALL_REQUIRES = read_requirements(".requirements/base.in")
+
+EXTRA_REQUIRES = {
+ "dev": read_requirements(".requirements/dev.in"),
+ "docs": read_requirements(".requirements/docs.in"),
+ "test": read_requirements(".requirements/testing.in"),
+ "biology": read_requirements(".requirements/biology.in"),
+ "chemistry": read_requirements(".requirements/chemistry.in"),
+ "engineering": read_requirements(".requirements/engineering.in"),
+ "spark": read_requirements(".requirements/spark.in"),
+}
+
+# add 'all' key to EXTRA_REQUIRES
+all_requires = []
+for k, v in EXTRA_REQUIRES.items():
+ all_requires.extend(v)
+EXTRA_REQUIRES["all"] = set(all_requires)
+
+for k1 in ["biology", "chemistry", "engineering", "spark"]:
+ for v2 in EXTRA_REQUIRES[k1]:
+ EXTRA_REQUIRES["docs"].append(v2)
+
+pprint(EXTRA_REQUIRES)
def generate_long_description() -> str:
@@ -42,12 +109,6 @@
return long_description
-extra_spark = ["pyspark"]
-extra_biology = ["biopython"]
-extra_chemistry = ["rdkit"]
-extra_engineering = ["unyt"]
-extra_all = extra_biology + extra_engineering + extra_spark
-
setup(
name="pyjanitor",
version="0.20.10",
@@ -58,15 +119,8 @@
license="MIT",
# packages=["janitor", "janitor.xarray", "janitor.spark"],
packages=find_packages(),
- install_requires=requirements(),
- extras_require={
- "all": extra_all,
- "biology": extra_biology,
- # "chemistry": extra_chemistry, should be inserted once rdkit
- # fixes https://github.com/rdkit/rdkit/issues/1812
- "engineering": extra_engineering,
- "spark": extra_spark,
- },
+ install_requires=INSTALL_REQUIRES,
+ extras_require=EXTRA_REQUIRES,
python_requires=">=3.6",
long_description=generate_long_description(),
long_description_content_type="text/x-rst",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,15 +1,82 @@\n \"\"\"Setup script.\"\"\"\n-\n+import codecs\n+import os\n import re\n from pathlib import Path\n+from pprint import pprint\n \n from setuptools import find_packages, setup\n \n+HERE = os.path.abspath(os.path.dirname(__file__))\n+\n+\n+def read(*parts):\n+ # intentionally *not* adding an encoding option to open\n+ return codecs.open(os.path.join(HERE, *parts), \"r\").read()\n+\n \n-def requirements():\n- \"\"\"Reader for requirements.txt\"\"\"\n- with open(\"requirements.txt\", \"r+\") as f:\n- return f.read()\n+def read_requirements(*parts):\n+ \"\"\"\n+ Return requirements from parts.\n+\n+ Given a requirements.txt (or similar style file),\n+ returns a list of requirements.\n+ Assumes anything after a single '#' on a line is a comment, and ignores\n+ empty lines.\n+\n+ :param parts: list of filenames which contain the installation \"parts\",\n+ i.e. submodule-specific installation requirements\n+ :returns: A compiled list of requirements.\n+ \"\"\"\n+ requirements = []\n+ for line in read(*parts).splitlines():\n+ new_line = re.sub( # noqa: PD005\n+ r\"(\\s*)?#.*$\", # the space immediately before the\n+ # hash mark, the hash mark, and\n+ # anything that follows it\n+ \"\", # replace with a blank string\n+ line,\n+ )\n+ new_line = re.sub( # noqa: PD005\n+ r\"-r.*$\", # link to another requirement file\n+ \"\", # replace with a blank string\n+ new_line,\n+ )\n+ new_line = re.sub( # noqa: PD005\n+ r\"-e \\..*$\", # link to editable install\n+ \"\", # replace with a blank string\n+ new_line,\n+ )\n+ # print(line, \"-->\", new_line)\n+ if new_line: # i.e. we have a non-zero-length string\n+ requirements.append(new_line)\n+ return requirements\n+\n+\n+# pull from requirements.IN, requirements.TXT is generated from this\n+INSTALL_REQUIRES = read_requirements(\".requirements/base.in\")\n+\n+EXTRA_REQUIRES = {\n+ \"dev\": read_requirements(\".requirements/dev.in\"),\n+ \"docs\": read_requirements(\".requirements/docs.in\"),\n+ \"test\": read_requirements(\".requirements/testing.in\"),\n+ \"biology\": read_requirements(\".requirements/biology.in\"),\n+ \"chemistry\": read_requirements(\".requirements/chemistry.in\"),\n+ \"engineering\": read_requirements(\".requirements/engineering.in\"),\n+ \"spark\": read_requirements(\".requirements/spark.in\"),\n+}\n+\n+# add 'all' key to EXTRA_REQUIRES\n+all_requires = []\n+for k, v in EXTRA_REQUIRES.items():\n+ all_requires.extend(v)\n+EXTRA_REQUIRES[\"all\"] = set(all_requires)\n+\n+for k1 in [\"biology\", \"chemistry\", \"engineering\", \"spark\"]:\n+ for v2 in EXTRA_REQUIRES[k1]:\n+ EXTRA_REQUIRES[\"docs\"].append(v2)\n+\n+pprint(EXTRA_REQUIRES)\n \n \n def generate_long_description() -> str:\n@@ -42,12 +109,6 @@\n return long_description\n \n \n-extra_spark = [\"pyspark\"]\n-extra_biology = [\"biopython\"]\n-extra_chemistry = [\"rdkit\"]\n-extra_engineering = [\"unyt\"]\n-extra_all = extra_biology + extra_engineering + extra_spark\n-\n setup(\n name=\"pyjanitor\",\n version=\"0.20.10\",\n@@ -58,15 +119,8 @@\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n- install_requires=requirements(),\n- extras_require={\n- \"all\": extra_all,\n- \"biology\": extra_biology,\n- # \"chemistry\": extra_chemistry, should be inserted once rdkit\n- # fixes https://github.com/rdkit/rdkit/issues/1812\n- \"engineering\": extra_engineering,\n- \"spark\": extra_spark,\n- },\n+ 
install_requires=INSTALL_REQUIRES,\n+ extras_require=EXTRA_REQUIRES,\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n", "issue": "Latest requirements.txt seems to be over-specifying dependencies\nThis is the latest version requirements.txt file:\r\n\r\n```\r\nblack>=19.3b0\r\ndarglint\r\nhypothesis>=4.4.0\r\ninterrogate\r\nipykernel\r\nisort>=4.3.18\r\njupyter_client\r\nlxml\r\nnatsort\r\nnbsphinx>=0.4.2\r\npandas-flavor\r\npandas-vet\r\npre-commit\r\npyspark\r\npytest-azurepipelines\r\npytest-cov\r\npytest>=3.4.2\r\nscikit-learn\r\nseaborn\r\nsetuptools>=38.5.2\r\nsphinxcontrib-fulltoc==1.2.0\r\nunyt\r\nxarray\r\n```\r\n\r\nThose are inject in the setup.py as mandatory dependencies but a lot in there looks like docs, dev, or optional. Is pyjanitor really require all that at run time?\n", "before_files": [{"content": "\"\"\"Setup script.\"\"\"\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\n\ndef requirements():\n \"\"\"Reader for requirements.txt\"\"\"\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README.\n :raises Exception: if odd number of ``.. pypi-doc`` comments\n in README.\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.20.10\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}]} | 1,427 | 993 |
gh_patches_debug_3040 | rasdani/github-patches | git_diff | mosaicml__composer-496 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move `ComposerTrainer` to top-level imports
Our most heavily used objects should be easily importable from `composer` via:
```
from composer import Trainer, ComposerModel
```
rather than remembering their submodule:
```
from composer.models import ComposerModel
```
Especially the last one: it's tricky to remember whether it's `models` or `model`
</issue>
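Editor's note: a minimal illustrative sketch of the requested re-export, using the same `import ... as ...` aliasing convention as the file below (the aliasing keeps linters from treating the names as unused):

```python
# composer/__init__.py (illustrative excerpt only)
from composer.models import ComposerModel as ComposerModel
from composer.trainer import Trainer as Trainer

# callers can then write:
#   from composer import Trainer, ComposerModel
```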
<code>
[start of composer/__init__.py]
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from composer import algorithms as algorithms
4 from composer import callbacks as callbacks
5 from composer import datasets as datasets
6 from composer import loggers as loggers
7 from composer import models as models
8 from composer import optim as optim
9 from composer import profiler as profiler
10 from composer import trainer as trainer
11 from composer import utils as utils
12 from composer.core import Algorithm as Algorithm
13 from composer.core import Callback as Callback
14 from composer.core import DataSpec as DataSpec
15 from composer.core import Engine as Engine
16 from composer.core import Event as Event
17 from composer.core import Logger as Logger
18 from composer.core import State as State
19 from composer.core import Time as Time
20 from composer.core import Timer as Timer
21 from composer.core import TimeUnit as TimeUnit
22 from composer.core import types as types
23 from composer.trainer import Trainer as Trainer
24
25 __version__ = "0.3.1"
26
[end of composer/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/composer/__init__.py b/composer/__init__.py
--- a/composer/__init__.py
+++ b/composer/__init__.py
@@ -20,6 +20,7 @@
from composer.core import Timer as Timer
from composer.core import TimeUnit as TimeUnit
from composer.core import types as types
+from composer.models import ComposerModel as ComposerModel
from composer.trainer import Trainer as Trainer
__version__ = "0.3.1"
| {"golden_diff": "diff --git a/composer/__init__.py b/composer/__init__.py\n--- a/composer/__init__.py\n+++ b/composer/__init__.py\n@@ -20,6 +20,7 @@\n from composer.core import Timer as Timer\n from composer.core import TimeUnit as TimeUnit\n from composer.core import types as types\n+from composer.models import ComposerModel as ComposerModel\n from composer.trainer import Trainer as Trainer\n \n __version__ = \"0.3.1\"\n", "issue": "Move `ComposerTrainer` to top-level imports\nOur most heavily used objects should be easily importable from `composer` via:\r\n```\r\nfrom composer import Trainer, ComposerModel\r\n```\r\nrather than remember their submodule:\r\n\r\n```\r\nfrom composer.models import ComposerModel\r\n```\r\n\r\nEspecially the last one, its tricky to remember whether its `models` or `model`\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom composer import algorithms as algorithms\nfrom composer import callbacks as callbacks\nfrom composer import datasets as datasets\nfrom composer import loggers as loggers\nfrom composer import models as models\nfrom composer import optim as optim\nfrom composer import profiler as profiler\nfrom composer import trainer as trainer\nfrom composer import utils as utils\nfrom composer.core import Algorithm as Algorithm\nfrom composer.core import Callback as Callback\nfrom composer.core import DataSpec as DataSpec\nfrom composer.core import Engine as Engine\nfrom composer.core import Event as Event\nfrom composer.core import Logger as Logger\nfrom composer.core import State as State\nfrom composer.core import Time as Time\nfrom composer.core import Timer as Timer\nfrom composer.core import TimeUnit as TimeUnit\nfrom composer.core import types as types\nfrom composer.trainer import Trainer as Trainer\n\n__version__ = \"0.3.1\"\n", "path": "composer/__init__.py"}]} | 846 | 104 |
gh_patches_debug_11199 | rasdani/github-patches | git_diff | joke2k__faker-1937 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When attempting to generate a Finnish ssn of a specific age the provider crashes
* Faker version: 19.11.0
* OS: Windows 10
When attempting to generate a Finnish ssn of a certain age, the provider crashes because ``datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))`` calls ``randrange`` with an empty range whenever ``min_age`` equals ``max_age``. The ssn provider should handle situations where min_age and max_age are the same.
For example:
```
if min_age == max_age:
age = datetime.timedelta(days=min_age * 365)
else:
age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
```
### Steps to reproduce
1. Create a faker with fi_FI locale: ``fake = Faker('fi_FI')``
2. Call the ssn provider with the same minimum and maximum age: ``fake.ssn(min_age=50, max_age=50, artificial=True)``
### Expected behavior
The faker generates an artificial Finnish ssn that is 50 years old.
### Actual behavior
``ValueError: empty range for randrange() (18250, 18250, 0)``
</issue>
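Editor's note: a minimal, self-contained sketch (not from the original report) of the failure and the guard suggested above; the plain `random.Random` instance stands in for faker's `self.generator.random`:

```python
import datetime
import random

rng = random.Random(0)  # stand-in for faker's self.generator.random

def age_delta(min_age: int, max_age: int) -> datetime.timedelta:
    # random.randrange(a, b) raises ValueError when the range is empty,
    # which is exactly what happens for min_age == max_age.
    if min_age == max_age:
        return datetime.timedelta(days=min_age * 365)
    return datetime.timedelta(days=rng.randrange(min_age * 365, max_age * 365))

print(age_delta(50, 50))  # 18250 days, no crash
```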
<code>
[start of faker/providers/ssn/fi_FI/__init__.py]
1 import datetime
2
3 from .. import Provider as SsnProvider
4
5
6 class Provider(SsnProvider):
7 def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:
8 """
9 Returns 11 character Finnish personal identity code (Henkilötunnus,
10 HETU, Swedish: Personbeteckning). This function assigns random
11 gender to person.
12
13 HETU consists of eleven characters of the form DDMMYYCZZZQ, where
14 DDMMYY is the date of birth, C the century sign, ZZZ the individual
15 number and Q the control character (checksum). The sign for the
16 century is either + (1800–1899), - (1900–1999), or A (2000–2099).
17 The individual number ZZZ is odd for males and even for females.
18 For people born in Finland its range is 002-899
19 (larger numbers may be used in special cases).
20 An example of a valid code is 311280-888Y.
21
22 https://en.wikipedia.org/wiki/National_identification_number#Finland
23 """
24
25 def _checksum(hetu):
26 checksum_characters = "0123456789ABCDEFHJKLMNPRSTUVWXY"
27 return checksum_characters[int(hetu) % 31]
28
29 age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
30 birthday = datetime.date.today() - age
31 hetu_date = "%02d%02d%s" % (
32 birthday.day,
33 birthday.month,
34 str(birthday.year)[-2:],
35 )
36 range = (900, 999) if artificial is True else (2, 899)
37 suffix = str(self.generator.random.randrange(*range)).zfill(3)
38 checksum = _checksum(hetu_date + suffix)
39 separator = self._get_century_code(birthday.year)
40 hetu = "".join([hetu_date, separator, suffix, checksum])
41 return hetu
42
43 @staticmethod
44 def _get_century_code(year: int) -> str:
45 """Returns the century code for a given year"""
46 if 2000 <= year < 3000:
47 separator = "A"
48 elif 1900 <= year < 2000:
49 separator = "-"
50 elif 1800 <= year < 1900:
51 separator = "+"
52 else:
53 raise ValueError("Finnish SSN do not support people born before the year 1800 or after the year 2999")
54 return separator
55
56 vat_id_formats = ("FI########",)
57
58 def vat_id(self) -> str:
59 """
60 http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
61 :return: A random Finnish VAT ID
62 """
63 return self.bothify(self.random_element(self.vat_id_formats))
64
[end of faker/providers/ssn/fi_FI/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/ssn/fi_FI/__init__.py b/faker/providers/ssn/fi_FI/__init__.py
--- a/faker/providers/ssn/fi_FI/__init__.py
+++ b/faker/providers/ssn/fi_FI/__init__.py
@@ -26,7 +26,10 @@
checksum_characters = "0123456789ABCDEFHJKLMNPRSTUVWXY"
return checksum_characters[int(hetu) % 31]
- age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
+ if min_age == max_age:
+ age = datetime.timedelta(days=min_age * 365)
+ else:
+ age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
birthday = datetime.date.today() - age
hetu_date = "%02d%02d%s" % (
birthday.day,
| {"golden_diff": "diff --git a/faker/providers/ssn/fi_FI/__init__.py b/faker/providers/ssn/fi_FI/__init__.py\n--- a/faker/providers/ssn/fi_FI/__init__.py\n+++ b/faker/providers/ssn/fi_FI/__init__.py\n@@ -26,7 +26,10 @@\n checksum_characters = \"0123456789ABCDEFHJKLMNPRSTUVWXY\"\n return checksum_characters[int(hetu) % 31]\n \n- age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n+ if min_age == max_age:\n+ age = datetime.timedelta(days=min_age * 365)\n+ else:\n+ age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = datetime.date.today() - age\n hetu_date = \"%02d%02d%s\" % (\n birthday.day,\n", "issue": "When attempting to generate a Finnish ssn of a specific age the provider crashes\n* Faker version: 19.11.0\r\n* OS: Windows 10\r\n\r\nWhen attempting to generate a Finnish ssn of a certain age the provider crashes because ``datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))`` is always trying to generate a time within range. The ssn provider should handle situations where min_age and max_age are the same.\r\nFor example:\r\n```\r\nif min_age == max_age:\r\n age = datetime.timedelta(days=min_age * 365)\r\nelse:\r\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. Create a faker with fi_FI locale: ``fake = Faker('fi_FI')``\r\n2. Call the ssn provider with the same minimum and maximum age: ``fake.ssn(min_age=50, max_age=50, artificial=True)``\r\n\r\n### Expected behavior\r\n\r\nThe faker generates an artificial Finnish ssn that is 50 years old. \r\n\r\n### Actual behavior\r\n\r\n``ValueError: empty range for randrange() (18250, 18250, 0)``\r\n\n", "before_files": [{"content": "import datetime\n\nfrom .. import Provider as SsnProvider\n\n\nclass Provider(SsnProvider):\n def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:\n \"\"\"\n Returns 11 character Finnish personal identity code (Henkil\u00f6tunnus,\n HETU, Swedish: Personbeteckning). This function assigns random\n gender to person.\n\n HETU consists of eleven characters of the form DDMMYYCZZZQ, where\n DDMMYY is the date of birth, C the century sign, ZZZ the individual\n number and Q the control character (checksum). 
The sign for the\n century is either + (1800\u20131899), - (1900\u20131999), or A (2000\u20132099).\n The individual number ZZZ is odd for males and even for females.\n For people born in Finland its range is 002-899\n (larger numbers may be used in special cases).\n An example of a valid code is 311280-888Y.\n\n https://en.wikipedia.org/wiki/National_identification_number#Finland\n \"\"\"\n\n def _checksum(hetu):\n checksum_characters = \"0123456789ABCDEFHJKLMNPRSTUVWXY\"\n return checksum_characters[int(hetu) % 31]\n\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = datetime.date.today() - age\n hetu_date = \"%02d%02d%s\" % (\n birthday.day,\n birthday.month,\n str(birthday.year)[-2:],\n )\n range = (900, 999) if artificial is True else (2, 899)\n suffix = str(self.generator.random.randrange(*range)).zfill(3)\n checksum = _checksum(hetu_date + suffix)\n separator = self._get_century_code(birthday.year)\n hetu = \"\".join([hetu_date, separator, suffix, checksum])\n return hetu\n\n @staticmethod\n def _get_century_code(year: int) -> str:\n \"\"\"Returns the century code for a given year\"\"\"\n if 2000 <= year < 3000:\n separator = \"A\"\n elif 1900 <= year < 2000:\n separator = \"-\"\n elif 1800 <= year < 1900:\n separator = \"+\"\n else:\n raise ValueError(\"Finnish SSN do not support people born before the year 1800 or after the year 2999\")\n return separator\n\n vat_id_formats = (\"FI########\",)\n\n def vat_id(self) -> str:\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Finnish VAT ID\n \"\"\"\n return self.bothify(self.random_element(self.vat_id_formats))\n", "path": "faker/providers/ssn/fi_FI/__init__.py"}]} | 1,640 | 231 |
gh_patches_debug_5859 | rasdani/github-patches | git_diff | netbox-community__netbox-11755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GraphQL returns all objects if tag doesn't exist
### NetBox version
v3.4.2
### Python version
3.10
### Steps to Reproduce
1. query GraphQL for device/interface with a tag that doesn't exist
### Expected Behavior
The resulting list of devices/interfaces contains only the objects matching the tag, or is empty if no object with a matching tag exists.
### Observed Behavior
All devices/interfaces are returned regardless of the tag filter applied. The same seems to apply to all kinds of filters that can be defined in GraphQL. No errors are returned.
Example below
query:
`{
device_list(tag: "blablalba") {
id
name
tags {
name
}
}
interface_list(tag: "not-ipmi_exporter") {
id
device {
id
}
tags {
name
}
}
}
`
result:
`{
"data": {
"device_list": [
{
"id": "1",
"name": "device-A",
"tags": [
{
"name": "node_exporter"
}
]
},
{
"id": "2",
"name": "device-B",
"tags": [
{
"name": "node_exporter"
}
]
}
],
"interface_list": [
{
"id": "1",
"device": {
"id": "1"
},
"tags": [
{
"name": "ipmi_exporter"
}
]
},
{
"id": "2",
"device": {
"id": "2"
},
"tags": [
{
"name": "ipmi_exporter"
}
]
}
]
}
}`
</issue>
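Editor's note: a hedged sketch of where this behaviour likely comes from. With django-filter, an invalid bound `FilterSet` simply skips the failing filters when building `.qs`, so the unfiltered queryset comes back; checking `is_valid()` first is one way to surface "no match" instead. The function below mirrors the tail of `list_resolver` for illustration and is not the project's confirmed fix:

```python
def filtered_queryset(filterset_class, args, queryset, request):
    # Illustrative stand-in for the tail of ObjectListField.list_resolver.
    filterset = filterset_class(data=args, queryset=queryset, request=request)
    if not filterset.is_valid():
        # e.g. tag="blablalba" fails validation: return an empty result
        # instead of silently falling back to the unfiltered queryset.
        return queryset.none()
    return filterset.qs
```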
<code>
[start of netbox/netbox/graphql/fields.py]
1 from functools import partial
2
3 import graphene
4 from graphene_django import DjangoListField
5
6 from .utils import get_graphene_type
7
8 __all__ = (
9 'ObjectField',
10 'ObjectListField',
11 )
12
13
14 class ObjectField(graphene.Field):
15 """
16 Retrieve a single object, identified by its numeric ID.
17 """
18 def __init__(self, *args, **kwargs):
19
20 if 'id' not in kwargs:
21 kwargs['id'] = graphene.Int(required=True)
22
23 super().__init__(*args, **kwargs)
24
25 @staticmethod
26 def object_resolver(django_object_type, root, info, **args):
27 """
28 Return an object given its numeric ID.
29 """
30 manager = django_object_type._meta.model._default_manager
31 queryset = django_object_type.get_queryset(manager, info)
32
33 return queryset.get(**args)
34
35 def get_resolver(self, parent_resolver):
36 return partial(self.object_resolver, self._type)
37
38
39 class ObjectListField(DjangoListField):
40 """
41 Retrieve a list of objects, optionally filtered by one or more FilterSet filters.
42 """
43 def __init__(self, _type, *args, **kwargs):
44 filter_kwargs = {}
45
46 # Get FilterSet kwargs
47 filterset_class = getattr(_type._meta, 'filterset_class', None)
48 if filterset_class:
49 for filter_name, filter_field in filterset_class.get_filters().items():
50 field_type = get_graphene_type(type(filter_field))
51 filter_kwargs[filter_name] = graphene.Argument(field_type)
52
53 super().__init__(_type, args=filter_kwargs, *args, **kwargs)
54
55 @staticmethod
56 def list_resolver(django_object_type, resolver, default_manager, root, info, **args):
57 queryset = super(ObjectListField, ObjectListField).list_resolver(django_object_type, resolver, default_manager, root, info, **args)
58
59 # Instantiate and apply the FilterSet, if defined
60 filterset_class = django_object_type._meta.filterset_class
61 if filterset_class:
62 filterset = filterset_class(data=args, queryset=queryset, request=info.context)
63 return filterset.qs
64
65 return queryset
66
[end of netbox/netbox/graphql/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/netbox/graphql/fields.py b/netbox/netbox/graphql/fields.py
--- a/netbox/netbox/graphql/fields.py
+++ b/netbox/netbox/graphql/fields.py
@@ -60,6 +60,8 @@
filterset_class = django_object_type._meta.filterset_class
if filterset_class:
filterset = filterset_class(data=args, queryset=queryset, request=info.context)
+ if not filterset.is_valid():
+ return queryset.none()
return filterset.qs
return queryset
| {"golden_diff": "diff --git a/netbox/netbox/graphql/fields.py b/netbox/netbox/graphql/fields.py\n--- a/netbox/netbox/graphql/fields.py\n+++ b/netbox/netbox/graphql/fields.py\n@@ -60,6 +60,8 @@\n filterset_class = django_object_type._meta.filterset_class\n if filterset_class:\n filterset = filterset_class(data=args, queryset=queryset, request=info.context)\n+ if not filterset.is_valid():\n+ return queryset.none()\n return filterset.qs\n \n return queryset\n", "issue": "GraphQL returns all objects if tag doesn't exist\n### NetBox version\n\nv3.4.2\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\n1. query GraphQL for device/interface with a tag that doesn't exist\n\n### Expected Behavior\n\nResulting list of devices/interfaces only contains devices matching the tags or none if no device exists with matching tags.\n\n### Observed Behavior\n\nAll devices/interfaces are returned no matter what tag filter has been applied. Same seems to apply for all kinds of filters that can be defined in GraphQL. No errors are returned.\r\n\r\nExample below\r\n\r\nquery:\r\n`{\r\n device_list(tag: \"blablalba\") {\r\n id\r\n name\r\n tags {\r\n name\r\n }\r\n }\r\n interface_list(tag: \"not-ipmi_exporter\") {\r\n id\r\n device {\r\n id\r\n }\r\n tags {\r\n name\r\n }\r\n }\r\n}\r\n`\r\n\r\nresult:\r\n`{\r\n \"data\": {\r\n \"device_list\": [\r\n {\r\n \"id\": \"1\",\r\n \"name\": \"device-A\",\r\n \"tags\": [\r\n {\r\n \"name\": \"node_exporter\"\r\n }\r\n ]\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"name\": \"device-B\",\r\n \"tags\": [\r\n {\r\n \"name\": \"node_exporter\"\r\n }\r\n ]\r\n }\r\n ],\r\n \"interface_list\": [\r\n {\r\n \"id\": \"1\",\r\n \"device\": {\r\n \"id\": \"1\"\r\n },\r\n \"tags\": [\r\n {\r\n \"name\": \"ipmi_exporter\"\r\n }\r\n ]\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"device\": {\r\n \"id\": \"2\"\r\n },\r\n \"tags\": [\r\n {\r\n \"name\": \"ipmi_exporter\"\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n}`\n", "before_files": [{"content": "from functools import partial\n\nimport graphene\nfrom graphene_django import DjangoListField\n\nfrom .utils import get_graphene_type\n\n__all__ = (\n 'ObjectField',\n 'ObjectListField',\n)\n\n\nclass ObjectField(graphene.Field):\n \"\"\"\n Retrieve a single object, identified by its numeric ID.\n \"\"\"\n def __init__(self, *args, **kwargs):\n\n if 'id' not in kwargs:\n kwargs['id'] = graphene.Int(required=True)\n\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def object_resolver(django_object_type, root, info, **args):\n \"\"\"\n Return an object given its numeric ID.\n \"\"\"\n manager = django_object_type._meta.model._default_manager\n queryset = django_object_type.get_queryset(manager, info)\n\n return queryset.get(**args)\n\n def get_resolver(self, parent_resolver):\n return partial(self.object_resolver, self._type)\n\n\nclass ObjectListField(DjangoListField):\n \"\"\"\n Retrieve a list of objects, optionally filtered by one or more FilterSet filters.\n \"\"\"\n def __init__(self, _type, *args, **kwargs):\n filter_kwargs = {}\n\n # Get FilterSet kwargs\n filterset_class = getattr(_type._meta, 'filterset_class', None)\n if filterset_class:\n for filter_name, filter_field in filterset_class.get_filters().items():\n field_type = get_graphene_type(type(filter_field))\n filter_kwargs[filter_name] = graphene.Argument(field_type)\n\n super().__init__(_type, args=filter_kwargs, *args, **kwargs)\n\n @staticmethod\n def list_resolver(django_object_type, resolver, default_manager, root, info, **args):\n queryset = super(ObjectListField, 
ObjectListField).list_resolver(django_object_type, resolver, default_manager, root, info, **args)\n\n # Instantiate and apply the FilterSet, if defined\n filterset_class = django_object_type._meta.filterset_class\n if filterset_class:\n filterset = filterset_class(data=args, queryset=queryset, request=info.context)\n return filterset.qs\n\n return queryset\n", "path": "netbox/netbox/graphql/fields.py"}]} | 1,525 | 122 |
gh_patches_debug_2601 | rasdani/github-patches | git_diff | carpentries__amy-2126 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Community Roles: Date range validation
Currently, an end date earlier than the start date is allowed.
</issue>
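Editor's note: a generic, self-contained sketch of field-level date-range validation in a Django form; the form and field names are illustrative and do not come from AMY's actual `CommunityRoleForm`:

```python
from django import forms
from django.core.exceptions import ValidationError

class DateRangeForm(forms.Form):
    start = forms.DateField()
    end = forms.DateField(required=False)

    def clean_end(self):
        # clean_<field> runs after per-field validation, and "start" is
        # declared first, so both values are already parsed dates here.
        start = self.cleaned_data.get("start")
        end = self.cleaned_data.get("end")
        if start and end and end < start:
            raise ValidationError("Must not be earlier than start date.")
        return end
```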
<code>
[start of amy/communityroles/forms.py]
1 from collections import defaultdict
2 from typing import Any, Optional
3
4 from django import forms
5 from django.core.exceptions import ObjectDoesNotExist, ValidationError
6
7 from workshops.fields import HeavySelect2Widget, ModelSelect2Widget
8 from workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin
9
10 from .models import CommunityRole, CommunityRoleConfig
11
12
13 class CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):
14 class Meta:
15 model = CommunityRole
16 fields = (
17 "config",
18 "person",
19 "award",
20 "start",
21 "end",
22 "inactivation",
23 "membership",
24 "url",
25 "generic_relation_content_type",
26 "generic_relation_pk",
27 )
28 widgets = {
29 "config": HeavySelect2Widget(
30 data_view="api:communityroleconfig-list", attrs=SELECT2_SIDEBAR
31 ),
32 "person": ModelSelect2Widget(
33 data_view="person-lookup", attrs=SELECT2_SIDEBAR
34 ),
35 "award": ModelSelect2Widget(
36 data_view="award-lookup", attrs=SELECT2_SIDEBAR
37 ),
38 "membership": ModelSelect2Widget(
39 data_view="membership-lookup", attrs=SELECT2_SIDEBAR
40 ),
41 "generic_relation_content_type": forms.Select(
42 # "disabled" means the browsers will not send the field during POST.
43 # See how it's handled in `clean()` method below.
44 attrs={"disabled": ""},
45 ),
46 "generic_relation_pk": HeavySelect2Widget(
47 data_view="generic-object-lookup", attrs=SELECT2_SIDEBAR
48 ),
49 }
50 labels = {
51 "generic_relation_content_type": "Generic relation object type",
52 "generic_relation_pk": "Generic relation object",
53 }
54
55 class Media:
56 js = ("communityrole_form.js",)
57
58 def __init__(self, *args, **kwargs):
59 form_tag = kwargs.pop("form_tag", True)
60 super().__init__(*args, **kwargs)
61 bootstrap_kwargs = {
62 "add_cancel_button": False,
63 "form_tag": form_tag,
64 }
65 self.helper = BootstrapHelper(**bootstrap_kwargs)
66
67 def clean(self) -> dict[str, Any]:
68 """Validate form according to rules set up in related Community Role
69 configuration."""
70 cleaned_data = super().clean()
71 errors: defaultdict[str, list[ValidationError]] = defaultdict(list)
72 config: Optional[CommunityRoleConfig] = cleaned_data.get("config")
73
74 # Config is required, but field validation for 'config' should raise
75 # validation error first.
76 if not config:
77 return cleaned_data
78
79 # Award required?
80 if config.link_to_award and not cleaned_data.get("award"):
81 errors["award"].append(
82 ValidationError(f"Award is required with community role {config}")
83 )
84
85 # Specific award badge required?
86 if (badge := config.award_badge_limit) and (award := cleaned_data.get("award")):
87 if award.badge != badge:
88 errors["award"].append(
89 ValidationError(
90 f"Award badge must be {badge} for community role {config}"
91 )
92 )
93
94 # Membership required?
95 if config.link_to_membership and not cleaned_data.get("membership"):
96 errors["membership"].append(
97 ValidationError(f"Membership is required with community role {config}")
98 )
99
100 # Additional URL supported?
101 if not config.additional_url and cleaned_data.get("url"):
102 errors["url"].append(
103 ValidationError(f"URL is not supported for community role {config}")
104 )
105
106 # Widget for `generic_relation_content_type` is disabled in HTML, which
107 # makes browsers not send it. The code below sets the default value to
108 # the same value as in related config.
109 generic_relation_content_type = config.generic_relation_content_type
110
111 # Generic relation object must exist
112 if config.generic_relation_content_type and generic_relation_content_type:
113 model_class = generic_relation_content_type.model_class()
114 try:
115 model_class._base_manager.get(
116 pk=cleaned_data.get("generic_relation_pk")
117 )
118 except ObjectDoesNotExist:
119 errors["generic_relation_pk"].append(
120 ValidationError(
121 f"Generic relation object of model {model_class.__name__} "
122 "doesn't exist"
123 )
124 )
125
126 if errors:
127 raise ValidationError(errors)
128
129 return cleaned_data
130
[end of amy/communityroles/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py
--- a/amy/communityroles/forms.py
+++ b/amy/communityroles/forms.py
@@ -127,3 +127,11 @@
raise ValidationError(errors)
return cleaned_data
+
+ def clean_end(self):
+ """Validate that end >= start"""
+ start = self.cleaned_data.get("start")
+ end = self.cleaned_data.get("end")
+ if start and end and end < start:
+ raise ValidationError("Must not be earlier than start date.")
+ return end
| {"golden_diff": "diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py\n--- a/amy/communityroles/forms.py\n+++ b/amy/communityroles/forms.py\n@@ -127,3 +127,11 @@\n raise ValidationError(errors)\n \n return cleaned_data\n+\n+ def clean_end(self):\n+ \"\"\"Validate that end >= start\"\"\"\n+ start = self.cleaned_data.get(\"start\")\n+ end = self.cleaned_data.get(\"end\")\n+ if start and end and end < start:\n+ raise ValidationError(\"Must not be earlier than start date.\")\n+ return end\n", "issue": "Community Roles: Date range validation\nCurrently, an end date earlier than start date is allowed. \n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import Any, Optional\n\nfrom django import forms\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\n\nfrom workshops.fields import HeavySelect2Widget, ModelSelect2Widget\nfrom workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin\n\nfrom .models import CommunityRole, CommunityRoleConfig\n\n\nclass CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):\n class Meta:\n model = CommunityRole\n fields = (\n \"config\",\n \"person\",\n \"award\",\n \"start\",\n \"end\",\n \"inactivation\",\n \"membership\",\n \"url\",\n \"generic_relation_content_type\",\n \"generic_relation_pk\",\n )\n widgets = {\n \"config\": HeavySelect2Widget(\n data_view=\"api:communityroleconfig-list\", attrs=SELECT2_SIDEBAR\n ),\n \"person\": ModelSelect2Widget(\n data_view=\"person-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"award\": ModelSelect2Widget(\n data_view=\"award-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"membership\": ModelSelect2Widget(\n data_view=\"membership-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"generic_relation_content_type\": forms.Select(\n # \"disabled\" means the browsers will not send the field during POST.\n # See how it's handled in `clean()` method below.\n attrs={\"disabled\": \"\"},\n ),\n \"generic_relation_pk\": HeavySelect2Widget(\n data_view=\"generic-object-lookup\", attrs=SELECT2_SIDEBAR\n ),\n }\n labels = {\n \"generic_relation_content_type\": \"Generic relation object type\",\n \"generic_relation_pk\": \"Generic relation object\",\n }\n\n class Media:\n js = (\"communityrole_form.js\",)\n\n def __init__(self, *args, **kwargs):\n form_tag = kwargs.pop(\"form_tag\", True)\n super().__init__(*args, **kwargs)\n bootstrap_kwargs = {\n \"add_cancel_button\": False,\n \"form_tag\": form_tag,\n }\n self.helper = BootstrapHelper(**bootstrap_kwargs)\n\n def clean(self) -> dict[str, Any]:\n \"\"\"Validate form according to rules set up in related Community Role\n configuration.\"\"\"\n cleaned_data = super().clean()\n errors: defaultdict[str, list[ValidationError]] = defaultdict(list)\n config: Optional[CommunityRoleConfig] = cleaned_data.get(\"config\")\n\n # Config is required, but field validation for 'config' should raise\n # validation error first.\n if not config:\n return cleaned_data\n\n # Award required?\n if config.link_to_award and not cleaned_data.get(\"award\"):\n errors[\"award\"].append(\n ValidationError(f\"Award is required with community role {config}\")\n )\n\n # Specific award badge required?\n if (badge := config.award_badge_limit) and (award := cleaned_data.get(\"award\")):\n if award.badge != badge:\n errors[\"award\"].append(\n ValidationError(\n f\"Award badge must be {badge} for community role {config}\"\n )\n )\n\n # Membership required?\n if config.link_to_membership and not cleaned_data.get(\"membership\"):\n 
errors[\"membership\"].append(\n ValidationError(f\"Membership is required with community role {config}\")\n )\n\n # Additional URL supported?\n if not config.additional_url and cleaned_data.get(\"url\"):\n errors[\"url\"].append(\n ValidationError(f\"URL is not supported for community role {config}\")\n )\n\n # Widget for `generic_relation_content_type` is disabled in HTML, which\n # makes browsers not send it. The code below sets the default value to\n # the same value as in related config.\n generic_relation_content_type = config.generic_relation_content_type\n\n # Generic relation object must exist\n if config.generic_relation_content_type and generic_relation_content_type:\n model_class = generic_relation_content_type.model_class()\n try:\n model_class._base_manager.get(\n pk=cleaned_data.get(\"generic_relation_pk\")\n )\n except ObjectDoesNotExist:\n errors[\"generic_relation_pk\"].append(\n ValidationError(\n f\"Generic relation object of model {model_class.__name__} \"\n \"doesn't exist\"\n )\n )\n\n if errors:\n raise ValidationError(errors)\n\n return cleaned_data\n", "path": "amy/communityroles/forms.py"}]} | 1,768 | 131 |
gh_patches_debug_12625 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1530 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
License file in pypi archive
The pypi source archive isn't including the `LICENSE.txt` file. Would it be possible to add it? It is very helpful when packaging this for Linux distributions. The official python packaging guidelines also say [each package should have one](https://packaging.python.org/guides/distributing-packages-using-setuptools/#license-txt). Thank you!
Add keywords to setup.py
Testing with the pyroma package showed that keywords are missing from setup.py:
```
willo@macbook ~/g/m/h/hypothesis-python> pyroma .
------------------------------
Checking .
Found hypothesis
------------------------------
Your package does not have keywords data.
------------------------------
Final rating: 9/10
Cottage Cheese
------------------------------
```
Discussed with @Zac-HD; the keywords from the GitHub repo, listed below, would be good to use.
python testing fuzzing property-based-testing
</issue>
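Editor's note: the two requests live in different places. The sdist normally picks up `LICENSE.txt` via a `MANIFEST.in` entry such as `include LICENSE.txt`, while keywords are a plain `setup()` argument. A minimal illustrative fragment follows; everything except the `keywords` string (taken from the discussion above) is placeholder metadata:

```python
import setuptools

setuptools.setup(
    name="example-package",  # placeholder, not hypothesis's real metadata
    version="0.0.1",
    keywords="python testing fuzzing property-based-testing",
)
```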
<code>
[start of hypothesis-python/setup.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22 import warnings
23
24 import setuptools
25
26
27 def local_file(name):
28 return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
29
30
31 SOURCE = local_file('src')
32 README = local_file('README.rst')
33
34 setuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))
35
36 if setuptools_version < (36, 2):
37 # Warning only - very bad if uploading bdist but fine if installing sdist.
38 warnings.warn(
39 'This version of setuptools is too old to correctly store '
40 'conditional dependencies in binary wheels. For more info, see: '
41 'https://hynek.me/articles/conditional-python-dependencies/'
42 )
43
44
45 # Assignment to placate pyflakes. The actual version is from the exec that
46 # follows.
47 __version__ = None
48
49 with open(local_file('src/hypothesis/version.py')) as o:
50 exec(o.read())
51
52 assert __version__ is not None
53
54
55 extras = {
56 'datetime': ['pytz'],
57 'pytz': ['pytz'],
58 'dateutil': ['python-dateutil'],
59 'fakefactory': ['Faker>=0.7'],
60 'numpy': ['numpy>=1.9.0'],
61 'pytest': ['pytest>=3.0'],
62 # We only support Django versions with upstream support - see
63 # https://www.djangoproject.com/download/#supported-versions
64 'django': ['pytz', 'django>=1.11'],
65 }
66
67 extras['faker'] = extras['fakefactory']
68 extras['all'] = sorted(sum(extras.values(), []))
69
70
71 install_requires = ['attrs>=16.0.0', 'coverage>=4.0']
72 # Using an environment marker on enum34 makes the dependency condition
73 # independent of the build environment, which is important for wheels.
74 # https://www.python.org/dev/peps/pep-0345/#environment-markers
75 if sys.version_info[0] < 3 and setuptools_version < (8, 0):
76 # Except really old systems, where we give up and install unconditionally
77 install_requires.append('enum34')
78 else:
79 install_requires.append('enum34; python_version=="2.7"')
80
81
82 setuptools.setup(
83 name='hypothesis',
84 version=__version__,
85 author='David R. MacIver',
86 author_email='[email protected]',
87 packages=setuptools.find_packages(SOURCE),
88 package_dir={'': SOURCE},
89 package_data={'hypothesis': ['py.typed']},
90 url=(
91 'https://github.com/HypothesisWorks/hypothesis/'
92 'tree/master/hypothesis-python'
93 ),
94 license='MPL v2',
95 description='A library for property based testing',
96 zip_safe=False,
97 extras_require=extras,
98 install_requires=install_requires,
99 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
100 classifiers=[
101 'Development Status :: 5 - Production/Stable',
102 'Intended Audience :: Developers',
103 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
104 'Operating System :: Unix',
105 'Operating System :: POSIX',
106 'Operating System :: Microsoft :: Windows',
107 'Programming Language :: Python',
108 'Programming Language :: Python :: 2.7',
109 'Programming Language :: Python :: 3',
110 'Programming Language :: Python :: 3.4',
111 'Programming Language :: Python :: 3.5',
112 'Programming Language :: Python :: 3.6',
113 'Programming Language :: Python :: 3.7',
114 'Programming Language :: Python :: Implementation :: CPython',
115 'Programming Language :: Python :: Implementation :: PyPy',
116 'Topic :: Software Development :: Testing',
117 'Framework :: Pytest',
118 ],
119 entry_points={
120 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
121 },
122 long_description=open(README).read(),
123 )
124
[end of hypothesis-python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py
--- a/hypothesis-python/setup.py
+++ b/hypothesis-python/setup.py
@@ -53,9 +53,9 @@
extras = {
- 'datetime': ['pytz'],
- 'pytz': ['pytz'],
- 'dateutil': ['python-dateutil'],
+ 'datetime': ['pytz>=2014.1'],
+ 'pytz': ['pytz>=2014.1'],
+ 'dateutil': ['python-dateutil>=1.4'],
'fakefactory': ['Faker>=0.7'],
'numpy': ['numpy>=1.9.0'],
'pytest': ['pytest>=3.0'],
@@ -120,4 +120,5 @@
'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
},
long_description=open(README).read(),
+ keywords='python testing fuzzing property-based-testing',
)
| {"golden_diff": "diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py\n--- a/hypothesis-python/setup.py\n+++ b/hypothesis-python/setup.py\n@@ -53,9 +53,9 @@\n \n \n extras = {\n- 'datetime': ['pytz'],\n- 'pytz': ['pytz'],\n- 'dateutil': ['python-dateutil'],\n+ 'datetime': ['pytz>=2014.1'],\n+ 'pytz': ['pytz>=2014.1'],\n+ 'dateutil': ['python-dateutil>=1.4'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=3.0'],\n@@ -120,4 +120,5 @@\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n+ keywords='python testing fuzzing property-based-testing',\n )\n", "issue": "License file in pypi archive\nThe pypi source archive isn't including the `LICENSE.txt` file. Would it be possible to add it? It is very helpful when packaging this for Linux distributions. The official python packaging guidelines also say [each package should have one](https://packaging.python.org/guides/distributing-packages-using-setuptools/#license-txt). Thank you!\nAdd keywords to setup.py\nTesting with pyroma package showed keywords are missing from setup.py\r\n\r\n```\r\nwillo@macbook ~/g/m/h/hypothesis-python> pyroma .\r\n------------------------------\r\nChecking .\r\nFound hypothesis\r\n------------------------------\r\nYour package does not have keywords data.\r\n------------------------------\r\nFinal rating: 9/10\r\nCottage Cheese\r\n------------------------------\r\n```\r\nDiscussed with @Zac-HD and keywords from the github repo below would be good to use.\r\n\r\npython testing fuzzing property-based-testing\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport warnings\n\nimport setuptools\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file('src')\nREADME = local_file('README.rst')\n\nsetuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))\n\nif setuptools_version < (36, 2):\n # Warning only - very bad if uploading bdist but fine if installing sdist.\n warnings.warn(\n 'This version of setuptools is too old to correctly store '\n 'conditional dependencies in binary wheels. For more info, see: '\n 'https://hynek.me/articles/conditional-python-dependencies/'\n )\n\n\n# Assignment to placate pyflakes. 
The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file('src/hypothesis/version.py')) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n 'datetime': ['pytz'],\n 'pytz': ['pytz'],\n 'dateutil': ['python-dateutil'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=3.0'],\n # We only support Django versions with upstream support - see\n # https://www.djangoproject.com/download/#supported-versions\n 'django': ['pytz', 'django>=1.11'],\n}\n\nextras['faker'] = extras['fakefactory']\nextras['all'] = sorted(sum(extras.values(), []))\n\n\ninstall_requires = ['attrs>=16.0.0', 'coverage>=4.0']\n# Using an environment marker on enum34 makes the dependency condition\n# independent of the build environemnt, which is important for wheels.\n# https://www.python.org/dev/peps/pep-0345/#environment-markers\nif sys.version_info[0] < 3 and setuptools_version < (8, 0):\n # Except really old systems, where we give up and install unconditionally\n install_requires.append('enum34')\nelse:\n install_requires.append('enum34; python_version==\"2.7\"')\n\n\nsetuptools.setup(\n name='hypothesis',\n version=__version__,\n author='David R. MacIver',\n author_email='[email protected]',\n packages=setuptools.find_packages(SOURCE),\n package_dir={'': SOURCE},\n package_data={'hypothesis': ['py.typed']},\n url=(\n 'https://github.com/HypothesisWorks/hypothesis/'\n 'tree/master/hypothesis-python'\n ),\n license='MPL v2',\n description='A library for property based testing',\n zip_safe=False,\n extras_require=extras,\n install_requires=install_requires,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Testing',\n 'Framework :: Pytest',\n ],\n entry_points={\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n)\n", "path": "hypothesis-python/setup.py"}]} | 2,042 | 224 |
gh_patches_debug_3972 | rasdani/github-patches | git_diff | pytorch__audio-635 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
multiple definitions of `SoxEffect`.
`SoxEffect` is defined both in
https://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.h#L50-L54
and
https://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.cpp#L73-L77
-----
-----
`torch_sox.h` can be included into `torch_sox.cpp` so that `SoxEffect` needs to be defined
only once in the header.
</issue>
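Editor's note: deduplicating the struct means `torch_sox.cpp` must include the header, so the build needs the repo root on the include path. A hedged sketch of the `setup.py` side, reusing names from the listing below and assuming `cwd` (computed earlier in that file) is the repository root where `torchaudio/torch_sox.h` lives:

```python
ext_modules = [
    CppExtension(
        '_torch_sox',
        ['torchaudio/torch_sox.cpp'],
        libraries=libraries,
        include_dirs=include_dirs + [cwd],  # repo root so the header resolves
        extra_compile_args=eca,
        extra_objects=extra_objects,
        extra_link_args=ela),
]
```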
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import os
3 import platform
4 import sys
5 import subprocess
6
7 from setuptools import setup, find_packages
8 from torch.utils.cpp_extension import BuildExtension, CppExtension
9
10
11 def check_env_flag(name, default=''):
12 return os.getenv(name, default).upper() in set(['ON', '1', 'YES', 'TRUE', 'Y'])
13
14 DEBUG = check_env_flag('DEBUG')
15 IS_WHEEL = check_env_flag('IS_WHEEL')
16 IS_CONDA = check_env_flag('IS_CONDA')
17
18 print('DEBUG:', DEBUG, 'IS_WHEEL:', IS_WHEEL, 'IS_CONDA:', IS_CONDA)
19
20 eca = []
21 ela = []
22 if DEBUG:
23 if platform.system() == 'Windows':
24 ela += ['/DEBUG:FULL']
25 else:
26 eca += ['-O0', '-g']
27 ela += ['-O0', '-g']
28
29
30 libraries = []
31 include_dirs = []
32 extra_objects = []
33
34 # Hypothetically, the conda distribution could rely on an external sox,
35 # but the library is pretty small and it is not available on the default
36 # anaconda channel. So we statically link it in, just as we do with wheels.
37 if IS_WHEEL or IS_CONDA:
38 audio_path = os.path.dirname(os.path.abspath(__file__))
39
40 include_dirs += [os.path.join(audio_path, 'third_party/flac/include')]
41 include_dirs += [os.path.join(audio_path, 'third_party/lame/include')]
42 include_dirs += [os.path.join(audio_path, 'third_party/sox/include')]
43 include_dirs += [os.path.join(audio_path, 'third_party/mad/include')]
44
45 # proper link order (sox, mad, flac, lame)
46 # (the most important thing is that dependencies come after a library;
47 # e.g., sox comes first)
48 extra_objects += [os.path.join(audio_path, 'third_party/sox/lib/libsox.a')]
49 extra_objects += [os.path.join(audio_path, 'third_party/mad/lib/libmad.a')]
50 extra_objects += [os.path.join(audio_path, 'third_party/flac/lib/libFLAC.a')]
51 extra_objects += [os.path.join(audio_path, 'third_party/lame/lib/libmp3lame.a')]
52 else:
53 libraries += ['sox']
54
55
56 # Creating the version file
57 cwd = os.path.dirname(os.path.abspath(__file__))
58 version = '0.6.0a0'
59 sha = 'Unknown'
60
61 try:
62 sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
63 except Exception:
64 pass
65
66 if os.getenv('BUILD_VERSION'):
67 version = os.getenv('BUILD_VERSION')
68 elif sha != 'Unknown':
69 version += '+' + sha[:7]
70 print('-- Building version ' + version)
71
72 version_path = os.path.join(cwd, 'torchaudio', 'version.py')
73 with open(version_path, 'w') as f:
74 f.write("__version__ = '{}'\n".format(version))
75 f.write("git_version = {}\n".format(repr(sha)))
76
77 pytorch_package_version = os.getenv('PYTORCH_VERSION')
78
79 pytorch_package_dep = 'torch'
80 if pytorch_package_version is not None:
81 pytorch_package_dep += "==" + pytorch_package_version
82
83 if platform.system() == 'Windows':
84 ext_modules = None
85 else:
86 ext_modules = [
87 CppExtension(
88 '_torch_sox',
89 ['torchaudio/torch_sox.cpp'],
90 libraries=libraries,
91 include_dirs=include_dirs,
92 extra_compile_args=eca,
93 extra_objects=extra_objects,
94 extra_link_args=ela),
95 ]
96
97 setup(
98 name="torchaudio",
99 version=version,
100 description="An audio package for PyTorch",
101 url="https://github.com/pytorch/audio",
102 author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough",
103 author_email="[email protected]",
104 classifiers=[
105 "Environment :: Plugins",
106 "Intended Audience :: Developers",
107 "Intended Audience :: Science/Research",
108 "License :: OSI Approved :: BSD License",
109 "Operating System :: MacOS :: MacOS X",
110 "Operating System :: Microsoft :: Windows",
111 "Operating System :: POSIX",
112 "Programming Language :: C++",
113 "Programming Language :: Python :: 2.7",
114 "Programming Language :: Python :: 3",
115 "Programming Language :: Python :: Implementation :: CPython",
116 "Topic :: Multimedia :: Sound/Audio",
117 "Topic :: Scientific/Engineering :: Artificial Intelligence"
118 ],
119 # Exclude the build files.
120 packages=find_packages(exclude=["build"]),
121 ext_modules=ext_modules,
122 cmdclass={'build_ext': BuildExtension},
123 install_requires=[pytorch_package_dep]
124 )
125
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -88,7 +88,7 @@
'_torch_sox',
['torchaudio/torch_sox.cpp'],
libraries=libraries,
- include_dirs=include_dirs,
+ include_dirs=include_dirs + [cwd],
extra_compile_args=eca,
extra_objects=extra_objects,
extra_link_args=ela),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -88,7 +88,7 @@\n '_torch_sox',\n ['torchaudio/torch_sox.cpp'],\n libraries=libraries,\n- include_dirs=include_dirs,\n+ include_dirs=include_dirs + [cwd],\n extra_compile_args=eca,\n extra_objects=extra_objects,\n extra_link_args=ela),\n", "issue": "multiple definitions of `SoxEffect`.\n`SoxEffect` is defined both in\r\n\r\nhttps://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.h#L50-L54\r\n\r\nand\r\n\r\nhttps://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.cpp#L73-L77\r\n\r\n\r\n-----\r\n\r\n-----\r\n\r\n`torch_sox.h` can be included into `torch_sox.cpp` so that `SoxEffect` needs to be defined\r\nonly once in the header.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport platform\nimport sys\nimport subprocess\n\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension\n\n\ndef check_env_flag(name, default=''):\n return os.getenv(name, default).upper() in set(['ON', '1', 'YES', 'TRUE', 'Y'])\n\nDEBUG = check_env_flag('DEBUG')\nIS_WHEEL = check_env_flag('IS_WHEEL')\nIS_CONDA = check_env_flag('IS_CONDA')\n\nprint('DEBUG:', DEBUG, 'IS_WHEEL:', IS_WHEEL, 'IS_CONDA:', IS_CONDA)\n\neca = []\nela = []\nif DEBUG:\n if platform.system() == 'Windows':\n ela += ['/DEBUG:FULL']\n else:\n eca += ['-O0', '-g']\n ela += ['-O0', '-g']\n\n\nlibraries = []\ninclude_dirs = []\nextra_objects = []\n\n# Hypothetically, the conda distribution could rely on an external sox,\n# but the library is pretty small and it is not available on the default\n# anaconda channel. So we statically link it in, just as we do with wheels.\nif IS_WHEEL or IS_CONDA:\n audio_path = os.path.dirname(os.path.abspath(__file__))\n\n include_dirs += [os.path.join(audio_path, 'third_party/flac/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/lame/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/sox/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/mad/include')]\n\n # proper link order (sox, mad, flac, lame)\n # (the most important thing is that dependencies come after a libraryl\n # e.g., sox comes first)\n extra_objects += [os.path.join(audio_path, 'third_party/sox/lib/libsox.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/mad/lib/libmad.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/flac/lib/libFLAC.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/lame/lib/libmp3lame.a')]\nelse:\n libraries += ['sox']\n\n\n# Creating the version file\ncwd = os.path.dirname(os.path.abspath(__file__))\nversion = '0.6.0a0'\nsha = 'Unknown'\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('BUILD_VERSION'):\n version = os.getenv('BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint('-- Building version ' + version)\n\nversion_path = os.path.join(cwd, 'torchaudio', 'version.py')\nwith open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\npytorch_package_version = os.getenv('PYTORCH_VERSION')\n\npytorch_package_dep = 'torch'\nif pytorch_package_version is not None:\n pytorch_package_dep += \"==\" + pytorch_package_version\n\nif platform.system() == 'Windows':\n ext_modules = None\nelse:\n ext_modules = [\n 
CppExtension(\n '_torch_sox',\n ['torchaudio/torch_sox.cpp'],\n libraries=libraries,\n include_dirs=include_dirs,\n extra_compile_args=eca,\n extra_objects=extra_objects,\n extra_link_args=ela),\n ]\n\nsetup(\n name=\"torchaudio\",\n version=version,\n description=\"An audio package for PyTorch\",\n url=\"https://github.com/pytorch/audio\",\n author=\"Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Environment :: Plugins\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\"\n ],\n # Exclude the build files.\n packages=find_packages(exclude=[\"build\"]),\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExtension},\n install_requires=[pytorch_package_dep]\n)\n", "path": "setup.py"}]} | 2,015 | 95 |
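The golden diff fixes the duplicate `SoxEffect` definition at build time: adding the repository root to the include path lets `torch_sox.cpp` include `torch_sox.h` instead of redefining the struct. A minimal sketch of the relevant `setup.py` fragment (illustrative only; the real build also appends the third-party include directories):

```python
import os

from torch.utils.cpp_extension import CppExtension

cwd = os.path.dirname(os.path.abspath(__file__))  # repository root

ext_modules = [
    CppExtension(
        '_torch_sox',
        ['torchaudio/torch_sox.cpp'],
        # With cwd on the include path, torch_sox.cpp can
        # '#include "torchaudio/torch_sox.h"', so SoxEffect is
        # defined exactly once, in the header.
        include_dirs=[cwd],
    ),
]
```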
gh_patches_debug_18014 | rasdani/github-patches | git_diff | streamlink__streamlink-1606 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tf1 plugin better stream
Hi,
Are you sure there isn't a better stream than 360p for TF1?
[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/tf1/direct
[cli][info] Available streams: 496k (worst), 234p_alt, 234p, 896k, 360p_alt, 360p (best)
I get far better quality with a web browser than with `best`.
</issue>
<code>
[start of src/streamlink/plugins/tf1.py]
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http, useragents
6 from streamlink.stream import HDSStream
7 from streamlink.stream import HLSStream
8
9
10 class TF1(Plugin):
11 url_re = re.compile(r"https?://(?:www\.)?(?:tf1\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?")
12 embed_url = "http://www.wat.tv/embedframe/live{0}"
13 embed_re = re.compile(r"urlLive.*?:.*?\"(http.*?)\"", re.MULTILINE)
14 api_url = "http://www.wat.tv/get/{0}/591997"
15 swf_url = "http://www.wat.tv/images/v70/PlayerLite.swf"
16 hds_channel_remap = {"tf1": "androidliveconnect", "lci": "androidlivelci", "tfx" : "nt1live", "tf1-series-films" : "hd1live" }
17 hls_channel_remap = {"lci": "LCI", "tf1": "V4", "tfx" : "nt1", "tf1-series-films" : "hd1" }
18
19 @classmethod
20 def can_handle_url(cls, url):
21 return cls.url_re.match(url) is not None
22
23 def _get_hds_streams(self, channel):
24 channel = self.hds_channel_remap.get(channel, "{0}live".format(channel))
25 manifest_url = http.get(self.api_url.format(channel),
26 params={"getURL": 1},
27 headers={"User-Agent": useragents.FIREFOX}).text
28
29 for s in HDSStream.parse_manifest(self.session,
30 manifest_url,
31 pvswf=self.swf_url,
32 headers={"User-Agent": useragents.FIREFOX}).items():
33 yield s
34
35 def _get_hls_streams(self, channel):
36 channel = self.hls_channel_remap.get(channel, channel)
37 embed_url = self.embed_url.format(channel)
38 self.logger.debug("Found embed URL: {0}", embed_url)
39 # page needs to have a mobile user agent
40 embed_page = http.get(embed_url, headers={"User-Agent": useragents.ANDROID})
41
42 m = self.embed_re.search(embed_page.text)
43 if m:
44 hls_stream_url = m.group(1)
45
46 try:
47 for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
48 yield s
49 except Exception:
50 self.logger.error("Failed to load the HLS playlist for {0}", channel)
51
52 def _get_streams(self):
53 m = self.url_re.match(self.url)
54 if m:
55 channel = m.group(1) or m.group(2)
56 self.logger.debug("Found channel {0}", channel)
57 for s in self._get_hds_streams(channel):
58 yield s
59
60 for s in self._get_hls_streams(channel):
61 yield s
62
63
64 __plugin__ = TF1
65
[end of src/streamlink/plugins/tf1.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/src/streamlink/plugins/tf1.py b/src/streamlink/plugins/tf1.py
--- a/src/streamlink/plugins/tf1.py
+++ b/src/streamlink/plugins/tf1.py
@@ -1,6 +1,7 @@
from __future__ import print_function
import re
+from streamlink.compat import urlparse, parse_qsl
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HDSStream
@@ -41,8 +42,9 @@
m = self.embed_re.search(embed_page.text)
if m:
- hls_stream_url = m.group(1)
-
+ o = urlparse(m.group(1))
+ prms = dict(parse_qsl(o.query))
+ hls_stream_url = "{0}://{1}{2}?hdnea={3}".format(o.scheme, o.netloc, o.path, prms["hdnea"])
try:
for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
yield s
| {"golden_diff": "diff --git a/src/streamlink/plugins/tf1.py b/src/streamlink/plugins/tf1.py\n--- a/src/streamlink/plugins/tf1.py\n+++ b/src/streamlink/plugins/tf1.py\n@@ -1,6 +1,7 @@\n from __future__ import print_function\n import re\n \n+from streamlink.compat import urlparse, parse_qsl\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http, useragents\n from streamlink.stream import HDSStream\n@@ -41,8 +42,9 @@\n \n m = self.embed_re.search(embed_page.text)\n if m:\n- hls_stream_url = m.group(1)\n-\n+ o = urlparse(m.group(1))\n+ prms = dict(parse_qsl(o.query))\n+ hls_stream_url = \"{0}://{1}{2}?hdnea={3}\".format(o.scheme, o.netloc, o.path, prms[\"hdnea\"])\n try:\n for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n yield s\n", "issue": "tf1 plugin better stream\nHi,\r\n\r\nAre you sure there isn't better stream than 360p for TF1 ?\r\n[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/tf1/direct\r\n[cli][info] Available streams: 496k (worst), 234p_alt, 234p, 896k, 360p_alt, 360p (best)\r\n\r\nI have a far better quality with a web browser than best.\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, useragents\nfrom streamlink.stream import HDSStream\nfrom streamlink.stream import HLSStream\n\n\nclass TF1(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.)?(?:tf1\\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?\")\n embed_url = \"http://www.wat.tv/embedframe/live{0}\"\n embed_re = re.compile(r\"urlLive.*?:.*?\\\"(http.*?)\\\"\", re.MULTILINE)\n api_url = \"http://www.wat.tv/get/{0}/591997\"\n swf_url = \"http://www.wat.tv/images/v70/PlayerLite.swf\"\n hds_channel_remap = {\"tf1\": \"androidliveconnect\", \"lci\": \"androidlivelci\", \"tfx\" : \"nt1live\", \"tf1-series-films\" : \"hd1live\" }\n hls_channel_remap = {\"lci\": \"LCI\", \"tf1\": \"V4\", \"tfx\" : \"nt1\", \"tf1-series-films\" : \"hd1\" }\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_hds_streams(self, channel):\n channel = self.hds_channel_remap.get(channel, \"{0}live\".format(channel))\n manifest_url = http.get(self.api_url.format(channel),\n params={\"getURL\": 1},\n headers={\"User-Agent\": useragents.FIREFOX}).text\n\n for s in HDSStream.parse_manifest(self.session,\n manifest_url,\n pvswf=self.swf_url,\n headers={\"User-Agent\": useragents.FIREFOX}).items():\n yield s\n\n def _get_hls_streams(self, channel):\n channel = self.hls_channel_remap.get(channel, channel)\n embed_url = self.embed_url.format(channel)\n self.logger.debug(\"Found embed URL: {0}\", embed_url)\n # page needs to have a mobile user agent\n embed_page = http.get(embed_url, headers={\"User-Agent\": useragents.ANDROID})\n\n m = self.embed_re.search(embed_page.text)\n if m:\n hls_stream_url = m.group(1)\n\n try:\n for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n yield s\n except Exception:\n self.logger.error(\"Failed to load the HLS playlist for {0}\", channel)\n\n def _get_streams(self):\n m = self.url_re.match(self.url)\n if m:\n channel = m.group(1) or m.group(2)\n self.logger.debug(\"Found channel {0}\", channel)\n for s in self._get_hds_streams(channel):\n yield s\n\n for s in self._get_hls_streams(channel):\n yield s\n\n\n__plugin__ = TF1\n", "path": "src/streamlink/plugins/tf1.py"}]} | 1,444 | 231 |
gh_patches_debug_1466 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-1813 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Red input fields on first page load
When loading some pages, input fields are reddish, suggesting there were validation errors, even though no input has been submitted yet.

</issue>
<code>
[start of meinberlin/apps/users/fields.py]
1 import re
2
3 from django import forms
4 from django.core.exceptions import ValidationError
5 from django.core.validators import EmailValidator
6 from django.forms import widgets
7 from django.utils.translation import ugettext_lazy as _
8
9
10 class CommaSeparatedEmailField(forms.Field):
11 email_validator = EmailValidator(
12 message=_('Please enter correct email addresses, separated by '
13 'commas.')
14 )
15
16 widget = widgets.TextInput(attrs={
17 'placeholder': '[email protected], [email protected], '
18 '[email protected],…'
19 })
20
21 def to_python(self, value):
22 if not value:
23 return []
24
25 emails = []
26 for email in value.split(','):
27 email = email.strip()
28 self.email_validator(email)
29 emails.append(email)
30
31 return emails
32
33
34 class EmailFileField(forms.FileField):
35 """Extract emails from uploaded text files."""
36
37 widget = widgets.FileInput
38 # Find possible email strings. Emails may be quoted and separated by
39 # whitespaces, commas, semicolons or < and >.
40 email_regex = re.compile(r'[^\s;,"\'<]+@[^\s;,"\'>]+\.[a-z]{2,}')
41 email_validator = EmailValidator()
42
43 def clean(self, data, initial=None):
44 file = super().clean(data, initial)
45 return self._extract_emails(file)
46
47 def _extract_emails(self, file):
48 if not file:
49 return []
50
51 emails = []
52 for byteline in file:
53 # As it is difficult to guess the correct encoding of a file,
54 # email addresses are restricted to contain only ascii letters.
55 # This works for every encoding which is a superset of ascii like
56 # utf-8 and latin-1. Non ascii chars are simply ignored.
57 line = byteline.decode('ascii', 'ignore')
58 for match in self.email_regex.finditer(line):
59 email = match.group(0)
60 if self.is_valid_email(email):
61 emails.append(email)
62 return emails
63
64 def is_valid_email(self, email):
65 try:
66 self.email_validator(email)
67 return True
68 except ValidationError:
69 return False
70
[end of meinberlin/apps/users/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/meinberlin/apps/users/fields.py b/meinberlin/apps/users/fields.py
--- a/meinberlin/apps/users/fields.py
+++ b/meinberlin/apps/users/fields.py
@@ -14,8 +14,7 @@
)
widget = widgets.TextInput(attrs={
- 'placeholder': '[email protected], [email protected], '
- '[email protected],…'
+ 'placeholder': '[email protected], [email protected],…'
})
def to_python(self, value):
| {"golden_diff": "diff --git a/meinberlin/apps/users/fields.py b/meinberlin/apps/users/fields.py\n--- a/meinberlin/apps/users/fields.py\n+++ b/meinberlin/apps/users/fields.py\n@@ -14,8 +14,7 @@\n )\n \n widget = widgets.TextInput(attrs={\n- 'placeholder': '[email protected], [email protected], '\n- '[email protected],\u2026'\n+ 'placeholder': '[email protected], [email protected],\u2026'\n })\n \n def to_python(self, value):\n", "issue": "Red input fields on first page load\nWhen loading some pages, input fields are redish, suggesting there where validation errors, but there was not input submit yet.\r\n\r\n\n", "before_files": [{"content": "import re\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.forms import widgets\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CommaSeparatedEmailField(forms.Field):\n email_validator = EmailValidator(\n message=_('Please enter correct email addresses, separated by '\n 'commas.')\n )\n\n widget = widgets.TextInput(attrs={\n 'placeholder': '[email protected], [email protected], '\n '[email protected],\u2026'\n })\n\n def to_python(self, value):\n if not value:\n return []\n\n emails = []\n for email in value.split(','):\n email = email.strip()\n self.email_validator(email)\n emails.append(email)\n\n return emails\n\n\nclass EmailFileField(forms.FileField):\n \"\"\"Extract emails from uploaded text files.\"\"\"\n\n widget = widgets.FileInput\n # Find possible email strings. Emails may be quoted and separated by\n # whitespaces, commas, semicolons or < and >.\n email_regex = re.compile(r'[^\\s;,\"\\'<]+@[^\\s;,\"\\'>]+\\.[a-z]{2,}')\n email_validator = EmailValidator()\n\n def clean(self, data, initial=None):\n file = super().clean(data, initial)\n return self._extract_emails(file)\n\n def _extract_emails(self, file):\n if not file:\n return []\n\n emails = []\n for byteline in file:\n # As it is difficult to guess the correct encoding of a file,\n # email addresses are restricted to contain only ascii letters.\n # This works for every encoding which is a superset of ascii like\n # utf-8 and latin-1. Non ascii chars are simply ignored.\n line = byteline.decode('ascii', 'ignore')\n for match in self.email_regex.finditer(line):\n email = match.group(0)\n if self.is_valid_email(email):\n emails.append(email)\n return emails\n\n def is_valid_email(self, email):\n try:\n self.email_validator(email)\n return True\n except ValidationError:\n return False\n", "path": "meinberlin/apps/users/fields.py"}]} | 1,245 | 123 |
gh_patches_debug_16723 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2915 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot edit or delete challenge pages with slug `evaluation`
</issue>
<code>
[start of app/grandchallenge/pages/forms.py]
1 from django import forms
2 from django.db.models import BLANK_CHOICE_DASH
3 from django_summernote.widgets import SummernoteInplaceWidget
4
5 from grandchallenge.core.forms import SaveFormInitMixin
6 from grandchallenge.pages.models import Page
7
8
9 class PageCreateForm(SaveFormInitMixin, forms.ModelForm):
10 def __init__(self, *args, challenge, **kwargs):
11 self.challenge = challenge
12 super().__init__(*args, **kwargs)
13
14 class Meta:
15 model = Page
16 fields = (
17 "display_title",
18 "permission_level",
19 "hidden",
20 "html",
21 )
22 widgets = {"html": SummernoteInplaceWidget()}
23 help_texts = {
24 "html": (
25 "The content of your page. <b>Please note</b>: your html will "
26 "be filtered after it has been saved to remove any non-HTML5 "
27 "compliant markup and scripts. The filtering is not reflected "
28 "in the live view so please <b>check the rendering of your "
29 "page after you click save</b>. If you're going to paste from "
30 "another source such as MS Word, please <b>paste without "
31 "formatting</b> using <b>CTRL+SHIFT+V</b> on Windows or "
32 "<b>⇧+⌥+⌘+V</b> on OS X."
33 )
34 }
35
36
37 class PageUpdateForm(PageCreateForm):
38 """Like the page update form but you can also move the page."""
39
40 move = forms.CharField(widget=forms.Select)
41 move.required = False
42 move.widget.choices = (
43 (BLANK_CHOICE_DASH[0]),
44 (Page.FIRST, "First"),
45 (Page.UP, "Up"),
46 (Page.DOWN, "Down"),
47 (Page.LAST, "Last"),
48 )
49
[end of app/grandchallenge/pages/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/app/grandchallenge/pages/forms.py b/app/grandchallenge/pages/forms.py
--- a/app/grandchallenge/pages/forms.py
+++ b/app/grandchallenge/pages/forms.py
@@ -1,4 +1,5 @@
from django import forms
+from django.core.exceptions import ValidationError
from django.db.models import BLANK_CHOICE_DASH
from django_summernote.widgets import SummernoteInplaceWidget
@@ -33,6 +34,17 @@
)
}
+ def clean_display_title(self):
+ display_title = self.cleaned_data["display_title"]
+
+ if display_title.lower() in {"evaluation"}:
+ # evaluation results in a URL clash, especially with the update page.
+ raise ValidationError(
+ "Title not allowed, please select an alternative"
+ )
+
+ return display_title
+
class PageUpdateForm(PageCreateForm):
"""Like the page update form but you can also move the page."""
| {"golden_diff": "diff --git a/app/grandchallenge/pages/forms.py b/app/grandchallenge/pages/forms.py\n--- a/app/grandchallenge/pages/forms.py\n+++ b/app/grandchallenge/pages/forms.py\n@@ -1,4 +1,5 @@\n from django import forms\n+from django.core.exceptions import ValidationError\n from django.db.models import BLANK_CHOICE_DASH\n from django_summernote.widgets import SummernoteInplaceWidget\n \n@@ -33,6 +34,17 @@\n )\n }\n \n+ def clean_display_title(self):\n+ display_title = self.cleaned_data[\"display_title\"]\n+\n+ if display_title.lower() in {\"evaluation\"}:\n+ # evaluation results in a URL clash, especially with the update page.\n+ raise ValidationError(\n+ \"Title not allowed, please select an alternative\"\n+ )\n+\n+ return display_title\n+\n \n class PageUpdateForm(PageCreateForm):\n \"\"\"Like the page update form but you can also move the page.\"\"\"\n", "issue": "Cannot edit or delete challenge pages with slug `evaluation`\n\n", "before_files": [{"content": "from django import forms\nfrom django.db.models import BLANK_CHOICE_DASH\nfrom django_summernote.widgets import SummernoteInplaceWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.pages.models import Page\n\n\nclass PageCreateForm(SaveFormInitMixin, forms.ModelForm):\n def __init__(self, *args, challenge, **kwargs):\n self.challenge = challenge\n super().__init__(*args, **kwargs)\n\n class Meta:\n model = Page\n fields = (\n \"display_title\",\n \"permission_level\",\n \"hidden\",\n \"html\",\n )\n widgets = {\"html\": SummernoteInplaceWidget()}\n help_texts = {\n \"html\": (\n \"The content of your page. <b>Please note</b>: your html will \"\n \"be filtered after it has been saved to remove any non-HTML5 \"\n \"compliant markup and scripts. The filtering is not reflected \"\n \"in the live view so please <b>check the rendering of your \"\n \"page after you click save</b>. If you're going to paste from \"\n \"another source such as MS Word, please <b>paste without \"\n \"formatting</b> using <b>CTRL+SHIFT+V</b> on Windows or \"\n \"<b>\u21e7+\u2325+\u2318+V</b> on OS X.\"\n )\n }\n\n\nclass PageUpdateForm(PageCreateForm):\n \"\"\"Like the page update form but you can also move the page.\"\"\"\n\n move = forms.CharField(widget=forms.Select)\n move.required = False\n move.widget.choices = (\n (BLANK_CHOICE_DASH[0]),\n (Page.FIRST, \"First\"),\n (Page.UP, \"Up\"),\n (Page.DOWN, \"Down\"),\n (Page.LAST, \"Last\"),\n )\n", "path": "app/grandchallenge/pages/forms.py"}]} | 1,036 | 209 |
gh_patches_debug_20301 | rasdani/github-patches | git_diff | UTNkar__moore-134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing search template
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
### Prerequisites
* [X] Put an X between the brackets on this line if you have done all of the
following:
* Reproduced the problem with clear cache.
* (If running the application locally:) Made sure you're running the newest version on the development branch
* Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues
### Description
The current search is not really working. Although you can search, the results are ugly and very spotty.
Goal: Evaluate the searchable fields within Project Moore and make a search template.
### Steps to Reproduce
1. Visit the home page
2. Search something
3. Be disappointed ☹️
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
</issue>
<code>
[start of website/materialize/templatetags/materialize.py]
1 from django import template
2 from django.template import loader
3
4 register = template.Library()
5
6
7 def get_widget_name(field):
8 return field.field.widget.__class__.__name__
9
10
11 def append_classes(field):
12 field.field.label_suffix = ''
13 classes = field.field.widget.attrs.get('class', '')
14 classes += ' validate'
15 if field.errors:
16 classes += ' invalid'
17 field.field.widget.attrs['class'] = classes
18
19
20 def render_field(template, field, prefix=None):
21 t = loader.get_template(template)
22 c = {
23 'field': field,
24 'prefix': prefix,
25 }
26 html = t.render(c)
27 return html
28
29
30 @register.simple_tag
31 def materialize_field(field, prefix=None):
32 widget = get_widget_name(field)
33 if widget in ['TextInput', 'EmailInput', 'PasswordInput', 'Select',
34 'Textarea']:
35 append_classes(field)
36 return render_field('materialize/form/input.html', field, prefix)
37 else:
38 return field.as_widget()
39
[end of website/materialize/templatetags/materialize.py]
[start of website/search/views.py]
1 from __future__ import absolute_import, unicode_literals
2
3 from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
4 from django.shortcuts import render
5
6 from wagtail.wagtailcore.models import Page
7 from wagtail.wagtailsearch.models import Query
8
9
10 def search(request):
11 search_query = request.GET.get('query', None)
12 page = request.GET.get('page', 1)
13
14 # Search
15 if search_query:
16 search_results = Page.objects.live().search(search_query)
17 query = Query.get(search_query)
18
19 # Record hit
20 query.add_hit()
21 else:
22 search_results = Page.objects.none()
23
24 # Pagination
25 paginator = Paginator(search_results, 10)
26 try:
27 search_results = paginator.page(page)
28 except PageNotAnInteger:
29 search_results = paginator.page(1)
30 except EmptyPage:
31 search_results = paginator.page(paginator.num_pages)
32
33 return render(request, 'search/search.html', {
34 'search_query': search_query,
35 'search_results': search_results,
36 })
37
[end of website/search/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/website/materialize/templatetags/materialize.py b/website/materialize/templatetags/materialize.py
--- a/website/materialize/templatetags/materialize.py
+++ b/website/materialize/templatetags/materialize.py
@@ -23,8 +23,7 @@
'field': field,
'prefix': prefix,
}
- html = t.render(c)
- return html
+ return t.render(c)
@register.simple_tag
@@ -36,3 +35,12 @@
return render_field('materialize/form/input.html', field, prefix)
else:
return field.as_widget()
+
+
[email protected]_tag('materialize/pagination.html')
+def materialize_pagination(page, url):
+ return {
+ 'page': page,
+ 'paginator': page.paginator,
+ 'url': url,
+ }
diff --git a/website/search/views.py b/website/search/views.py
--- a/website/search/views.py
+++ b/website/search/views.py
@@ -22,7 +22,7 @@
search_results = Page.objects.none()
# Pagination
- paginator = Paginator(search_results, 10)
+ paginator = Paginator(search_results, 25)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
| {"golden_diff": "diff --git a/website/materialize/templatetags/materialize.py b/website/materialize/templatetags/materialize.py\n--- a/website/materialize/templatetags/materialize.py\n+++ b/website/materialize/templatetags/materialize.py\n@@ -23,8 +23,7 @@\n 'field': field,\n 'prefix': prefix,\n }\n- html = t.render(c)\n- return html\n+ return t.render(c)\n \n \n @register.simple_tag\n@@ -36,3 +35,12 @@\n return render_field('materialize/form/input.html', field, prefix)\n else:\n return field.as_widget()\n+\n+\[email protected]_tag('materialize/pagination.html')\n+def materialize_pagination(page, url):\n+ return {\n+ 'page': page,\n+ 'paginator': page.paginator,\n+ 'url': url,\n+ }\ndiff --git a/website/search/views.py b/website/search/views.py\n--- a/website/search/views.py\n+++ b/website/search/views.py\n@@ -22,7 +22,7 @@\n search_results = Page.objects.none()\n \n # Pagination\n- paginator = Paginator(search_results, 10)\n+ paginator = Paginator(search_results, 25)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n", "issue": "Missing search template\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Prerequisites\r\n\r\n* [X] Put an X between the brackets on this line if you have done all of the\r\nfollowing:\r\n * Reproduced the problem with clear cache.\r\n * (If running the application locally:) Made sure your running the newest version on the development branch\r\n * Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues\r\n\r\n### Description\r\n\r\nThe current search is now really working. Although you can search, the results are ugly and very spotty. \r\n\r\nGoal: Evaluate the searchable fields within Project Moore and make a search template.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Visit the home page\r\n2. Search something\r\n3. 
Be disappointed \u2639\ufe0f \r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\n", "before_files": [{"content": "from django import template\nfrom django.template import loader\n\nregister = template.Library()\n\n\ndef get_widget_name(field):\n return field.field.widget.__class__.__name__\n\n\ndef append_classes(field):\n field.field.label_suffix = ''\n classes = field.field.widget.attrs.get('class', '')\n classes += ' validate'\n if field.errors:\n classes += ' invalid'\n field.field.widget.attrs['class'] = classes\n\n\ndef render_field(template, field, prefix=None):\n t = loader.get_template(template)\n c = {\n 'field': field,\n 'prefix': prefix,\n }\n html = t.render(c)\n return html\n\n\[email protected]_tag\ndef materialize_field(field, prefix=None):\n widget = get_widget_name(field)\n if widget in ['TextInput', 'EmailInput', 'PasswordInput', 'Select',\n 'Textarea']:\n append_classes(field)\n return render_field('materialize/form/input.html', field, prefix)\n else:\n return field.as_widget()\n", "path": "website/materialize/templatetags/materialize.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.shortcuts import render\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(request):\n search_query = request.GET.get('query', None)\n page = request.GET.get('page', 1)\n\n # Search\n if search_query:\n search_results = Page.objects.live().search(search_query)\n query = Query.get(search_query)\n\n # Record hit\n query.add_hit()\n else:\n search_results = Page.objects.none()\n\n # Pagination\n paginator = Paginator(search_results, 10)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n })\n", "path": "website/search/views.py"}]} | 1,336 | 302 |
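Besides bumping the page size, the fix factors the pagination markup into a reusable inclusion tag. The registration pattern, as added to `materialize.py`:

```python
from django import template

register = template.Library()


@register.inclusion_tag('materialize/pagination.html')
def materialize_pagination(page, url):
    # Render the shared Materialize pagination snippet for any
    # paginated Django Page object (e.g. the search results).
    return {
        'page': page,
        'paginator': page.paginator,
        'url': url,
    }
```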
gh_patches_debug_12092 | rasdani/github-patches | git_diff | pre-commit__pre-commit-86 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError in staged_files_only
```
$ pre-commit
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to .../.pre-commit-files/patch1397853050.
Traceback (most recent call last):
File ".../bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File ".../lib/python2.6/site-packages/pre_commit/util.py", line 52, in wrapper
return func(argv)
File ".../lib/python2.6/site-packages/pre_commit/run.py", line 59, in run
return commands.run(runner, args)
File ".../lib/python2.6/site-packages/pre_commit/commands.py", line 254, in run
with ctx:
File "/usr/lib64/python2.6/contextlib.py", line 16, in __enter__
return self.gen.next()
File ".../lib/python2.6/site-packages/pre_commit/staged_files_only.py", line 32, in staged_files_only
patch_file.write(diff_stdout)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 3795: ordinal not in range(128)
```
</issue>
<code>
[start of pre_commit/staged_files_only.py]
1 import contextlib
2 import logging
3 import time
4
5 from pre_commit.prefixed_command_runner import CalledProcessError
6
7
8 logger = logging.getLogger('pre_commit')
9
10
11 @contextlib.contextmanager
12 def staged_files_only(cmd_runner):
13 """Clear any unstaged changes from the git working directory inside this
14 context.
15
16 Args:
17 cmd_runner - PrefixedCommandRunner
18 """
19 # Determine if there are unstaged files
20 retcode, diff_stdout, _ = cmd_runner.run(
21 ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],
22 retcode=None,
23 )
24 if retcode and diff_stdout.strip():
25 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
26 logger.warning('Unstaged files detected.')
27 logger.info(
28 'Stashing unstaged files to {0}.'.format(patch_filename),
29 )
30 # Save the current unstaged changes as a patch
31 with open(patch_filename, 'w') as patch_file:
32 patch_file.write(diff_stdout)
33
34 # Clear the working directory of unstaged changes
35 cmd_runner.run(['git', 'checkout', '--', '.'])
36 try:
37 yield
38 finally:
39 # Try to apply the patch we saved
40 try:
41 cmd_runner.run(['git', 'apply', patch_filename])
42 except CalledProcessError:
43 logger.warning(
44 'Stashed changes conflicted with hook auto-fixes... '
45 'Rolling back fixes...'
46 )
47 # We failed to apply the patch, presumably due to fixes made
48 # by hooks.
49 # Roll back the changes made by hooks.
50 cmd_runner.run(['git', 'checkout', '--', '.'])
51 cmd_runner.run(['git', 'apply', patch_filename])
52 logger.info('Restored changes from {0}.'.format(patch_filename))
53 else:
54 # There weren't any staged files so we don't need to do anything
55 # special
56 yield
57
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -1,4 +1,5 @@
import contextlib
+import io
import logging
import time
@@ -28,7 +29,7 @@
'Stashing unstaged files to {0}.'.format(patch_filename),
)
# Save the current unstaged changes as a patch
- with open(patch_filename, 'w') as patch_file:
+ with io.open(patch_filename, 'w', encoding='utf-8') as patch_file:
patch_file.write(diff_stdout)
# Clear the working directory of unstaged changes
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -1,4 +1,5 @@\n import contextlib\n+import io\n import logging\n import time\n \n@@ -28,7 +29,7 @@\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n- with open(patch_filename, 'w') as patch_file:\n+ with io.open(patch_filename, 'w', encoding='utf-8') as patch_file:\n patch_file.write(diff_stdout)\n \n # Clear the working directory of unstaged changes\n", "issue": "UnicodeDecodeError in staged_files_only\n```\n$ pre-commit\n[WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to .../.pre-commit-files/patch1397853050.\nTraceback (most recent call last):\n File \".../bin/pre-commit\", line 9, in <module>\n load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()\n File \".../lib/python2.6/site-packages/pre_commit/util.py\", line 52, in wrapper\n return func(argv)\n File \".../lib/python2.6/site-packages/pre_commit/run.py\", line 59, in run\n return commands.run(runner, args)\n File \".../lib/python2.6/site-packages/pre_commit/commands.py\", line 254, in run\n with ctx:\n File \"/usr/lib64/python2.6/contextlib.py\", line 16, in __enter__\n return self.gen.next()\n File \".../lib/python2.6/site-packages/pre_commit/staged_files_only.py\", line 32, in staged_files_only\n patch_file.write(diff_stdout)\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\xfc' in position 3795: ordinal not in range(128)\n```\n\n", "before_files": [{"content": "import contextlib\nimport logging\nimport time\n\nfrom pre_commit.prefixed_command_runner import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout, _ = cmd_runner.run(\n ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],\n retcode=None,\n )\n if retcode and diff_stdout.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with open(patch_filename, 'w') as patch_file:\n patch_file.write(diff_stdout)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,349 | 161 |
gh_patches_debug_13481 | rasdani/github-patches | git_diff | pulp__pulpcore-382 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the logic in the ACSHandleStage
**Version**
main
**Describe the bug**
If there are multiple Alternate Content Sources (ACS) that point to the same content, pick the first RemoteArtifact (RA) instead of the last.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
</issue>
<code>
[start of pulpcore/plugin/repo_version_utils.py]
1 from collections import defaultdict
2 from gettext import gettext as _
3 import logging
4
5 from django.db.models import Q
6
7
8 _logger = logging.getLogger(__name__)
9
10
11 def remove_duplicates(repository_version):
12 """
13 Inspect content additions in the `RepositoryVersion` and replace repository duplicates.
14
15 Some content can have two instances A and B which are unique, but cannot both exist together in
16 one repository. For example, pulp_file's content has `relative_path` for that file within the
17 repository.
18
19 Any content newly added to the :class:`~pulpcore.plugin.models.RepositoryVersion` is checked
20 against existing content in the :class:`~pulpcore.plugin.models.RepositoryVersion` with newer
21 "repository duplicates" replace existing "repository duplicates". Each Content model can define
22 a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`
23 contain the same value for two content units, they are considered "repository duplicates".
24
25 After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the
26 :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a
27 parameter to `run()`.
28
29 Args:
30 repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked
31 and possibly modified.
32 """
33 query_for_repo_duplicates_by_type = defaultdict(lambda: Q())
34 for item in repository_version.added():
35 detail_item = item.cast()
36 if detail_item.repo_key_fields == ():
37 continue
38 unit_q_dict = {
39 field: getattr(detail_item, field) for field in detail_item.repo_key_fields
40 }
41 item_query = Q(**unit_q_dict) & ~Q(pk=detail_item.pk)
42 query_for_repo_duplicates_by_type[detail_item._meta.model] |= item_query
43
44 for model in query_for_repo_duplicates_by_type:
45 _logger.debug(_("Removing duplicates for type: {}".format(model)))
46 qs = model.objects.filter(query_for_repo_duplicates_by_type[model])
47 repository_version.remove_content(qs)
48
[end of pulpcore/plugin/repo_version_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/pulpcore/plugin/repo_version_utils.py b/pulpcore/plugin/repo_version_utils.py
--- a/pulpcore/plugin/repo_version_utils.py
+++ b/pulpcore/plugin/repo_version_utils.py
@@ -22,10 +22,6 @@
a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`
contain the same value for two content units, they are considered "repository duplicates".
- After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the
- :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a
- parameter to `run()`.
-
Args:
repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked
and possibly modified.
| {"golden_diff": "diff --git a/pulpcore/plugin/repo_version_utils.py b/pulpcore/plugin/repo_version_utils.py\n--- a/pulpcore/plugin/repo_version_utils.py\n+++ b/pulpcore/plugin/repo_version_utils.py\n@@ -22,10 +22,6 @@\n a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`\n contain the same value for two content units, they are considered \"repository duplicates\".\n \n- After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the\n- :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a\n- parameter to `run()`.\n-\n Args:\n repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked\n and possibly modified.\n", "issue": "Improve the logic in the ACSHandleStage\n**Version**\r\nmain\r\n\r\n**Describe the bug**\r\nIf there are ACS that point to the same content, pick the first RA instead of last\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom gettext import gettext as _\nimport logging\n\nfrom django.db.models import Q\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef remove_duplicates(repository_version):\n \"\"\"\n Inspect content additions in the `RepositoryVersion` and replace repository duplicates.\n\n Some content can have two instances A and B which are unique, but cannot both exist together in\n one repository. For example, pulp_file's content has `relative_path` for that file within the\n repository.\n\n Any content newly added to the :class:`~pulpcore.plugin.models.RepositoryVersion` is checked\n against existing content in the :class:`~pulpcore.plugin.models.RepositoryVersion` with newer\n \"repository duplicates\" replace existing \"repository duplicates\". Each Content model can define\n a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`\n contain the same value for two content units, they are considered \"repository duplicates\".\n\n After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the\n :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a\n parameter to `run()`.\n\n Args:\n repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked\n and possibly modified.\n \"\"\"\n query_for_repo_duplicates_by_type = defaultdict(lambda: Q())\n for item in repository_version.added():\n detail_item = item.cast()\n if detail_item.repo_key_fields == ():\n continue\n unit_q_dict = {\n field: getattr(detail_item, field) for field in detail_item.repo_key_fields\n }\n item_query = Q(**unit_q_dict) & ~Q(pk=detail_item.pk)\n query_for_repo_duplicates_by_type[detail_item._meta.model] |= item_query\n\n for model in query_for_repo_duplicates_by_type:\n _logger.debug(_(\"Removing duplicates for type: {}\".format(model)))\n qs = model.objects.filter(query_for_repo_duplicates_by_type[model])\n repository_version.remove_content(qs)\n", "path": "pulpcore/plugin/repo_version_utils.py"}]} | 1,164 | 184 |
gh_patches_debug_4095 | rasdani/github-patches | git_diff | mne-tools__mne-python-6083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip install mne should install numpy and scipy
Currently, the `mne` PyPI package doesn't have any dependencies, but I think it should really treat `numpy` and `scipy` as hard dependencies (and automatically install them if they are not present).
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Copyright (C) 2011-2017 Alexandre Gramfort
4 # <[email protected]>
5
6 import os
7 import os.path as op
8
9 from setuptools import setup
10
11 # get the version (don't import mne here, so dependencies are not needed)
12 version = None
13 with open(op.join('mne', '__init__.py'), 'r') as fid:
14 for line in (line.strip() for line in fid):
15 if line.startswith('__version__'):
16 version = line.split('=')[1].strip().strip('\'')
17 break
18 if version is None:
19 raise RuntimeError('Could not determine version')
20
21
22 descr = """MNE python project for MEG and EEG data analysis."""
23
24 DISTNAME = 'mne'
25 DESCRIPTION = descr
26 MAINTAINER = 'Alexandre Gramfort'
27 MAINTAINER_EMAIL = '[email protected]'
28 URL = 'http://martinos.org/mne'
29 LICENSE = 'BSD (3-clause)'
30 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'
31 VERSION = version
32
33
34 def package_tree(pkgroot):
35 """Get the submodule list."""
36 # Adapted from VisPy
37 path = op.dirname(__file__)
38 subdirs = [op.relpath(i[0], path).replace(op.sep, '.')
39 for i in os.walk(op.join(path, pkgroot))
40 if '__init__.py' in i[2]]
41 return sorted(subdirs)
42
43
44 if __name__ == "__main__":
45 if op.exists('MANIFEST'):
46 os.remove('MANIFEST')
47
48 setup(name=DISTNAME,
49 maintainer=MAINTAINER,
50 include_package_data=True,
51 maintainer_email=MAINTAINER_EMAIL,
52 description=DESCRIPTION,
53 license=LICENSE,
54 url=URL,
55 version=VERSION,
56 download_url=DOWNLOAD_URL,
57 long_description=open('README.rst').read(),
58 zip_safe=False, # the package can run out of an .egg file
59 classifiers=['Intended Audience :: Science/Research',
60 'Intended Audience :: Developers',
61 'License :: OSI Approved',
62 'Programming Language :: Python',
63 'Topic :: Software Development',
64 'Topic :: Scientific/Engineering',
65 'Operating System :: Microsoft :: Windows',
66 'Operating System :: POSIX',
67 'Operating System :: Unix',
68 'Operating System :: MacOS',
69 'Programming Language :: Python :: 3',
70 ],
71 platforms='any',
72 packages=package_tree('mne'),
73 package_data={'mne': [
74 op.join('data', '*.sel'),
75 op.join('data', 'icos.fif.gz'),
76 op.join('data', 'coil_def*.dat'),
77 op.join('data', 'helmets', '*.fif.gz'),
78 op.join('data', 'FreeSurferColorLUT.txt'),
79 op.join('data', 'image', '*gif'),
80 op.join('data', 'image', '*lout'),
81 op.join('data', 'fsaverage', '*.fif'),
82 op.join('channels', 'data', 'layouts', '*.lout'),
83 op.join('channels', 'data', 'layouts', '*.lay'),
84 op.join('channels', 'data', 'montages', '*.sfp'),
85 op.join('channels', 'data', 'montages', '*.txt'),
86 op.join('channels', 'data', 'montages', '*.elc'),
87 op.join('channels', 'data', 'neighbors', '*.mat'),
88 op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),
89 op.join('gui', 'help', '*.json'),
90 op.join('html', '*.js'),
91 op.join('html', '*.css'),
92 op.join('io', 'artemis123', 'resources', '*.csv'),
93 op.join('io', 'edf', 'gdf_encodes.txt')
94 ]},
95 scripts=['bin/mne'])
96
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,6 +69,7 @@
'Programming Language :: Python :: 3',
],
platforms='any',
+ install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],
packages=package_tree('mne'),
package_data={'mne': [
op.join('data', '*.sel'),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,6 +69,7 @@\n 'Programming Language :: Python :: 3',\n ],\n platforms='any',\n+ install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],\n packages=package_tree('mne'),\n package_data={'mne': [\n op.join('data', '*.sel'),\n", "issue": "pip install mne should install numpy and scipy\nCurrently, the `mne` PyPI package doesn't have any dependencies, but I think it should really treat `numpy` and `scipy` as hard dependencies (and automatically install them if they are not present).\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (C) 2011-2017 Alexandre Gramfort\n# <[email protected]>\n\nimport os\nimport os.path as op\n\nfrom setuptools import setup\n\n# get the version (don't import mne here, so dependencies are not needed)\nversion = None\nwith open(op.join('mne', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = \"\"\"MNE python project for MEG and EEG data analysis.\"\"\"\n\nDISTNAME = 'mne'\nDESCRIPTION = descr\nMAINTAINER = 'Alexandre Gramfort'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://martinos.org/mne'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'\nVERSION = version\n\n\ndef package_tree(pkgroot):\n \"\"\"Get the submodule list.\"\"\"\n # Adapted from VisPy\n path = op.dirname(__file__)\n subdirs = [op.relpath(i[0], path).replace(op.sep, '.')\n for i in os.walk(op.join(path, pkgroot))\n if '__init__.py' in i[2]]\n return sorted(subdirs)\n\n\nif __name__ == \"__main__\":\n if op.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n zip_safe=False, # the package can run out of an .egg file\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3',\n ],\n platforms='any',\n packages=package_tree('mne'),\n package_data={'mne': [\n op.join('data', '*.sel'),\n op.join('data', 'icos.fif.gz'),\n op.join('data', 'coil_def*.dat'),\n op.join('data', 'helmets', '*.fif.gz'),\n op.join('data', 'FreeSurferColorLUT.txt'),\n op.join('data', 'image', '*gif'),\n op.join('data', 'image', '*lout'),\n op.join('data', 'fsaverage', '*.fif'),\n op.join('channels', 'data', 'layouts', '*.lout'),\n op.join('channels', 'data', 'layouts', '*.lay'),\n op.join('channels', 'data', 'montages', '*.sfp'),\n op.join('channels', 'data', 'montages', '*.txt'),\n op.join('channels', 'data', 'montages', '*.elc'),\n op.join('channels', 'data', 'neighbors', '*.mat'),\n op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),\n op.join('gui', 'help', '*.json'),\n op.join('html', '*.js'),\n op.join('html', '*.css'),\n op.join('io', 'artemis123', 'resources', '*.csv'),\n op.join('io', 'edf', 'gdf_encodes.txt')\n ]},\n scripts=['bin/mne'])\n", "path": "setup.py"}]} | 1,618 | 101 
|
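The change itself is one keyword argument to `setup()`. Reduced to a minimal sketch (version and package list are placeholders, not the project's real values):

```python
from setuptools import setup

setup(
    name='mne',
    version='0.0.0',              # placeholder
    packages=['mne'],             # trimmed; the real call walks the tree
    install_requires=[
        'numpy>=1.11.3',          # now hard dependencies: pip installs
        'scipy>=0.17.1',          # them automatically when missing
    ],
)
```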
gh_patches_debug_4828 | rasdani/github-patches | git_diff | hylang__hy-2078 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Run REPL using `python -m hy`
Figuring out how to get a REPL is not trivial. What do you think about making `python -m hy` do it?
</issue>
<code>
[start of hy/__main__.py]
1 import hy # NOQA
2 import sys
3
4 # This just mocks the normalish behavior of the Python interp. Helpful to aid
5 # with shimming existing apps that don't really "work" with Hy.
6 #
7 # You could say this script helps Hyjack a file.
8 #
9
10
11 if len(sys.argv) > 1:
12 sys.argv.pop(0)
13 hy.importer._import_from_path('__main__', sys.argv[0])
14 sys.exit(0) # right?
15
[end of hy/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file, wrapped in <patch> tags, in the same unified-diff format as the example patch shown for the first problem above. | diff --git a/hy/__main__.py b/hy/__main__.py
--- a/hy/__main__.py
+++ b/hy/__main__.py
@@ -1,14 +1,8 @@
-import hy # NOQA
import sys
-# This just mocks the normalish behavior of the Python interp. Helpful to aid
-# with shimming existing apps that don't really "work" with Hy.
-#
-# You could say this script helps Hyjack a file.
-#
+from hy.cmdline import hy_main
+# Running hy as a module (e.g. `python -m hy`)
+# is equivalent to running the main `hy` command.
-if len(sys.argv) > 1:
- sys.argv.pop(0)
- hy.importer._import_from_path('__main__', sys.argv[0])
- sys.exit(0) # right?
+sys.exit(hy_main())
| {"golden_diff": "diff --git a/hy/__main__.py b/hy/__main__.py\n--- a/hy/__main__.py\n+++ b/hy/__main__.py\n@@ -1,14 +1,8 @@\n-import hy # NOQA\n import sys\n \n-# This just mocks the normalish behavior of the Python interp. Helpful to aid\n-# with shimming existing apps that don't really \"work\" with Hy.\n-#\n-# You could say this script helps Hyjack a file.\n-#\n+from hy.cmdline import hy_main\n \n+# Running hy as a module (e.g. `python -m hy`)\n+# is equivalent to running the main `hy` command.\n \n-if len(sys.argv) > 1:\n- sys.argv.pop(0)\n- hy.importer._import_from_path('__main__', sys.argv[0])\n- sys.exit(0) # right?\n+sys.exit(hy_main())\n", "issue": "Run REPL using `python -m hy`\nFiguring out how to get a REPL is not trivial. What do you think about making `python -m hy` do it?\n", "before_files": [{"content": "import hy # NOQA\nimport sys\n\n# This just mocks the normalish behavior of the Python interp. Helpful to aid\n# with shimming existing apps that don't really \"work\" with Hy.\n#\n# You could say this script helps Hyjack a file.\n#\n\n\nif len(sys.argv) > 1:\n sys.argv.pop(0)\n hy.importer._import_from_path('__main__', sys.argv[0])\n sys.exit(0) # right?\n", "path": "hy/__main__.py"}]} | 695 | 200 |
gh_patches_debug_1702 | rasdani/github-patches | git_diff | mozilla__bugbug-331 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Figure out what to do with http_service on CI
We have two options:
- build the http_service with fake models and don't push it on CI. Build it with real models and push it after training (see the sketch after this list);
- build the http_service without models and let it download models at runtime.
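As a rough illustration of the first option, the model check could be made skippable via an environment variable, so a CI build with fake or missing models still succeeds; the variable name `CHECK_MODELS` is an assumption for this sketch:
```python
# Sketch: gate model validation behind an env var so CI images can skip it.
import os
import sys


def check_models() -> None:
    # Placeholder for the real validation that loads each trained model.
    pass


if __name__ == "__main__":
    if os.environ.get("CHECK_MODELS", "1") == "0":
        print("Skipping model check as instructed by $CHECK_MODELS")
        sys.exit(0)
    check_models()
```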
</issue>
<code>
[start of http_service/check_models.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7 import os
8 import sys
9
10 from bugbug.models.component import ComponentModel
11 from bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel
12 from bugbug.models.regression import RegressionModel
13
14 logging.basicConfig(level=logging.INFO)
15 LOGGER = logging.getLogger()
16
17 MODELS = {
18 "defectenhancementtask": DefectEnhancementTaskModel,
19 "component": ComponentModel,
20 "regression": RegressionModel,
21 }
22 MODELS_DIR = os.path.join(os.path.dirname(__file__), "models")
23
24
25 def load_model(model):
26 model_file_path = os.path.join(MODELS_DIR, f"{model}model")
27 LOGGER.info(f"Lookup model in {model_file_path}")
28 model = MODELS[model].load(model_file_path)
29 return model
30
31
32 def check_models():
33 for model_name in MODELS.keys():
34 # Try loading the model
35 load_model(model_name)
36
37
38 if __name__ == "__main__":
39 try:
40 check_models()
41 except Exception:
42 LOGGER.warning(
43 "Failed to validate the models, please run `python models.py download`",
44 exc_info=True,
45 )
46 sys.exit(1)
47
[end of http_service/check_models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/http_service/check_models.py b/http_service/check_models.py
--- a/http_service/check_models.py
+++ b/http_service/check_models.py
@@ -36,6 +36,13 @@
if __name__ == "__main__":
+
+ should_check_models = os.environ.get("CHECK_MODELS", "1")
+
+ if should_check_models == "0":
+ print("Skipping checking models as instructed by env var $CHECK_MODELS")
+ sys.exit(0)
+
try:
check_models()
except Exception:
| {"golden_diff": "diff --git a/http_service/check_models.py b/http_service/check_models.py\n--- a/http_service/check_models.py\n+++ b/http_service/check_models.py\n@@ -36,6 +36,13 @@\n \n \n if __name__ == \"__main__\":\n+\n+ should_check_models = os.environ.get(\"CHECK_MODELS\", \"1\")\n+\n+ if should_check_models == \"0\":\n+ print(\"Skipping checking models as instructed by env var $CHECK_MODELS\")\n+ sys.exit(0)\n+\n try:\n check_models()\n except Exception:\n", "issue": "Figure out what to do with http_service on CI\nWe have two options:\r\n- build the http_service with fake models and don't push it on CI. Build it with real models and push it after training;\r\n- build the http_service without models and let it download models at runtime.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nimport sys\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS = {\n \"defectenhancementtask\": DefectEnhancementTaskModel,\n \"component\": ComponentModel,\n \"regression\": RegressionModel,\n}\nMODELS_DIR = os.path.join(os.path.dirname(__file__), \"models\")\n\n\ndef load_model(model):\n model_file_path = os.path.join(MODELS_DIR, f\"{model}model\")\n LOGGER.info(f\"Lookup model in {model_file_path}\")\n model = MODELS[model].load(model_file_path)\n return model\n\n\ndef check_models():\n for model_name in MODELS.keys():\n # Try loading the model\n load_model(model_name)\n\n\nif __name__ == \"__main__\":\n try:\n check_models()\n except Exception:\n LOGGER.warning(\n \"Failed to validate the models, please run `python models.py download`\",\n exc_info=True,\n )\n sys.exit(1)\n", "path": "http_service/check_models.py"}]} | 991 | 120 |
gh_patches_debug_43397 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add token to websockets connector
Currently, anyone who knows the opsdroid URL and endpoint will be able to request a socket to initialize a websocket connection.
Ideally, we should allow users to select a token in the configuration settings. When opsdroid gets a request, if the token doesn't exist then we just reject the request.
This will also be a nice feature to implement alongside opsdroid-web v2.
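For illustration, the rejection could look roughly like this inside the aiohttp handlers (a sketch, assuming the token is read from the connector configuration and clients send it in an `Authorization` header):
```python
# Sketch: reject requests whose Authorization header doesn't match the
# token configured for the connector; skip the check if no token is set.
import aiohttp.web


async def validate_request(request: aiohttp.web.Request, configured_token: str) -> bool:
    client_token = request.headers.get("Authorization")
    if configured_token and client_token != configured_token:
        raise aiohttp.web.HTTPUnauthorized()
    return True
```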
</issue>
<code>
[start of opsdroid/connector/websocket/__init__.py]
1 """A connector which allows websocket connections."""
2 import json
3 import logging
4 import uuid
5 from datetime import datetime
6
7 import aiohttp
8 import aiohttp.web
9 from aiohttp import WSCloseCode
10 from opsdroid.connector import Connector, register_event
11 from opsdroid.events import Message
12
13 _LOGGER = logging.getLogger(__name__)
14 HEADERS = {"Access-Control-Allow-Origin": "*"}
15 CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
16
17
18 class ConnectorWebsocket(Connector):
19 """A connector which allows websocket connections."""
20
21 def __init__(self, config, opsdroid=None):
22 """Create the connector."""
23 super().__init__(config, opsdroid=opsdroid)
24 _LOGGER.debug(_("Starting Websocket connector."))
25 self.name = config.get("name", "websocket")
26 self.max_connections = self.config.get("max-connections", 10)
27 self.connection_timeout = self.config.get("connection-timeout", 60)
28 self.accepting_connections = True
29 self.active_connections = {}
30 self.available_connections = []
31 self.bot_name = self.config.get("bot-name", "opsdroid")
32
33 async def connect(self):
34 """Connect to the chat service."""
35 self.accepting_connections = True
36
37 self.opsdroid.web_server.web_app.router.add_get(
38 "/connector/websocket/{socket}", self.websocket_handler
39 )
40
41 self.opsdroid.web_server.web_app.router.add_post(
42 "/connector/websocket", self.new_websocket_handler
43 )
44
45 async def disconnect(self):
46 """Disconnect from current sessions."""
47 self.accepting_connections = False
48 connections_to_close = self.active_connections.copy()
49 for connection in connections_to_close:
50 await connections_to_close[connection].close(
51 code=WSCloseCode.GOING_AWAY, message="Server shutdown"
52 )
53
54 async def new_websocket_handler(self, request):
55 """Handle for aiohttp creating websocket connections."""
56 if (
57 len(self.active_connections) + len(self.available_connections)
58 < self.max_connections
59 and self.accepting_connections
60 ):
61 socket = {"id": str(uuid.uuid1()), "date": datetime.now()}
62 self.available_connections.append(socket)
63 return aiohttp.web.Response(
64 text=json.dumps({"socket": socket["id"]}), headers=HEADERS, status=200
65 )
66 return aiohttp.web.Response(
67 text=json.dumps("No connections available"), headers=HEADERS, status=429
68 )
69
70 async def websocket_handler(self, request):
71 """Handle for aiohttp handling websocket connections."""
72 socket = request.match_info.get("socket")
73 available = [
74 item for item in self.available_connections if item["id"] == socket
75 ]
76 if len(available) != 1:
77 return aiohttp.web.Response(
78 text=json.dumps("Please request a socket first"),
79 headers=HEADERS,
80 status=400,
81 )
82 if (
83 datetime.now() - available[0]["date"]
84 ).total_seconds() > self.connection_timeout:
85 self.available_connections.remove(available[0])
86 return aiohttp.web.Response(
87 text=json.dumps("Socket request timed out"), headers=HEADERS, status=408
88 )
89 self.available_connections.remove(available[0])
90 _LOGGER.debug(_("User connected to %s."), socket)
91
92 websocket = aiohttp.web.WebSocketResponse()
93 await websocket.prepare(request)
94
95 self.active_connections[socket] = websocket
96 async for msg in websocket:
97 if msg.type == aiohttp.WSMsgType.TEXT:
98 message = Message(text=msg.data, user=None, target=None, connector=self)
99 await self.opsdroid.parse(message)
100 elif msg.type == aiohttp.WSMsgType.ERROR:
101 _LOGGER.error(
102 _("Websocket connection closed with exception %s."),
103 websocket.exception(),
104 )
105
106 _LOGGER.info(_("websocket connection closed"))
107 self.active_connections.pop(socket, None)
108
109 return websocket
110
111 async def listen(self):
112 """Listen for and parse new messages.
113
114 Listening is handled by the aiohttp web server so
115 we don't need to do anything here.
116
117 """
118
119 @register_event(Message)
120 async def send_message(self, message):
121 """Respond with a message."""
122 try:
123 if message.target is None:
124 message.target = next(iter(self.active_connections))
125 _LOGGER.debug(
126 _("Responding with: '%s' in target %s"), message.text, message.target
127 )
128 await self.active_connections[message.target].send_str(message.text)
129 except KeyError:
130 _LOGGER.error(_("No active socket for target %s"), message.target)
131
[end of opsdroid/connector/websocket/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/connector/websocket/__init__.py b/opsdroid/connector/websocket/__init__.py
--- a/opsdroid/connector/websocket/__init__.py
+++ b/opsdroid/connector/websocket/__init__.py
@@ -9,12 +9,49 @@
from aiohttp import WSCloseCode
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
+import dataclasses
+from typing import Optional
_LOGGER = logging.getLogger(__name__)
HEADERS = {"Access-Control-Allow-Origin": "*"}
CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
[email protected]
+class WebsocketMessage:
+ """A message received from a websocket connection."""
+
+ message: str
+ user: Optional[str]
+ socket: Optional[str]
+
+ @classmethod
+ def parse_payload(cls, payload: str):
+ """Parse the payload of a websocket message.
+
+ We will try to parse the payload as a json string.
+ If that fails, we will use the default values which are:
+
+ message: str
+ user: None
+ socket: None
+
+ """
+ try:
+ data = json.loads(payload)
+ return cls(
+ message=data.get("message"),
+ user=data.get("user"),
+ socket=data.get("socket"),
+ )
+ except json.JSONDecodeError:
+ return cls(
+ message=payload,
+ user=None,
+ socket=None,
+ )
+
+
class ConnectorWebsocket(Connector):
"""A connector which allows websocket connections."""
@@ -29,6 +66,7 @@
self.active_connections = {}
self.available_connections = []
self.bot_name = self.config.get("bot-name", "opsdroid")
+ self.authorization_token = self.config.get("token")
async def connect(self):
"""Connect to the chat service."""
@@ -53,6 +91,7 @@
async def new_websocket_handler(self, request):
"""Handle for aiohttp creating websocket connections."""
+ await self.validate_request(request)
if (
len(self.active_connections) + len(self.available_connections)
< self.max_connections
@@ -95,7 +134,13 @@
self.active_connections[socket] = websocket
async for msg in websocket:
if msg.type == aiohttp.WSMsgType.TEXT:
- message = Message(text=msg.data, user=None, target=None, connector=self)
+ payload = WebsocketMessage.parse_payload(msg.data)
+ message = Message(
+ text=payload.message,
+ user=payload.user,
+ target=payload.socket,
+ connector=self,
+ )
await self.opsdroid.parse(message)
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.error(
@@ -108,6 +153,20 @@
return websocket
+ async def validate_request(self, request):
+ """Validate the request by looking at headers and the connector token.
+
+ If the token does not exist in the header, but exists in the configuration,
+ then we will simply return a Forbidden error.
+
+ """
+ client_token = request.headers.get("Authorization")
+ if self.authorization_token and (
+ client_token is None or client_token != self.authorization_token
+ ):
+ raise aiohttp.web.HTTPUnauthorized()
+ return True
+
async def listen(self):
"""Listen for and parse new messages.
@@ -117,7 +176,7 @@
"""
@register_event(Message)
- async def send_message(self, message):
+ async def send_message(self, message: Message):
"""Respond with a message."""
try:
if message.target is None:
| {"golden_diff": "diff --git a/opsdroid/connector/websocket/__init__.py b/opsdroid/connector/websocket/__init__.py\n--- a/opsdroid/connector/websocket/__init__.py\n+++ b/opsdroid/connector/websocket/__init__.py\n@@ -9,12 +9,49 @@\n from aiohttp import WSCloseCode\n from opsdroid.connector import Connector, register_event\n from opsdroid.events import Message\n+import dataclasses\n+from typing import Optional\n \n _LOGGER = logging.getLogger(__name__)\n HEADERS = {\"Access-Control-Allow-Origin\": \"*\"}\n CONFIG_SCHEMA = {\"bot-name\": str, \"max-connections\": int, \"connection-timeout\": int}\n \n \[email protected]\n+class WebsocketMessage:\n+ \"\"\"A message received from a websocket connection.\"\"\"\n+\n+ message: str\n+ user: Optional[str]\n+ socket: Optional[str]\n+\n+ @classmethod\n+ def parse_payload(cls, payload: str):\n+ \"\"\"Parse the payload of a websocket message.\n+\n+ We will try to parse the payload as a json string.\n+ If that fails, we will use the default values which are:\n+\n+ message: str\n+ user: None\n+ socket: None\n+\n+ \"\"\"\n+ try:\n+ data = json.loads(payload)\n+ return cls(\n+ message=data.get(\"message\"),\n+ user=data.get(\"user\"),\n+ socket=data.get(\"socket\"),\n+ )\n+ except json.JSONDecodeError:\n+ return cls(\n+ message=payload,\n+ user=None,\n+ socket=None,\n+ )\n+\n+\n class ConnectorWebsocket(Connector):\n \"\"\"A connector which allows websocket connections.\"\"\"\n \n@@ -29,6 +66,7 @@\n self.active_connections = {}\n self.available_connections = []\n self.bot_name = self.config.get(\"bot-name\", \"opsdroid\")\n+ self.authorization_token = self.config.get(\"token\")\n \n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n@@ -53,6 +91,7 @@\n \n async def new_websocket_handler(self, request):\n \"\"\"Handle for aiohttp creating websocket connections.\"\"\"\n+ await self.validate_request(request)\n if (\n len(self.active_connections) + len(self.available_connections)\n < self.max_connections\n@@ -95,7 +134,13 @@\n self.active_connections[socket] = websocket\n async for msg in websocket:\n if msg.type == aiohttp.WSMsgType.TEXT:\n- message = Message(text=msg.data, user=None, target=None, connector=self)\n+ payload = WebsocketMessage.parse_payload(msg.data)\n+ message = Message(\n+ text=payload.message,\n+ user=payload.user,\n+ target=payload.socket,\n+ connector=self,\n+ )\n await self.opsdroid.parse(message)\n elif msg.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\n@@ -108,6 +153,20 @@\n \n return websocket\n \n+ async def validate_request(self, request):\n+ \"\"\"Validate the request by looking at headers and the connector token.\n+\n+ If the token does not exist in the header, but exists in the configuration,\n+ then we will simply return a Forbidden error.\n+\n+ \"\"\"\n+ client_token = request.headers.get(\"Authorization\")\n+ if self.authorization_token and (\n+ client_token is None or client_token != self.authorization_token\n+ ):\n+ raise aiohttp.web.HTTPUnauthorized()\n+ return True\n+\n async def listen(self):\n \"\"\"Listen for and parse new messages.\n \n@@ -117,7 +176,7 @@\n \"\"\"\n \n @register_event(Message)\n- async def send_message(self, message):\n+ async def send_message(self, message: Message):\n \"\"\"Respond with a message.\"\"\"\n try:\n if message.target is None:\n", "issue": "Add token to websockets connector\nCurrently, anyone that knows opdroid url and endpoint will be able to request a socket to initialize a websocket connection.\r\n\r\nIdeally, we should allow users to select a token in the 
configuration settings. When opsdroid gets a request, if the token doesn't exist then we just reject the request.\r\n\r\nThis will also a nice feature to implement along side opsdroid-web v2\n", "before_files": [{"content": "\"\"\"A connector which allows websocket connections.\"\"\"\nimport json\nimport logging\nimport uuid\nfrom datetime import datetime\n\nimport aiohttp\nimport aiohttp.web\nfrom aiohttp import WSCloseCode\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nHEADERS = {\"Access-Control-Allow-Origin\": \"*\"}\nCONFIG_SCHEMA = {\"bot-name\": str, \"max-connections\": int, \"connection-timeout\": int}\n\n\nclass ConnectorWebsocket(Connector):\n \"\"\"A connector which allows websocket connections.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Websocket connector.\"))\n self.name = config.get(\"name\", \"websocket\")\n self.max_connections = self.config.get(\"max-connections\", 10)\n self.connection_timeout = self.config.get(\"connection-timeout\", 60)\n self.accepting_connections = True\n self.active_connections = {}\n self.available_connections = []\n self.bot_name = self.config.get(\"bot-name\", \"opsdroid\")\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n self.accepting_connections = True\n\n self.opsdroid.web_server.web_app.router.add_get(\n \"/connector/websocket/{socket}\", self.websocket_handler\n )\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/websocket\", self.new_websocket_handler\n )\n\n async def disconnect(self):\n \"\"\"Disconnect from current sessions.\"\"\"\n self.accepting_connections = False\n connections_to_close = self.active_connections.copy()\n for connection in connections_to_close:\n await connections_to_close[connection].close(\n code=WSCloseCode.GOING_AWAY, message=\"Server shutdown\"\n )\n\n async def new_websocket_handler(self, request):\n \"\"\"Handle for aiohttp creating websocket connections.\"\"\"\n if (\n len(self.active_connections) + len(self.available_connections)\n < self.max_connections\n and self.accepting_connections\n ):\n socket = {\"id\": str(uuid.uuid1()), \"date\": datetime.now()}\n self.available_connections.append(socket)\n return aiohttp.web.Response(\n text=json.dumps({\"socket\": socket[\"id\"]}), headers=HEADERS, status=200\n )\n return aiohttp.web.Response(\n text=json.dumps(\"No connections available\"), headers=HEADERS, status=429\n )\n\n async def websocket_handler(self, request):\n \"\"\"Handle for aiohttp handling websocket connections.\"\"\"\n socket = request.match_info.get(\"socket\")\n available = [\n item for item in self.available_connections if item[\"id\"] == socket\n ]\n if len(available) != 1:\n return aiohttp.web.Response(\n text=json.dumps(\"Please request a socket first\"),\n headers=HEADERS,\n status=400,\n )\n if (\n datetime.now() - available[0][\"date\"]\n ).total_seconds() > self.connection_timeout:\n self.available_connections.remove(available[0])\n return aiohttp.web.Response(\n text=json.dumps(\"Socket request timed out\"), headers=HEADERS, status=408\n )\n self.available_connections.remove(available[0])\n _LOGGER.debug(_(\"User connected to %s.\"), socket)\n\n websocket = aiohttp.web.WebSocketResponse()\n await websocket.prepare(request)\n\n self.active_connections[socket] = websocket\n async for msg in websocket:\n if msg.type == 
aiohttp.WSMsgType.TEXT:\n message = Message(text=msg.data, user=None, target=None, connector=self)\n await self.opsdroid.parse(message)\n elif msg.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\n _(\"Websocket connection closed with exception %s.\"),\n websocket.exception(),\n )\n\n _LOGGER.info(_(\"websocket connection closed\"))\n self.active_connections.pop(socket, None)\n\n return websocket\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\n\n Listening is handled by the aiohttp web server so\n we don't need to do anything here.\n\n \"\"\"\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n try:\n if message.target is None:\n message.target = next(iter(self.active_connections))\n _LOGGER.debug(\n _(\"Responding with: '%s' in target %s\"), message.text, message.target\n )\n await self.active_connections[message.target].send_str(message.text)\n except KeyError:\n _LOGGER.error(_(\"No active socket for target %s\"), message.target)\n", "path": "opsdroid/connector/websocket/__init__.py"}]} | 1,914 | 851 |
gh_patches_debug_30899 | rasdani/github-patches | git_diff | litestar-org__litestar-1797 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs: DTO Factory exclude fields in nested complex types
### Summary
We need to document the procedure for excluding fields from nested complex types, e.g.:
```py
class Model1:
a: int
b: str
class Model2:
c: int
d: str
class Model3:
e: Union[Model1, Model2]
config = DTOConfig(exclude={"e.0.a", "e.1.c"})
...
```
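Here the integer path segment selects a member of the nested annotation by position, so `"e.0.a"` excludes `a` from `Model1` and `"e.1.c"` excludes `c` from `Model2`. As a hedged sketch, the same dotted syntax would read like this for a collection field (model and field names are hypothetical):
```python
from dataclasses import dataclass
from typing import List

from litestar.dto.factory import DTOConfig


@dataclass
class Pet:
    id: int
    name: str


@dataclass
class Owner:
    pets: List[Pet]


# "pets.0.id" addresses the `id` field of the list's single type argument.
config = DTOConfig(exclude={"pets.0.id"})
```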
</issue>
<code>
[start of docs/examples/data_transfer_objects/factory/excluding_fields.py]
1 from datetime import datetime
2 from uuid import UUID
3
4 from sqlalchemy import ForeignKey
5 from sqlalchemy.orm import Mapped, mapped_column, relationship
6 from typing_extensions import Annotated
7
8 from litestar import Litestar, post
9 from litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO
10 from litestar.dto.factory import DTOConfig, dto_field
11
12 from .my_lib import Base
13
14
15 class Address(Base):
16 street: Mapped[str]
17 city: Mapped[str]
18 state: Mapped[str]
19 zip: Mapped[str]
20
21
22 class User(Base):
23 name: Mapped[str]
24 password: Mapped[str] = mapped_column(info=dto_field("private"))
25 created_at: Mapped[datetime] = mapped_column(info=dto_field("read-only"))
26 address_id: Mapped[UUID] = mapped_column(ForeignKey("address.id"), info=dto_field("private"))
27 address: Mapped[Address] = relationship(info=dto_field("read-only"))
28
29
30 UserDTO = SQLAlchemyDTO[User]
31 config = DTOConfig(exclude={"id", "address.id", "address.street"})
32 ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]
33
34
35 @post("/users", dto=UserDTO, return_dto=ReadUserDTO, sync_to_thread=False)
36 def create_user(data: User) -> User:
37 data.created_at = datetime.min
38 data.address = Address(street="123 Main St", city="Anytown", state="NY", zip="12345")
39 return data
40
41
42 app = Litestar(route_handlers=[create_user])
43
44 # run: /users -H "Content-Type: application/json" -d '{"name":"Litestar User","password":"xyz","created_at":"2023-04-24T00:00:00Z"}'
45
[end of docs/examples/data_transfer_objects/factory/excluding_fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/data_transfer_objects/factory/excluding_fields.py b/docs/examples/data_transfer_objects/factory/excluding_fields.py
--- a/docs/examples/data_transfer_objects/factory/excluding_fields.py
+++ b/docs/examples/data_transfer_objects/factory/excluding_fields.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from typing import List
from uuid import UUID
from sqlalchemy import ForeignKey
@@ -19,16 +20,30 @@
zip: Mapped[str]
+class Pets(Base):
+ name: Mapped[str]
+ user_id: Mapped[UUID] = mapped_column(ForeignKey("user.id"))
+
+
class User(Base):
name: Mapped[str]
password: Mapped[str] = mapped_column(info=dto_field("private"))
created_at: Mapped[datetime] = mapped_column(info=dto_field("read-only"))
address_id: Mapped[UUID] = mapped_column(ForeignKey("address.id"), info=dto_field("private"))
address: Mapped[Address] = relationship(info=dto_field("read-only"))
+ pets: Mapped[List[Pets]] = relationship(info=dto_field("read-only"))
UserDTO = SQLAlchemyDTO[User]
-config = DTOConfig(exclude={"id", "address.id", "address.street"})
+config = DTOConfig(
+ exclude={
+ "id",
+ "address.id",
+ "address.street",
+ "pets.0.id",
+ "pets.0.user_id",
+ }
+)
ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]
@@ -36,6 +51,7 @@
def create_user(data: User) -> User:
data.created_at = datetime.min
data.address = Address(street="123 Main St", city="Anytown", state="NY", zip="12345")
+ data.pets = [Pets(id=1, name="Fido"), Pets(id=2, name="Spot")]
return data
| {"golden_diff": "diff --git a/docs/examples/data_transfer_objects/factory/excluding_fields.py b/docs/examples/data_transfer_objects/factory/excluding_fields.py\n--- a/docs/examples/data_transfer_objects/factory/excluding_fields.py\n+++ b/docs/examples/data_transfer_objects/factory/excluding_fields.py\n@@ -1,4 +1,5 @@\n from datetime import datetime\n+from typing import List\n from uuid import UUID\n \n from sqlalchemy import ForeignKey\n@@ -19,16 +20,30 @@\n zip: Mapped[str]\n \n \n+class Pets(Base):\n+ name: Mapped[str]\n+ user_id: Mapped[UUID] = mapped_column(ForeignKey(\"user.id\"))\n+\n+\n class User(Base):\n name: Mapped[str]\n password: Mapped[str] = mapped_column(info=dto_field(\"private\"))\n created_at: Mapped[datetime] = mapped_column(info=dto_field(\"read-only\"))\n address_id: Mapped[UUID] = mapped_column(ForeignKey(\"address.id\"), info=dto_field(\"private\"))\n address: Mapped[Address] = relationship(info=dto_field(\"read-only\"))\n+ pets: Mapped[List[Pets]] = relationship(info=dto_field(\"read-only\"))\n \n \n UserDTO = SQLAlchemyDTO[User]\n-config = DTOConfig(exclude={\"id\", \"address.id\", \"address.street\"})\n+config = DTOConfig(\n+ exclude={\n+ \"id\",\n+ \"address.id\",\n+ \"address.street\",\n+ \"pets.0.id\",\n+ \"pets.0.user_id\",\n+ }\n+)\n ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]\n \n \n@@ -36,6 +51,7 @@\n def create_user(data: User) -> User:\n data.created_at = datetime.min\n data.address = Address(street=\"123 Main St\", city=\"Anytown\", state=\"NY\", zip=\"12345\")\n+ data.pets = [Pets(id=1, name=\"Fido\"), Pets(id=2, name=\"Spot\")]\n return data\n", "issue": "Docs: DTO Factory exclude fields in nested complex types\n### Summary\n\nNeed to document procedure for excluding fields from nested complex types, e.g.,\r\n\r\n```py\r\nclass Model1:\r\n a: int\r\n b: str\r\n\r\nclass Model2:\r\n c: int\r\n d: str\r\n\r\nclass Model3:\r\n e: Union[Model1, Model2]\r\n\r\nconfig = DTOConfig(exclude={\"e.0.a\", \"e.1.c\"})\r\n...\r\n```\n", "before_files": [{"content": "from datetime import datetime\nfrom uuid import UUID\n\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.orm import Mapped, mapped_column, relationship\nfrom typing_extensions import Annotated\n\nfrom litestar import Litestar, post\nfrom litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO\nfrom litestar.dto.factory import DTOConfig, dto_field\n\nfrom .my_lib import Base\n\n\nclass Address(Base):\n street: Mapped[str]\n city: Mapped[str]\n state: Mapped[str]\n zip: Mapped[str]\n\n\nclass User(Base):\n name: Mapped[str]\n password: Mapped[str] = mapped_column(info=dto_field(\"private\"))\n created_at: Mapped[datetime] = mapped_column(info=dto_field(\"read-only\"))\n address_id: Mapped[UUID] = mapped_column(ForeignKey(\"address.id\"), info=dto_field(\"private\"))\n address: Mapped[Address] = relationship(info=dto_field(\"read-only\"))\n\n\nUserDTO = SQLAlchemyDTO[User]\nconfig = DTOConfig(exclude={\"id\", \"address.id\", \"address.street\"})\nReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]\n\n\n@post(\"/users\", dto=UserDTO, return_dto=ReadUserDTO, sync_to_thread=False)\ndef create_user(data: User) -> User:\n data.created_at = datetime.min\n data.address = Address(street=\"123 Main St\", city=\"Anytown\", state=\"NY\", zip=\"12345\")\n return data\n\n\napp = Litestar(route_handlers=[create_user])\n\n# run: /users -H \"Content-Type: application/json\" -d '{\"name\":\"Litestar User\",\"password\":\"xyz\",\"created_at\":\"2023-04-24T00:00:00Z\"}'\n", "path": 
"docs/examples/data_transfer_objects/factory/excluding_fields.py"}]} | 1,114 | 438 |
gh_patches_debug_18488 | rasdani/github-patches | git_diff | facebookresearch__hydra-1054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] [Hydra-Submitit-Launcher] Add support for passing `additional_parameters` as supported by Submititi
# 🚀 Feature Request
When calling [Submitit](https://github.com/facebookincubator/submitit) directly (not the plugin), one can pass an optional dictionary argument called [`additional_parameters`](https://github.com/facebookincubator/submitit/blob/e6fc6b2532a2ea480f08747816f14c53f526ef04/submitit/slurm/slurm.py#L432) to add parameters which are not currently available in submitit.
This argument cannot be passed when using the submitit plugin. Example error:
```
Key 'additional_parameters' not in 'SlurmQueueConf'
full_key: hydra.launcher.additional_parameters
reference_type=Optional[SlurmQueueConf]
object_type=SlurmQueueConf
```
We should add support for passing this `additional_parameters` dict via the plugin as well.
## Motivation
Same as above.
## Pitch
Add support for the dict in [SlurmQueueConf](https://github.com/facebookresearch/hydra/blob/master/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py#L31) and pass to submitit.AutoExecutor [here](https://github.com/facebookresearch/hydra/blob/dfbebc34020ad3d4bf1ab061565408740e62eb22/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/submitit_launcher.py#L105)
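For reference, this mirrors what plain submitit already supports when used directly (a sketch; the mail options are placeholder sbatch flags):
```python
# Sketch: `additional_parameters` as used with submitit directly.
import submitit

executor = submitit.SlurmExecutor(folder="submitit_logs")
executor.update_parameters(
    # Arbitrary sbatch flags not otherwise exposed by submitit:
    additional_parameters={"mail-user": "[email protected]", "mail-type": "BEGIN"},
)
```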
**Are you willing to open a pull request?** (See [CONTRIBUTING](../../CONTRIBUTING.md)): Yes
## Additional context
Since this change is quite small, is there any reason it wasn't done in the first iteration of the plugin?
</issue>
<code>
[start of plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass
3 from typing import Optional
4
5 from hydra.core.config_store import ConfigStore
6
7
8 @dataclass
9 class BaseQueueConf:
10 """Configuration shared by all executors"""
11
12 submitit_folder: str = "${hydra.sweep.dir}/.submitit/%j"
13
14 # maximum time for the job in minutes
15 timeout_min: int = 60
16 # number of cpus to use for each task
17 cpus_per_task: int = 1
18 # number of gpus to use on each node
19 gpus_per_node: int = 0
20 # number of tasks to spawn on each node
21 tasks_per_node: int = 1
22 # memory to reserve for the job on each node (in GB)
23 mem_gb: int = 4
24 # number of nodes to use for the job
25 nodes: int = 1
26 # name of the job
27 name: str = "${hydra.job.name}"
28
29
30 @dataclass
31 class SlurmQueueConf(BaseQueueConf):
32 """Slurm configuration overrides and specific parameters"""
33
34 _target_: str = (
35 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher"
36 )
37
38 # Params are used to configure sbatch, for more info check:
39 # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py
40
41 # Following parameters are slurm specific
42 # More information: https://slurm.schedmd.com/sbatch.html
43 #
44 # slurm partition to use on the cluster
45 partition: Optional[str] = None
46 comment: Optional[str] = None
47 constraint: Optional[str] = None
48 exclude: Optional[str] = None
49
50 # Following parameters are submitit specifics
51 #
52 # USR1 signal delay before timeout
53 signal_delay_s: int = 120
54 # Maximum number of retries on job timeout.
55 # Change this only after you confirmed your code can handle re-submission
56 # by properly resuming from the latest stored checkpoint.
57 # check the following for more info on slurm_max_num_timeout
58 # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md
59 max_num_timeout: int = 0
60
61
62 @dataclass
63 class LocalQueueConf(BaseQueueConf):
64 _target_: str = (
65 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher"
66 )
67
68
69 # finally, register two different choices:
70 ConfigStore.instance().store(
71 group="hydra/launcher",
72 name="submitit_local",
73 node=LocalQueueConf(),
74 provider="submitit_launcher",
75 )
76
77
78 ConfigStore.instance().store(
79 group="hydra/launcher",
80 name="submitit_slurm",
81 node=SlurmQueueConf(),
82 provider="submitit_launcher",
83 )
84
[end of plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
@@ -1,6 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from dataclasses import dataclass
-from typing import Optional
+from dataclasses import dataclass, field
+from typing import Any, Dict, Optional
from hydra.core.config_store import ConfigStore
@@ -57,6 +57,9 @@
# check the following for more info on slurm_max_num_timeout
# https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md
max_num_timeout: int = 0
+ # Useful to add parameters which are not currently available in the plugin.
+ # Eg: {"mail-user": "[email protected]", "mail-type": "BEGIN"}
+ additional_parameters: Dict[str, Any] = field(default_factory=dict)
@dataclass
| {"golden_diff": "diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n@@ -1,6 +1,6 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n-from dataclasses import dataclass\n-from typing import Optional\n+from dataclasses import dataclass, field\n+from typing import Any, Dict, Optional\n \n from hydra.core.config_store import ConfigStore\n \n@@ -57,6 +57,9 @@\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n+ # Useful to add parameters which are not currently available in the plugin.\n+ # Eg: {\"mail-user\": \"[email protected]\", \"mail-type\": \"BEGIN\"}\n+ additional_parameters: Dict[str, Any] = field(default_factory=dict)\n \n \n @dataclass\n", "issue": "[Feature Request] [Hydra-Submitit-Launcher] Add support for passing `additional_parameters` as supported by Submititi\n# \ud83d\ude80 Feature Request\r\n\r\nWhen calling [Submitit](https://github.com/facebookincubator/submitit) directly (not the plugin), one can pass an optional dictionary argument called [`additional_parameters`](https://github.com/facebookincubator/submitit/blob/e6fc6b2532a2ea480f08747816f14c53f526ef04/submitit/slurm/slurm.py#L432) to add parameters which are not currently available in submitit.\r\n\r\nThis argument can not be passed when using the submitit-plugin. Example error:\r\n\r\n```\r\nKey 'additional_parameters' not in 'SlurmQueueConf'\r\n full_key: hydra.launcher.additional_parameters\r\n reference_type=Optional[SlurmQueueConf]\r\n object_type=SlurmQueueConf\r\n```\r\n\r\nWe should add support for passing this `additional_parameters` dict via the plugin aswell.\r\n\r\n## Motivation\r\n\r\nSame as above.\r\n\r\n## Pitch\r\n\r\nAdd support for the dict in [SlurmQueueConf](https://github.com/facebookresearch/hydra/blob/master/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py#L31) and pass to submitit.AutoExecutor [here](https://github.com/facebookresearch/hydra/blob/dfbebc34020ad3d4bf1ab061565408740e62eb22/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/submitit_launcher.py#L105)\r\n\r\n**Are you willing to open a pull request?** (See [CONTRIBUTING](../../CONTRIBUTING.md)): Yes\r\n\r\n## Additional context\r\n\r\nSince this change is quite small, is there any reason it wasnt done in the first iteration of the plugin?\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass BaseQueueConf:\n \"\"\"Configuration shared by all executors\"\"\"\n\n submitit_folder: str = \"${hydra.sweep.dir}/.submitit/%j\"\n\n # maximum time for the job in minutes\n timeout_min: int = 60\n # number of cpus to use for each task\n cpus_per_task: int = 1\n # number of gpus to use on each node\n gpus_per_node: int = 0\n # number of tasks to spawn on each node\n tasks_per_node: int = 1\n # memory to reserve for the job on each node (in GB)\n mem_gb: int = 4\n # number of nodes to use for the job\n nodes: int = 1\n # name of the job\n name: str = \"${hydra.job.name}\"\n\n\n@dataclass\nclass SlurmQueueConf(BaseQueueConf):\n \"\"\"Slurm configuration overrides and specific parameters\"\"\"\n\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher\"\n )\n\n # Params are used to configure sbatch, for more info check:\n # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py\n\n # Following parameters are slurm specific\n # More information: https://slurm.schedmd.com/sbatch.html\n #\n # slurm partition to use on the cluster\n partition: Optional[str] = None\n comment: Optional[str] = None\n constraint: Optional[str] = None\n exclude: Optional[str] = None\n\n # Following parameters are submitit specifics\n #\n # USR1 signal delay before timeout\n signal_delay_s: int = 120\n # Maximum number of retries on job timeout.\n # Change this only after you confirmed your code can handle re-submission\n # by properly resuming from the latest stored checkpoint.\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n\n\n@dataclass\nclass LocalQueueConf(BaseQueueConf):\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher\"\n )\n\n\n# finally, register two different choices:\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_local\",\n node=LocalQueueConf(),\n provider=\"submitit_launcher\",\n)\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_slurm\",\n node=SlurmQueueConf(),\n provider=\"submitit_launcher\",\n)\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py"}]} | 1,788 | 277 |
gh_patches_debug_3636 | rasdani/github-patches | git_diff | safe-global__safe-config-service-68 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add /check endpoint
This endpoint should be used to check if the server is alive and ready to receive requests.
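A minimal sketch of what this could look like in the Django URL config (the handler body is illustrative):
```python
# Sketch: a liveness endpoint that simply answers 200 "Ok".
from django.http import HttpResponse
from django.urls import path

urlpatterns = [
    path("check/", lambda request: HttpResponse("Ok"), name="check"),
]
```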
</issue>
<code>
[start of src/config/urls.py]
1 from django.contrib import admin
2 from django.urls import include, path
3
4 urlpatterns = [
5 path("api/v1/", include("safe_apps.urls", namespace="v1")),
6 path("admin/", admin.site.urls),
7 ]
8
[end of src/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/config/urls.py b/src/config/urls.py
--- a/src/config/urls.py
+++ b/src/config/urls.py
@@ -1,7 +1,9 @@
from django.contrib import admin
+from django.http import HttpResponse
from django.urls import include, path
urlpatterns = [
path("api/v1/", include("safe_apps.urls", namespace="v1")),
path("admin/", admin.site.urls),
+ path("check/", lambda request: HttpResponse("Ok"), name="check"),
]
| {"golden_diff": "diff --git a/src/config/urls.py b/src/config/urls.py\n--- a/src/config/urls.py\n+++ b/src/config/urls.py\n@@ -1,7 +1,9 @@\n from django.contrib import admin\n+from django.http import HttpResponse\n from django.urls import include, path\n \n urlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"admin/\", admin.site.urls),\n+ path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\n ]\n", "issue": "Add /check endpoint\nThis endpoint should be used to check if the server is alive and ready to receive requests\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"admin/\", admin.site.urls),\n]\n", "path": "src/config/urls.py"}]} | 613 | 112 |
gh_patches_debug_34281 | rasdani/github-patches | git_diff | joke2k__faker-830 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
zh_CN (and possibly other providers) return invalid results for domain_name
### Steps to reproduce
```python
f = Faker('zh_CN')
f.domain_name()
# -> 'he.cn'
```
### Expected behavior
It should return valid domain names.
### Actual behavior
`he.cn` is not a valid domain: `he` is in the list of second-level domains (https://en.wikipedia.org/wiki/.cn#Second-level_domains_of_provinces), so results like this cause failures in places where domains are being validated.

After briefly reviewing the source code, it's unclear where this change should or could be made.
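One conceivable fix (a sketch only, with an abbreviated reserved-label set): re-draw the domain word whenever it collides with a reserved second-level label.
```python
from faker import Faker

# Abbreviated; the full list is at the Wikipedia link above.
SECOND_LEVEL_DOMAINS = {"ac", "com", "edu", "gov", "net", "org", "he", "hb", "hn"}


def safe_domain_name(fake: Faker) -> str:
    word = fake.domain_word()
    while word in SECOND_LEVEL_DOMAINS:  # e.g. re-draw on "he"
        word = fake.domain_word()
    return f"{word}.{fake.tld()}"


print(safe_domain_name(Faker("zh_CN")))
```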
</issue>
<code>
[start of faker/providers/internet/zh_CN/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as InternetProvider
4 from faker.utils.decorators import slugify
5
6
7 class Provider(InternetProvider):
8 user_name_formats = (
9 '{{last_romanized_name}}.{{first_romanized_name}}',
10 '{{first_romanized_name}}.{{last_romanized_name}}',
11 '{{first_romanized_name}}##',
12 '?{{last_romanized_name}}',
13 )
14 tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')
15
16 @slugify
17 def domain_word(self):
18 return self.generator.format('last_romanized_name')
19
[end of faker/providers/internet/zh_CN/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/internet/zh_CN/__init__.py b/faker/providers/internet/zh_CN/__init__.py
--- a/faker/providers/internet/zh_CN/__init__.py
+++ b/faker/providers/internet/zh_CN/__init__.py
@@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import unicode_literals
+from collections import OrderedDict
from .. import Provider as InternetProvider
from faker.utils.decorators import slugify
@@ -11,8 +12,54 @@
'{{first_romanized_name}}##',
'?{{last_romanized_name}}',
)
- tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')
+
+ tlds = OrderedDict((
+ ('cn', 0.8),
+ ('net', 0.1),
+ ('com', 0.05),
+ ('org', 0.05),
+ ))
+
+ second_level_domains = ('ac', 'com', 'edu', 'gov', 'mil', 'net', 'org',
+ 'ah', 'bj', 'cq', 'fj', 'gd', 'gs', 'gz', 'gx',
+ 'ha', 'hb', 'he', 'hi', 'hk', 'hl', 'hn', 'jl',
+ 'js', 'jx', 'ln', 'mo', 'nm', 'nx', 'qh', 'sc',
+ 'sd', 'sh', 'sn', 'sx', 'tj', 'xj', 'xz', 'yn', 'zj')
+
+ domain_formats = (
+ '##', '??',
+ '{{first_romanized_name}}',
+ '{{last_romanized_name}}',
+ '{{first_romanized_name}}{{last_romanized_name}}',
+ '{{last_romanized_name}}{{last_romanized_name}}',
+ '{{first_romanized_name}}{{first_romanized_name}}',
+ )
@slugify
def domain_word(self):
- return self.generator.format('last_romanized_name')
+ pattern = self.random_element(self.domain_formats)
+ if '#' in pattern or '?' in pattern:
+ return self.bothify(pattern)
+ else:
+ return self.generator.parse(pattern)
+
+ def domain_name(self, levels=1):
+ if levels < 1:
+ raise ValueError("levels must be greater than or equal to 1")
+ if levels == 1:
+ domain_word = self.domain_word()
+ # Avoids he.cn as seen in issue #687
+ while domain_word in self.second_level_domains:
+ domain_word = self.domain_word()
+ return domain_word + '.' + self.tld()
+ elif levels == 2:
+ my_tld = self.tld()
+ my_second_level = ''
+ if my_tld == 'cn':
+ my_second_level = self.random_element(self.second_level_domains)
+ else:
+ my_second_level = self.domain_word()
+ return self.domain_word() + '.' + my_second_level + '.' + my_tld
+ else:
+ return self.domain_word() + '.' + self.domain_name(levels - 1)
+
| {"golden_diff": "diff --git a/faker/providers/internet/zh_CN/__init__.py b/faker/providers/internet/zh_CN/__init__.py\n--- a/faker/providers/internet/zh_CN/__init__.py\n+++ b/faker/providers/internet/zh_CN/__init__.py\n@@ -1,5 +1,6 @@\n # coding=utf-8\n from __future__ import unicode_literals\n+from collections import OrderedDict\n from .. import Provider as InternetProvider\n from faker.utils.decorators import slugify\n \n@@ -11,8 +12,54 @@\n '{{first_romanized_name}}##',\n '?{{last_romanized_name}}',\n )\n- tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')\n+\n+ tlds = OrderedDict((\n+ ('cn', 0.8),\n+ ('net', 0.1),\n+ ('com', 0.05),\n+ ('org', 0.05),\n+ ))\n+\n+ second_level_domains = ('ac', 'com', 'edu', 'gov', 'mil', 'net', 'org',\n+ 'ah', 'bj', 'cq', 'fj', 'gd', 'gs', 'gz', 'gx',\n+ 'ha', 'hb', 'he', 'hi', 'hk', 'hl', 'hn', 'jl',\n+ 'js', 'jx', 'ln', 'mo', 'nm', 'nx', 'qh', 'sc',\n+ 'sd', 'sh', 'sn', 'sx', 'tj', 'xj', 'xz', 'yn', 'zj')\n+\n+ domain_formats = (\n+ '##', '??',\n+ '{{first_romanized_name}}',\n+ '{{last_romanized_name}}',\n+ '{{first_romanized_name}}{{last_romanized_name}}',\n+ '{{last_romanized_name}}{{last_romanized_name}}',\n+ '{{first_romanized_name}}{{first_romanized_name}}',\n+ )\n \n @slugify\n def domain_word(self):\n- return self.generator.format('last_romanized_name')\n+ pattern = self.random_element(self.domain_formats)\n+ if '#' in pattern or '?' in pattern:\n+ return self.bothify(pattern)\n+ else:\n+ return self.generator.parse(pattern)\n+\n+ def domain_name(self, levels=1):\n+ if levels < 1:\n+ raise ValueError(\"levels must be greater than or equal to 1\")\n+ if levels == 1:\n+ domain_word = self.domain_word()\n+ # Avoids he.cn as seen in issue #687\n+ while domain_word in self.second_level_domains:\n+ domain_word = self.domain_word()\n+ return domain_word + '.' + self.tld()\n+ elif levels == 2:\n+ my_tld = self.tld()\n+ my_second_level = ''\n+ if my_tld == 'cn':\n+ my_second_level = self.random_element(self.second_level_domains)\n+ else:\n+ my_second_level = self.domain_word()\n+ return self.domain_word() + '.' + my_second_level + '.' + my_tld\n+ else:\n+ return self.domain_word() + '.' + self.domain_name(levels - 1)\n+\n", "issue": "zh_CN (and possible other providers) return invalid results for domain_name\nBrief summary of the issue goes here.\r\n\r\n### Steps to reproduce\r\n\r\nf = Faker('zh_CN')\r\nf.domain_name()\r\n>he.cn\r\n\r\n### Expected behavior\r\n\r\nit should return valid domain names\r\n\r\n### Actual behavior\r\n\r\nhe.cn is not a valid domain. he is in the list of second-level domains (https://en.wikipedia.org/wiki/.cn#Second-level_domains_of_provinces) and will cause failures in places where domains are being validated.\r\n\r\nit's unclear where this change should/could be made after briefly reviewing the source code.\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\nfrom faker.utils.decorators import slugify\n\n\nclass Provider(InternetProvider):\n user_name_formats = (\n '{{last_romanized_name}}.{{first_romanized_name}}',\n '{{first_romanized_name}}.{{last_romanized_name}}',\n '{{first_romanized_name}}##',\n '?{{last_romanized_name}}',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')\n\n @slugify\n def domain_word(self):\n return self.generator.format('last_romanized_name')\n", "path": "faker/providers/internet/zh_CN/__init__.py"}]} | 853 | 725 |
gh_patches_debug_20196 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-8465 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failing to load sqlalchemy-plugin in pyinstaller'ed exe
This line of code works perfectly fine while running as a file from spyder.
engine = sqlalchemy.create_engine('teradata://uid:pwd@DBinstance?driver=Teradata')
However, after making an exe file using pyinstaller, I get the following error while running the exe:
sqlalchemy.exc.NoSuchModuleError: Can't load plugin:sqlalchemy.dialects:teradata
This looks to be an exe-creation-issue in loading libraries. Has anyone encountered a similar issue and was able to resolve? Any pointers would be appreciated.
</issue>
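For readers hitting the same `NoSuchModuleError` before a fix ships, a minimal private hook is one way to pull in entry-point-registered dialects. This is only a sketch: it uses PyInstaller's `collect_entry_point` helper (the same one the eventual fix relies on) and assumes the dialect package is installed in the build environment.

```python
# hook-sqlalchemy.py, placed in a directory passed via --additional-hooks-dir.
# Sketch only: collect_entry_point() returns (datas, hiddenimports) covering
# every package registered under the given entry-point group.
from PyInstaller.utils.hooks import collect_entry_point

datas, hiddenimports = collect_entry_point('sqlalchemy.dialects')
```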
<code>
[start of PyInstaller/hooks/hook-sqlalchemy.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2023, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import re
13 import importlib.util
14
15 from PyInstaller import isolated
16 from PyInstaller.lib.modulegraph.modulegraph import SourceModule
17 from PyInstaller.utils.hooks import check_requirement, logger
18
19 # 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
20 excludedimports = ['sqlalchemy.testing']
21
22 # Include most common database bindings some database bindings are detected and include some are not. We should
23 # explicitly include database backends.
24 hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']
25
26 if check_requirement('sqlalchemy >= 1.4'):
27 hiddenimports.append("sqlalchemy.sql.default_comparator")
28
29
30 @isolated.decorate
31 def _get_dialect_modules(module_name):
32 import importlib
33 module = importlib.import_module(module_name)
34 return [f"{module_name}.{submodule_name}" for submodule_name in module.__all__]
35
36
37 # In SQLAlchemy >= 0.6, the "sqlalchemy.dialects" package provides dialects.
38 # In SQLAlchemy <= 0.5, the "sqlalchemy.databases" package provides dialects.
39 if check_requirement('sqlalchemy >= 0.6'):
40 hiddenimports += _get_dialect_modules("sqlalchemy.dialects")
41 else:
42 hiddenimports += _get_dialect_modules("sqlalchemy.databases")
43
44
45 def hook(hook_api):
46 """
47 SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This decorator does imports. E.g.:
48
49 @util.dependencies("sqlalchemy.sql.schema")
50
51 This hook scans for included SQLAlchemy modules and then scans those modules for any util.dependencies and marks
52 those modules as hidden imports.
53 """
54
55 if not check_requirement('sqlalchemy >= 0.9'):
56 return
57
58 # this parser is very simplistic but seems to catch all cases as of V1.1
59 depend_regex = re.compile(r'@util.dependencies\([\'"](.*?)[\'"]\)')
60
61 hidden_imports_set = set()
62 known_imports = set()
63 for node in hook_api.module_graph.iter_graph(start=hook_api.module):
64 if isinstance(node, SourceModule) and node.identifier.startswith('sqlalchemy.'):
65 known_imports.add(node.identifier)
66
67 # Read the source...
68 with open(node.filename, 'rb') as f:
69 source_code = f.read()
70 source_code = importlib.util.decode_source(source_code)
71
72 # ... and scan it
73 for match in depend_regex.findall(source_code):
74 hidden_imports_set.add(match)
75
76 hidden_imports_set -= known_imports
77 if len(hidden_imports_set):
78 logger.info(" Found %d sqlalchemy hidden imports", len(hidden_imports_set))
79 hook_api.add_imports(*list(hidden_imports_set))
80
[end of PyInstaller/hooks/hook-sqlalchemy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py
--- a/PyInstaller/hooks/hook-sqlalchemy.py
+++ b/PyInstaller/hooks/hook-sqlalchemy.py
@@ -14,7 +14,9 @@
from PyInstaller import isolated
from PyInstaller.lib.modulegraph.modulegraph import SourceModule
-from PyInstaller.utils.hooks import check_requirement, logger
+from PyInstaller.utils.hooks import check_requirement, collect_entry_point, logger
+
+datas = []
# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
excludedimports = ['sqlalchemy.testing']
@@ -41,6 +43,13 @@
else:
hiddenimports += _get_dialect_modules("sqlalchemy.databases")
+# Collect additional dialects and plugins that are registered via entry-points, under assumption that they are available
+# in the build environment for a reason (i.e., they are used).
+for entry_point_name in ('sqlalchemy.dialects', 'sqlalchemy.plugins'):
+ ep_datas, ep_hiddenimports = collect_entry_point(entry_point_name)
+ datas += ep_datas
+ hiddenimports += ep_hiddenimports
+
def hook(hook_api):
"""
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py\n--- a/PyInstaller/hooks/hook-sqlalchemy.py\n+++ b/PyInstaller/hooks/hook-sqlalchemy.py\n@@ -14,7 +14,9 @@\n \n from PyInstaller import isolated\n from PyInstaller.lib.modulegraph.modulegraph import SourceModule\n-from PyInstaller.utils.hooks import check_requirement, logger\n+from PyInstaller.utils.hooks import check_requirement, collect_entry_point, logger\n+\n+datas = []\n \n # 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\n excludedimports = ['sqlalchemy.testing']\n@@ -41,6 +43,13 @@\n else:\n hiddenimports += _get_dialect_modules(\"sqlalchemy.databases\")\n \n+# Collect additional dialects and plugins that are registered via entry-points, under assumption that they are available\n+# in the build environment for a reason (i.e., they are used).\n+for entry_point_name in ('sqlalchemy.dialects', 'sqlalchemy.plugins'):\n+ ep_datas, ep_hiddenimports = collect_entry_point(entry_point_name)\n+ datas += ep_datas\n+ hiddenimports += ep_hiddenimports\n+\n \n def hook(hook_api):\n \"\"\"\n", "issue": "Failing to load sqlalchemy-plugin in pyinstaller'ed exe\nThis line of code works perfectly fine while running as a file from spyder.\r\n\r\nengine = sqlalchemy.create_engine('teradata://uid:pwd@DBinstance?driver=Teradata')\r\nHowever, after making an exe file using pyinstaller, I get the following error while running the exe:\r\n\r\nsqlalchemy.exc.NoSuchModuleError: Can't load plugin:sqlalchemy.dialects:teradata\r\n\r\nThis looks to be an exe-creation-issue in loading libraries. Has anyone encountered a similar issue and was able to resolve? Any pointers would be appreciated.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport re\nimport importlib.util\n\nfrom PyInstaller import isolated\nfrom PyInstaller.lib.modulegraph.modulegraph import SourceModule\nfrom PyInstaller.utils.hooks import check_requirement, logger\n\n# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\nexcludedimports = ['sqlalchemy.testing']\n\n# Include most common database bindings some database bindings are detected and include some are not. 
We should\n# explicitly include database backends.\nhiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']\n\nif check_requirement('sqlalchemy >= 1.4'):\n hiddenimports.append(\"sqlalchemy.sql.default_comparator\")\n\n\[email protected]\ndef _get_dialect_modules(module_name):\n import importlib\n module = importlib.import_module(module_name)\n return [f\"{module_name}.{submodule_name}\" for submodule_name in module.__all__]\n\n\n# In SQLAlchemy >= 0.6, the \"sqlalchemy.dialects\" package provides dialects.\n# In SQLAlchemy <= 0.5, the \"sqlalchemy.databases\" package provides dialects.\nif check_requirement('sqlalchemy >= 0.6'):\n hiddenimports += _get_dialect_modules(\"sqlalchemy.dialects\")\nelse:\n hiddenimports += _get_dialect_modules(\"sqlalchemy.databases\")\n\n\ndef hook(hook_api):\n \"\"\"\n SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This decorator does imports. E.g.:\n\n @util.dependencies(\"sqlalchemy.sql.schema\")\n\n This hook scans for included SQLAlchemy modules and then scans those modules for any util.dependencies and marks\n those modules as hidden imports.\n \"\"\"\n\n if not check_requirement('sqlalchemy >= 0.9'):\n return\n\n # this parser is very simplistic but seems to catch all cases as of V1.1\n depend_regex = re.compile(r'@util.dependencies\\([\\'\"](.*?)[\\'\"]\\)')\n\n hidden_imports_set = set()\n known_imports = set()\n for node in hook_api.module_graph.iter_graph(start=hook_api.module):\n if isinstance(node, SourceModule) and node.identifier.startswith('sqlalchemy.'):\n known_imports.add(node.identifier)\n\n # Read the source...\n with open(node.filename, 'rb') as f:\n source_code = f.read()\n source_code = importlib.util.decode_source(source_code)\n\n # ... and scan it\n for match in depend_regex.findall(source_code):\n hidden_imports_set.add(match)\n\n hidden_imports_set -= known_imports\n if len(hidden_imports_set):\n logger.info(\" Found %d sqlalchemy hidden imports\", len(hidden_imports_set))\n hook_api.add_imports(*list(hidden_imports_set))\n", "path": "PyInstaller/hooks/hook-sqlalchemy.py"}]} | 1,506 | 272 |
gh_patches_debug_21 | rasdani/github-patches | git_diff | sbi-dev__sbi-31 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move tests to top folder
</issue>
<code>
[start of sbi/dummytests/whateverstuff.py]
1 # content of test_compute.py
2
3
4 def test_compute(param1):
5 assert param1 < 4
[end of sbi/dummytests/whateverstuff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sbi/dummytests/whateverstuff.py b/sbi/dummytests/whateverstuff.py
deleted file mode 100644
--- a/sbi/dummytests/whateverstuff.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# content of test_compute.py
-
-
-def test_compute(param1):
- assert param1 < 4
\ No newline at end of file
| {"golden_diff": "diff --git a/sbi/dummytests/whateverstuff.py b/sbi/dummytests/whateverstuff.py\ndeleted file mode 100644\n--- a/sbi/dummytests/whateverstuff.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-# content of test_compute.py\n-\n-\n-def test_compute(param1):\n- assert param1 < 4\n\\ No newline at end of file\n", "issue": "Move tests to top folder\n\n", "before_files": [{"content": "# content of test_compute.py\n\n\ndef test_compute(param1):\n assert param1 < 4", "path": "sbi/dummytests/whateverstuff.py"}]} | 577 | 96 |
gh_patches_debug_42 | rasdani/github-patches | git_diff | wright-group__WrightTools-221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
make setup.py an executable script
Change file permissions and reformat the shebang line
</issue>
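Concretely, the change amounts to two small steps: mark the file executable (`chmod +x setup.py`) and put a well-formed shebang on the first line. A sketch of the corrected header:

```python
#! /usr/bin/env python
# ^ shebang must be the very first line of setup.py; the executable bit
#   is set separately with `chmod +x setup.py`.
```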
<code>
[start of setup.py]
1 # !/usr/bin/env python
2
3 import os
4 from setuptools import setup, find_packages
5
6
7 def package_files(directory):
8 paths = []
9 for (path, directories, filenames) in os.walk(directory):
10 for filename in filenames:
11 paths.append(os.path.join('..', path, filename))
12 return paths
13
14
15 here = os.path.abspath(os.path.dirname(__file__))
16
17 extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))
18 extra_files.append(os.path.join(here, 'CONTRIBUTORS'))
19 extra_files.append(os.path.join(here, 'LICENSE'))
20 extra_files.append(os.path.join(here, 'README.rst'))
21 extra_files.append(os.path.join(here, 'requirements.txt'))
22 extra_files.append(os.path.join(here, 'VERSION'))
23 extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))
24
25 with open(os.path.join(here, 'requirements.txt')) as f:
26 required = f.read().splitlines()
27
28 with open(os.path.join(here, 'VERSION')) as version_file:
29 version = version_file.read().strip()
30
31 setup(
32 name='WrightTools',
33 packages=find_packages(),
34 package_data={'': extra_files},
35 setup_requires=['pytest-runner'],
36 tests_require=['pytest'],
37 install_requires=required,
38 extras_require={'docs': ['sphinx-gallery>=0.1.9']},
39 version=version,
40 description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',
41 author='Blaise Thompson',
42 author_email='[email protected]',
43 license='MIT',
44 url='http://wright.tools',
45 keywords='spectroscopy science multidimensional visualization',
46 classifiers=['Development Status :: 5 - Production/Stable',
47 'Intended Audience :: Science/Research',
48 'License :: OSI Approved :: MIT License',
49 'Natural Language :: English',
50 'Programming Language :: Python :: 2',
51 'Programming Language :: Python :: 2.7',
52 'Programming Language :: Python :: 3',
53 'Programming Language :: Python :: 3.3',
54 'Programming Language :: Python :: 3.4',
55 'Programming Language :: Python :: 3.5',
56 'Topic :: Scientific/Engineering']
57 )
58
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-# !/usr/bin/env python
+#! /usr/bin/env python
import os
from setuptools import setup, find_packages
| {"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,4 @@\n-# !/usr/bin/env python\n+#! /usr/bin/env python\n \n import os\n from setuptools import setup, find_packages\n", "issue": "make setup.py an executable script\nChange file permissions and reformat the shebang line\n", "before_files": [{"content": "# !/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}]} | 1,132 | 79 |
gh_patches_debug_590 | rasdani/github-patches | git_diff | pex-tool__pex-1838 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.96
On the docket:
+ [x] PEX_EXTRA_SYS_PATH propagation can break subprocesses run against other venvs. #1836
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.95"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.95"
+__version__ = "2.1.96"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.95\"\n+__version__ = \"2.1.96\"\n", "issue": "Release 2.1.96\nOn the docket:\r\n+ [x] PEX_EXTRA_SYS_PATH propagation can break subprocesses run against other venvs. #1836 \r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.95\"\n", "path": "pex/version.py"}]} | 626 | 97 |
gh_patches_debug_29330 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
STS calls to use regional endpoints instead of global endpoint
Change STS api calls to use regional STS endpoints to gain the benefits of "reduce latency, build in redundancy, and increase session token validity" as documented at https://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region
This will also enable leveraging a VPC Endpoint for STS, if provisioned.
</issue>
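As a sketch of the approach: regional STS endpoints follow a fixed URL pattern, so a boto3 session can be pointed at one explicitly. The helper name and fallback behavior below are illustrative, not the project's final API.

```python
def regional_sts_client(session, region=None):
    # Regional STS endpoints follow https://sts.<region>.amazonaws.com;
    # without a region, fall back to the global endpoint.
    if region:
        return session.client(
            'sts',
            endpoint_url="https://sts.{}.amazonaws.com".format(region),
            region_name=region)
    return session.client('sts')
```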
<code>
[start of c7n/credentials.py]
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Authentication utilities
16 """
17 from __future__ import absolute_import, division, print_function, unicode_literals
18
19 import os
20
21 from botocore.credentials import RefreshableCredentials
22 from botocore.session import get_session
23 from boto3 import Session
24
25 from c7n.version import version
26 from c7n.utils import get_retry
27
28
29 class SessionFactory(object):
30
31 def __init__(self, region, profile=None, assume_role=None, external_id=None):
32 self.region = region
33 self.profile = profile
34 self.assume_role = assume_role
35 self.external_id = external_id
36 self.user_agent_name = "CloudCustodian"
37 self.session_name = "CloudCustodian"
38 if 'C7N_SESSION_SUFFIX' in os.environ:
39 self.session_name = "%s@%s" % (
40 self.session_name, os.environ['C7N_SESSION_SUFFIX'])
41 self._subscribers = []
42
43 def _set_policy_name(self, name):
44 self.user_agent_name = ("CloudCustodian(%s)" % name).strip()
45
46 policy_name = property(None, _set_policy_name)
47
48 def __call__(self, assume=True, region=None):
49 if self.assume_role and assume:
50 session = Session(profile_name=self.profile)
51 session = assumed_session(
52 self.assume_role, self.session_name, session,
53 region or self.region, self.external_id)
54 else:
55 session = Session(
56 region_name=region or self.region, profile_name=self.profile)
57
58 return self.update(session)
59
60 def update(self, session):
61 session._session.user_agent_name = self.user_agent_name
62 session._session.user_agent_version = version
63
64 for s in self._subscribers:
65 s(session)
66
67 return session
68
69 def set_subscribers(self, subscribers):
70 self._subscribers = subscribers
71
72
73 def assumed_session(role_arn, session_name, session=None, region=None, external_id=None):
74 """STS Role assume a boto3.Session
75
76 With automatic credential renewal.
77
78 Args:
79 role_arn: iam role arn to assume
80 session_name: client session identifier
81 session: an optional extant session, note session is captured
82 in a function closure for renewing the sts assumed role.
83
84 :return: a boto3 session using the sts assumed role credentials
85
86 Notes: We have to poke at botocore internals a few times
87 """
88 if session is None:
89 session = Session()
90
91 retry = get_retry(('Throttling',))
92
93 def refresh():
94
95 parameters = {"RoleArn": role_arn, "RoleSessionName": session_name}
96
97 if external_id is not None:
98 parameters['ExternalId'] = external_id
99
100 credentials = retry(
101 session.client('sts').assume_role, **parameters)['Credentials']
102 return dict(
103 access_key=credentials['AccessKeyId'],
104 secret_key=credentials['SecretAccessKey'],
105 token=credentials['SessionToken'],
106 # Silly that we basically stringify so it can be parsed again
107 expiry_time=credentials['Expiration'].isoformat())
108
109 session_credentials = RefreshableCredentials.create_from_metadata(
110 metadata=refresh(),
111 refresh_using=refresh,
112 method='sts-assume-role')
113
114 # so dirty.. it hurts, no clean way to set this outside of the
115 # internals poke. There's some work upstream on making this nicer
116 # but its pretty baroque as well with upstream support.
117 # https://github.com/boto/boto3/issues/443
118 # https://github.com/boto/botocore/issues/761
119
120 s = get_session()
121 s._credentials = session_credentials
122 if region is None:
123 region = s.get_config_variable('region') or 'us-east-1'
124 s.set_config_variable('region', region)
125 return Session(botocore_session=s)
126
[end of c7n/credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/credentials.py b/c7n/credentials.py
--- a/c7n/credentials.py
+++ b/c7n/credentials.py
@@ -26,6 +26,10 @@
from c7n.utils import get_retry
+# 0.8.45.1 compatibility with global only sts endpoints, out of caution, remove in 0.8.46.1
+USE_STS_GLOBAL = os.environ.get('C7N_USE_STS_GLOBAL', '').lower() in ('yes', 'true')
+
+
class SessionFactory(object):
def __init__(self, region, profile=None, assume_role=None, external_id=None):
@@ -98,7 +102,8 @@
parameters['ExternalId'] = external_id
credentials = retry(
- session.client('sts').assume_role, **parameters)['Credentials']
+ get_sts_client(
+ session, region).assume_role, **parameters)['Credentials']
return dict(
access_key=credentials['AccessKeyId'],
secret_key=credentials['SecretAccessKey'],
@@ -123,3 +128,20 @@
region = s.get_config_variable('region') or 'us-east-1'
s.set_config_variable('region', region)
return Session(botocore_session=s)
+
+
+def get_sts_client(session, region):
+ """Get the AWS STS endpoint specific for the given region.
+
+ Returns the global endpoint if region is not specified.
+
+ For the list of regional endpoints, see https://amzn.to/2ohJgtR
+ """
+ if region and not USE_STS_GLOBAL:
+ endpoint_url = "https://sts.{}.amazonaws.com".format(region)
+ region_name = region
+ else:
+ endpoint_url = "https://sts.amazonaws.com"
+ region_name = None
+ return session.client(
+ 'sts', endpoint_url=endpoint_url, region_name=region_name)
| {"golden_diff": "diff --git a/c7n/credentials.py b/c7n/credentials.py\n--- a/c7n/credentials.py\n+++ b/c7n/credentials.py\n@@ -26,6 +26,10 @@\n from c7n.utils import get_retry\n \n \n+# 0.8.45.1 compatibility with global only sts endpoints, out of caution, remove in 0.8.46.1\n+USE_STS_GLOBAL = os.environ.get('C7N_USE_STS_GLOBAL', '').lower() in ('yes', 'true')\n+\n+\n class SessionFactory(object):\n \n def __init__(self, region, profile=None, assume_role=None, external_id=None):\n@@ -98,7 +102,8 @@\n parameters['ExternalId'] = external_id\n \n credentials = retry(\n- session.client('sts').assume_role, **parameters)['Credentials']\n+ get_sts_client(\n+ session, region).assume_role, **parameters)['Credentials']\n return dict(\n access_key=credentials['AccessKeyId'],\n secret_key=credentials['SecretAccessKey'],\n@@ -123,3 +128,20 @@\n region = s.get_config_variable('region') or 'us-east-1'\n s.set_config_variable('region', region)\n return Session(botocore_session=s)\n+\n+\n+def get_sts_client(session, region):\n+ \"\"\"Get the AWS STS endpoint specific for the given region.\n+\n+ Returns the global endpoint if region is not specified.\n+\n+ For the list of regional endpoints, see https://amzn.to/2ohJgtR\n+ \"\"\"\n+ if region and not USE_STS_GLOBAL:\n+ endpoint_url = \"https://sts.{}.amazonaws.com\".format(region)\n+ region_name = region\n+ else:\n+ endpoint_url = \"https://sts.amazonaws.com\"\n+ region_name = None\n+ return session.client(\n+ 'sts', endpoint_url=endpoint_url, region_name=region_name)\n", "issue": "STS calls to use regional endpoints instead of global endpoint\nChange STS api calls to use regional STS endpoints to gain the benefits of \"reduce latency, build in redundancy, and increase session token validity\" as documented at https://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region\r\n\r\nThis will also enable leveraging a VPC Endpoint for STS, if provisioned. 
\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAuthentication utilities\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nfrom botocore.credentials import RefreshableCredentials\nfrom botocore.session import get_session\nfrom boto3 import Session\n\nfrom c7n.version import version\nfrom c7n.utils import get_retry\n\n\nclass SessionFactory(object):\n\n def __init__(self, region, profile=None, assume_role=None, external_id=None):\n self.region = region\n self.profile = profile\n self.assume_role = assume_role\n self.external_id = external_id\n self.user_agent_name = \"CloudCustodian\"\n self.session_name = \"CloudCustodian\"\n if 'C7N_SESSION_SUFFIX' in os.environ:\n self.session_name = \"%s@%s\" % (\n self.session_name, os.environ['C7N_SESSION_SUFFIX'])\n self._subscribers = []\n\n def _set_policy_name(self, name):\n self.user_agent_name = (\"CloudCustodian(%s)\" % name).strip()\n\n policy_name = property(None, _set_policy_name)\n\n def __call__(self, assume=True, region=None):\n if self.assume_role and assume:\n session = Session(profile_name=self.profile)\n session = assumed_session(\n self.assume_role, self.session_name, session,\n region or self.region, self.external_id)\n else:\n session = Session(\n region_name=region or self.region, profile_name=self.profile)\n\n return self.update(session)\n\n def update(self, session):\n session._session.user_agent_name = self.user_agent_name\n session._session.user_agent_version = version\n\n for s in self._subscribers:\n s(session)\n\n return session\n\n def set_subscribers(self, subscribers):\n self._subscribers = subscribers\n\n\ndef assumed_session(role_arn, session_name, session=None, region=None, external_id=None):\n \"\"\"STS Role assume a boto3.Session\n\n With automatic credential renewal.\n\n Args:\n role_arn: iam role arn to assume\n session_name: client session identifier\n session: an optional extant session, note session is captured\n in a function closure for renewing the sts assumed role.\n\n :return: a boto3 session using the sts assumed role credentials\n\n Notes: We have to poke at botocore internals a few times\n \"\"\"\n if session is None:\n session = Session()\n\n retry = get_retry(('Throttling',))\n\n def refresh():\n\n parameters = {\"RoleArn\": role_arn, \"RoleSessionName\": session_name}\n\n if external_id is not None:\n parameters['ExternalId'] = external_id\n\n credentials = retry(\n session.client('sts').assume_role, **parameters)['Credentials']\n return dict(\n access_key=credentials['AccessKeyId'],\n secret_key=credentials['SecretAccessKey'],\n token=credentials['SessionToken'],\n # Silly that we basically stringify so it can be parsed again\n expiry_time=credentials['Expiration'].isoformat())\n\n session_credentials = RefreshableCredentials.create_from_metadata(\n metadata=refresh(),\n refresh_using=refresh,\n method='sts-assume-role')\n\n # so dirty.. 
it hurts, no clean way to set this outside of the\n # internals poke. There's some work upstream on making this nicer\n # but its pretty baroque as well with upstream support.\n # https://github.com/boto/boto3/issues/443\n # https://github.com/boto/botocore/issues/761\n\n s = get_session()\n s._credentials = session_credentials\n if region is None:\n region = s.get_config_variable('region') or 'us-east-1'\n s.set_config_variable('region', region)\n return Session(botocore_session=s)\n", "path": "c7n/credentials.py"}]} | 1,858 | 432 |
gh_patches_debug_555 | rasdani/github-patches | git_diff | pex-tool__pex-753 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 1.6.9
On the docket:
+ [x] Fix pkg resource early import #750
+ [x] Fix `sys.path` scrubbing of pex extras modules. #752
Maybe on the docket:
+ [ ] Problem running PEX files with restricted permissions #746
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.6.8'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.6.8'
+__version__ = '1.6.9'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.6.8'\n+__version__ = '1.6.9'\n", "issue": "Release 1.6.9\nOn the docket:\r\n+ [x] Fix pkg resource early import #750\r\n+ [x] Fix `sys.path` scrubbing of pex extras modules. #752\r\n\r\nMaybe on the docket:\r\n+ [ ] Problem running PEX files with restricted permissions #746\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.8'\n", "path": "pex/version.py"}]} | 653 | 95 |
gh_patches_debug_785 | rasdani/github-patches | git_diff | translate__pootle-5666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Silence dev checks in the admin UI as well
There's the following section in the Pootle dev config:
``` Python
# Silence the DEBUG check on dev servers
SILENCED_SYSTEM_CHECKS = [
'pootle.W004', # python-levenstein not installed
'pootle.W005', # DEBUG = True
'pootle.W010', # DEFAULT_FROM_EMAIL has default setting
'pootle.W011', # POOTLE_CONTACT_EMAIL has default setting
]
```
When running Pootle, I still get these notifications in the admin UI among others:
> `/!\` DEBUG mode is on. Do not do this in production! Set DEBUG = False in Pootle settings
>
> `/!\` POOTLE_CONTACT_EMAIL is using the following default setting 'info@YOUR_DOMAIN.com'. POOTLE_CONTACT_EMAIL is the address that will receive messages sent by the contact form.
>
> `/!\` DEFAULT_FROM_EMAIL is using the following default setting 'webmaster@localhost'. DEFAULT_FROM_EMAIL is used in all outgoing Pootle email. Don't forget to review your mail server settings.
I think it might make sense to silence them consistently not only in the console, but in the admin UI as well.
</issue>
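Django's `CheckMessage` objects already know whether they are covered by `SILENCED_SYSTEM_CHECKS` via their `is_silenced()` method, so the dashboard view can filter on that. A minimal sketch of the idea:

```python
from django.core.checks.registry import registry

def checks():
    # Drop messages silenced via settings.SILENCED_SYSTEM_CHECKS so the
    # admin UI matches what the console reports.
    return [msg for msg in registry.run_checks() if not msg.is_silenced()]
```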
<code>
[start of pootle/apps/pootle_app/views/admin/dashboard.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import locale
10 import os
11
12 from redis.exceptions import ConnectionError
13
14 from django.contrib.auth import get_user_model
15 from django.core.cache import cache
16 from django.shortcuts import render
17
18 from django_rq.queues import get_failed_queue, get_queue
19 from django_rq.workers import Worker
20
21 from pootle.core.decorators import admin_required
22 from pootle.i18n.gettext import ugettext as _, ungettext
23 from pootle_statistics.models import Submission
24 from pootle_store.models import Suggestion
25
26
27 def _format_numbers(numbers):
28 for k in numbers.keys():
29 formatted_number = locale.format("%d", numbers[k], grouping=True)
30 # Under Windows, formatted number must be converted to Unicode
31 if os.name == 'nt':
32 formatted_number = formatted_number.decode(
33 locale.getpreferredencoding()
34 )
35 numbers[k] = formatted_number
36
37
38 def server_stats():
39 User = get_user_model()
40 result = cache.get("server_stats")
41 if result is None:
42 result = {}
43 result['user_count'] = max(User.objects.filter(
44 is_active=True).count()-2, 0)
45 # 'default' and 'nobody' might be counted
46 # FIXME: the special users should not be retuned with is_active
47 result['submission_count'] = Submission.objects.count()
48 result['pending_count'] = Suggestion.objects.pending().count()
49 cache.set("server_stats", result, 86400)
50 _format_numbers(result)
51 return result
52
53
54 def rq_stats():
55 queue = get_queue()
56 failed_queue = get_failed_queue()
57 try:
58 workers = Worker.all(queue.connection)
59 except ConnectionError:
60 return None
61
62 num_workers = len(workers)
63 is_running = len(queue.connection.smembers(Worker.redis_workers_keys)) > 0
64 if is_running:
65 # Translators: this refers to the status of the background job worker
66 status_msg = ungettext('Running (%d worker)', 'Running (%d workers)',
67 num_workers) % num_workers
68 else:
69 # Translators: this refers to the status of the background job worker
70 status_msg = _('Stopped')
71
72 result = {
73 'job_count': queue.count,
74 'failed_job_count': failed_queue.count,
75 'is_running': is_running,
76 'status_msg': status_msg,
77 }
78
79 return result
80
81
82 def checks():
83 from django.core.checks.registry import registry
84
85 return registry.run_checks()
86
87
88 @admin_required
89 def view(request):
90 ctx = {
91 'page': 'admin-dashboard',
92 'server_stats': server_stats(),
93 'rq_stats': rq_stats(),
94 'checks': checks(),
95 }
96 return render(request, "admin/dashboard.html", ctx)
97
[end of pootle/apps/pootle_app/views/admin/dashboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_app/views/admin/dashboard.py b/pootle/apps/pootle_app/views/admin/dashboard.py
--- a/pootle/apps/pootle_app/views/admin/dashboard.py
+++ b/pootle/apps/pootle_app/views/admin/dashboard.py
@@ -82,7 +82,7 @@
def checks():
from django.core.checks.registry import registry
- return registry.run_checks()
+ return [e for e in registry.run_checks() if not e.is_silenced()]
@admin_required
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/views/admin/dashboard.py b/pootle/apps/pootle_app/views/admin/dashboard.py\n--- a/pootle/apps/pootle_app/views/admin/dashboard.py\n+++ b/pootle/apps/pootle_app/views/admin/dashboard.py\n@@ -82,7 +82,7 @@\n def checks():\n from django.core.checks.registry import registry\n \n- return registry.run_checks()\n+ return [e for e in registry.run_checks() if not e.is_silenced()]\n \n \n @admin_required\n", "issue": "Silence dev checks in the admin UI as well\nThere's the following section in the Pootle dev config:\n\n``` Python\n# Silence the DEBUG check on dev servers\nSILENCED_SYSTEM_CHECKS = [\n 'pootle.W004', # python-levenstein not installed\n 'pootle.W005', # DEBUG = True\n 'pootle.W010', # DEFAULT_FROM_EMAIL has default setting\n 'pootle.W011', # POOTLE_CONTACT_EMAIL has default setting\n]\n```\n\nWhen running Pootle, I still get these notifications in the admin UI among others:\n\n> `/!\\` DEBUG mode is on. Do not do this in production! Set DEBUG = False in Pootle settings\n> \n> `/!\\` POOTLE_CONTACT_EMAIL is using the following default setting 'info@YOUR_DOMAIN.com'. POOTLE_CONTACT_EMAIL is the address that will receive messages sent by the contact form.\n> \n> `/!\\` DEFAULT_FROM_EMAIL is using the following default setting 'webmaster@localhost'. DEFAULT_FROM_EMAIL is used in all outgoing Pootle email. Don't forget to review your mail server settings.\n\nI think it might make sense to silence them consistently not only in the console, but in the admin UI as well.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale\nimport os\n\nfrom redis.exceptions import ConnectionError\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.shortcuts import render\n\nfrom django_rq.queues import get_failed_queue, get_queue\nfrom django_rq.workers import Worker\n\nfrom pootle.core.decorators import admin_required\nfrom pootle.i18n.gettext import ugettext as _, ungettext\nfrom pootle_statistics.models import Submission\nfrom pootle_store.models import Suggestion\n\n\ndef _format_numbers(numbers):\n for k in numbers.keys():\n formatted_number = locale.format(\"%d\", numbers[k], grouping=True)\n # Under Windows, formatted number must be converted to Unicode\n if os.name == 'nt':\n formatted_number = formatted_number.decode(\n locale.getpreferredencoding()\n )\n numbers[k] = formatted_number\n\n\ndef server_stats():\n User = get_user_model()\n result = cache.get(\"server_stats\")\n if result is None:\n result = {}\n result['user_count'] = max(User.objects.filter(\n is_active=True).count()-2, 0)\n # 'default' and 'nobody' might be counted\n # FIXME: the special users should not be retuned with is_active\n result['submission_count'] = Submission.objects.count()\n result['pending_count'] = Suggestion.objects.pending().count()\n cache.set(\"server_stats\", result, 86400)\n _format_numbers(result)\n return result\n\n\ndef rq_stats():\n queue = get_queue()\n failed_queue = get_failed_queue()\n try:\n workers = Worker.all(queue.connection)\n except ConnectionError:\n return None\n\n num_workers = len(workers)\n is_running = len(queue.connection.smembers(Worker.redis_workers_keys)) > 0\n if is_running:\n # Translators: this refers to the 
status of the background job worker\n status_msg = ungettext('Running (%d worker)', 'Running (%d workers)',\n num_workers) % num_workers\n else:\n # Translators: this refers to the status of the background job worker\n status_msg = _('Stopped')\n\n result = {\n 'job_count': queue.count,\n 'failed_job_count': failed_queue.count,\n 'is_running': is_running,\n 'status_msg': status_msg,\n }\n\n return result\n\n\ndef checks():\n from django.core.checks.registry import registry\n\n return registry.run_checks()\n\n\n@admin_required\ndef view(request):\n ctx = {\n 'page': 'admin-dashboard',\n 'server_stats': server_stats(),\n 'rq_stats': rq_stats(),\n 'checks': checks(),\n }\n return render(request, \"admin/dashboard.html\", ctx)\n", "path": "pootle/apps/pootle_app/views/admin/dashboard.py"}]} | 1,671 | 118 |
gh_patches_debug_3666 | rasdani/github-patches | git_diff | carpentries__amy-245 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use Bower for JS, CSS dependencies
We need something to manage our JavaScript (and perhaps CSS) dependencies and this tool is called [Bower](http://bower.io).
@wking [wrote](https://github.com/swcarpentry/amy/pull/239#issuecomment-89585714) some handy instructions on getting Bower up and running.
Bower packages we'd use for now:
- `bootstrap`
- `bootstrap-datepicker`
(This should land after #239)
</issue>
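On the Django side, Bower's default `bower_components/` directory then just needs to be visible to staticfiles. A sketch of the settings addition (the directory name assumes Bower's defaults):

```python
# In amy/settings.py -- sketch only; BASE_DIR is already defined there.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'bower_components'),
)
```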
<code>
[start of amy/settings.py]
1 """
2 Django settings for amy project.
3
4 For more information on this file, see
5 https://docs.djangoproject.com/en/1.7/topics/settings/
6
7 For the full list of settings and their values, see
8 https://docs.djangoproject.com/en/1.7/ref/settings/
9 """
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 import os
13 import json
14
15 from django.conf import global_settings
16
17 BASE_DIR = os.path.dirname(os.path.dirname(__file__))
18
19
20 # Quick-start development settings - unsuitable for production
21 # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
22
23
24 # SECURITY WARNING: don't run with DEBUG turned on in production!
25 DEBUG = json.loads(os.environ.get('AMY_DEBUG', 'true'))
26 # For deployment in production:
27 # AMY_DEBUG=false AMY_SECRET_KEY="..." ./manage.py runserver ...
28
29 if DEBUG:
30 SECRET_KEY = '3l$35+@a%g!(^y^98oi%ei+%+yvtl3y0k^_7-fmx2oj09-ac5@'
31 else:
32 SECRET_KEY = None
33 SECRET_KEY = os.environ.get('AMY_SECRET_KEY', SECRET_KEY)
34
35
36 TEMPLATE_DEBUG = True
37
38 ALLOWED_HOSTS = [
39 'software-carpentry.org',
40 'software-carpentry.org.'
41 ]
42
43
44 # Application definition
45
46 INSTALLED_APPS = (
47 'django.contrib.auth',
48 'django.contrib.contenttypes',
49 'django.contrib.sessions',
50 'django.contrib.messages',
51 'django.contrib.staticfiles',
52 'workshops',
53 # this should be after 'workshops' because templates in
54 # 'templates/registration/' clash
55 'django.contrib.admin',
56 'crispy_forms',
57 )
58
59 CRISPY_TEMPLATE_PACK = 'bootstrap3'
60
61 MIDDLEWARE_CLASSES = (
62 'django.contrib.sessions.middleware.SessionMiddleware',
63 'django.middleware.common.CommonMiddleware',
64 'django.middleware.csrf.CsrfViewMiddleware',
65 'django.contrib.auth.middleware.AuthenticationMiddleware',
66 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
67 'django.contrib.messages.middleware.MessageMiddleware',
68 'django.middleware.clickjacking.XFrameOptionsMiddleware',
69 )
70
71 # WARNING: in Django 1.8 processors get moved from ``django.core`` to
72 # ``django.template``.
73 TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
74 "django.core.context_processors.request",
75 )
76
77 ROOT_URLCONF = 'amy.urls'
78
79 WSGI_APPLICATION = 'amy.wsgi.application'
80
81 from django.contrib.messages import constants as message_constants
82 MESSAGE_TAGS = {
83 message_constants.INFO: 'alert-info',
84 message_constants.SUCCESS: 'alert-success',
85 message_constants.WARNING: 'alert-warning',
86 message_constants.ERROR: 'alert-danger',
87 }
88
89
90 # Database
91 # https://docs.djangoproject.com/en/1.7/ref/settings/#databases
92
93 DATABASES = {
94 'default': {
95 'ENGINE': 'django.db.backends.sqlite3',
96 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
97 }
98 }
99
100 # Authentication
101
102 AUTH_USER_MODEL = 'workshops.Person'
103
104 # Internationalization
105 # https://docs.djangoproject.com/en/1.7/topics/i18n/
106
107 LANGUAGE_CODE = 'en-us'
108
109 TIME_ZONE = 'EST'
110
111 USE_I18N = True
112
113 USE_L10N = True
114
115 USE_TZ = True
116
117
118 # Static files (CSS, JavaScript, Images)
119 # https://docs.djangoproject.com/en/1.7/howto/static-files/
120
121 STATIC_URL = '/static/'
122
123 # Warn viewers of invalid template strings
124 TEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'
125
126 # if "next" (or "?next") variable is not set when logging in, redirect to
127 # workshops
128 LOGIN_REDIRECT_URL = '/workshops/'
129
130 # here's where @login_required redirects to:
131 LOGIN_URL = '/account/login/'
132
[end of amy/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/settings.py b/amy/settings.py
--- a/amy/settings.py
+++ b/amy/settings.py
@@ -119,6 +119,10 @@
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
+STATIC_ROOT = os.path.join(BASE_DIR, 'static')
+STATICFILES_DIRS = (
+ os.path.join(BASE_DIR, 'bower_components'),
+)
# Warn viewers of invalid template strings
TEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'
| {"golden_diff": "diff --git a/amy/settings.py b/amy/settings.py\n--- a/amy/settings.py\n+++ b/amy/settings.py\n@@ -119,6 +119,10 @@\n # https://docs.djangoproject.com/en/1.7/howto/static-files/\n \n STATIC_URL = '/static/'\n+STATIC_ROOT = os.path.join(BASE_DIR, 'static')\n+STATICFILES_DIRS = (\n+ os.path.join(BASE_DIR, 'bower_components'),\n+)\n \n # Warn viewers of invalid template strings\n TEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'\n", "issue": "Use Bower for JS, CSS dependencies\nWe need something to manage our JavaScript (and perhaps CSS) dependencies and this tool is called [Bower](http://bower.io).\n\n@wking [wrote](https://github.com/swcarpentry/amy/pull/239#issuecomment-89585714) some handy instructions on getting Bower up and running.\n\nBower packages we'd use for now:\n- `bootstrap`\n- `bootstrap-datepicker`\n\n(This should land after #239)\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for amy project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport json\n\nfrom django.conf import global_settings\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n\n# SECURITY WARNING: don't run with DEBUG turned on in production!\nDEBUG = json.loads(os.environ.get('AMY_DEBUG', 'true'))\n# For deployment in production:\n# AMY_DEBUG=false AMY_SECRET_KEY=\"...\" ./manage.py runserver ...\n\nif DEBUG:\n SECRET_KEY = '3l$35+@a%g!(^y^98oi%ei+%+yvtl3y0k^_7-fmx2oj09-ac5@'\nelse:\n SECRET_KEY = None\nSECRET_KEY = os.environ.get('AMY_SECRET_KEY', SECRET_KEY)\n\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = [\n 'software-carpentry.org',\n 'software-carpentry.org.'\n]\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'workshops',\n # this should be after 'workshops' because templates in\n # 'templates/registration/' clash\n 'django.contrib.admin',\n 'crispy_forms',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\n# WARNING: in Django 1.8 processors get moved from ``django.core`` to\n# ``django.template``.\nTEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (\n \"django.core.context_processors.request\",\n)\n\nROOT_URLCONF = 'amy.urls'\n\nWSGI_APPLICATION = 'amy.wsgi.application'\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.INFO: 'alert-info',\n message_constants.SUCCESS: 'alert-success',\n message_constants.WARNING: 'alert-warning',\n message_constants.ERROR: 'alert-danger',\n}\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 
'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Authentication\n\nAUTH_USER_MODEL = 'workshops.Person'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'EST'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# Warn viewers of invalid template strings\nTEMPLATE_STRING_IF_INVALID = 'XXX-unset-variable-XXX'\n\n# if \"next\" (or \"?next\") variable is not set when logging in, redirect to\n# workshops\nLOGIN_REDIRECT_URL = '/workshops/'\n\n# here's where @login_required redirects to:\nLOGIN_URL = '/account/login/'\n", "path": "amy/settings.py"}]} | 1,760 | 122 |
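The patch above only wires `bower_components` into `STATICFILES_DIRS`; a quick way to confirm the wiring is Django's staticfiles finder API. The snippet below is a hypothetical check, not part of the repository: it assumes `DJANGO_SETTINGS_MODULE` points at the patched `amy.settings` and that `bower install bootstrap bootstrap-datepicker` has produced the standard `bower_components/bootstrap/dist/...` layout.

```python
import django

django.setup()  # requires DJANGO_SETTINGS_MODULE=amy.settings in the environment

from django.contrib.staticfiles import finders

# Resolves through STATICFILES_DIRS, so a hit proves bower_components is searched.
print(finders.find('bootstrap/dist/css/bootstrap.css'))
# -> .../bower_components/bootstrap/dist/css/bootstrap.css when the package is installed
```

`collectstatic` would then copy the same files into the new `STATIC_ROOT` for deployment.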
gh_patches_debug_36040 | rasdani/github-patches | git_diff | geopandas__geopandas-1088 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DOC: update the contextily example in the gallery
The example about adding a background map to your plot using contextily at https://geopandas.readthedocs.io/en/latest/gallery/plotting_basemap_background.html#sphx-glr-gallery-plotting-basemap-background-py can be updated with the latest contextily (https://github.com/darribas/contextily/releases/tag/v1.0rc2).
For example, the `add_basemap` function is now available in contextily itself.
</issue>
<code>
[start of examples/plotting_basemap_background.py]
1 """
2 Adding a background map to plots
3 --------------------------------
4
5 This example shows how you can add a background basemap to plots created
6 with the geopandas ``.plot()`` method. This makes use of the
7 `contextily <https://github.com/darribas/contextily>`__ package to retrieve
8 web map tiles from several sources (OpenStreetMap, Stamen).
9
10 """
11 # sphinx_gallery_thumbnail_number = 3
12 import geopandas
13
14 ###############################################################################
15 # Let's use the NYC borough boundary data that is available in geopandas
16 # datasets. Plotting this gives the following result:
17
18 df = geopandas.read_file(geopandas.datasets.get_path('nybb'))
19 ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
20
21 ###############################################################################
22 # Convert the data to Web Mercator
23 # ================================
24 #
25 # Web map tiles are typically provided in
26 # `Web Mercator <https://en.wikipedia.org/wiki/Web_Mercator>`__
27 # (`EPSG 3857 <https://epsg.io/3857>`__), so we need to make sure to convert
28 # our data first to the same CRS to combine our polygons and background tiles
29 # in the same map:
30
31 df = df.to_crs(epsg=3857)
32
33 ###############################################################################
34 # Contextily helper function
35 # ==========================
36 #
37 # We define a small helper function that uses
38 # `contextily <https://github.com/darribas/contextily>`__ to add a map
39 # as background to an existing plot:
40
41 import contextily as ctx
42
43 def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):
44 xmin, xmax, ymin, ymax = ax.axis()
45 basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)
46 ax.imshow(basemap, extent=extent, interpolation='bilinear')
47 # restore original x/y limits
48 ax.axis((xmin, xmax, ymin, ymax))
49
50 ###############################################################################
51 # Add background tiles to plot
52 # ============================
53 #
54 # Now we can use the above function to easily add a background map to our
55 # plot. The `zoom` keyword is required and let's you specify the detail of the
56 # map tiles (be careful to not specify a too high `zoom` level, as this can
57 # result in a large download):
58
59 ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
60 add_basemap(ax, zoom=10)
61
62 ###############################################################################
63 # By default, contextily uses the Stamen Terrain style. We can specify a
64 # different style using ``ctx.sources``:
65
66 ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
67 add_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)
68 ax.set_axis_off()
69
[end of examples/plotting_basemap_background.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/plotting_basemap_background.py b/examples/plotting_basemap_background.py
--- a/examples/plotting_basemap_background.py
+++ b/examples/plotting_basemap_background.py
@@ -1,5 +1,5 @@
"""
-Adding a background map to plots
+Adding a background map to plots
--------------------------------
This example shows how you can add a background basemap to plots created
@@ -31,38 +31,31 @@
df = df.to_crs(epsg=3857)
###############################################################################
-# Contextily helper function
-# ==========================
-#
-# We define a small helper function that uses
-# `contextily <https://github.com/darribas/contextily>`__ to add a map
-# as background to an existing plot:
import contextily as ctx
-def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):
- xmin, xmax, ymin, ymax = ax.axis()
- basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)
- ax.imshow(basemap, extent=extent, interpolation='bilinear')
- # restore original x/y limits
- ax.axis((xmin, xmax, ymin, ymax))
-
###############################################################################
# Add background tiles to plot
# ============================
#
-# Now we can use the above function to easily add a background map to our
-# plot. The `zoom` keyword is required and let's you specify the detail of the
-# map tiles (be careful to not specify a too high `zoom` level, as this can
-# result in a large download):
+# We can use `add_basemap` function of contextily to easily add a background
+# map to our plot. :
+
+ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
+ctx.add_basemap(ax)
+
+###############################################################################
+# We can control the detail of the map tiles using the optional `zoom` keyword
+# (be careful to not specify a too high `zoom` level,
+# as this can result in a large download).:
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
-add_basemap(ax, zoom=10)
+ctx.add_basemap(ax, zoom=12)
###############################################################################
# By default, contextily uses the Stamen Terrain style. We can specify a
-# different style using ``ctx.sources``:
+# different style using ``ctx.providers``:
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
-add_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)
+ctx.add_basemap(ax, url=ctx.providers.Stamen.TonerLite)
ax.set_axis_off()
| {"golden_diff": "diff --git a/examples/plotting_basemap_background.py b/examples/plotting_basemap_background.py\n--- a/examples/plotting_basemap_background.py\n+++ b/examples/plotting_basemap_background.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Adding a background map to plots \n+Adding a background map to plots\n --------------------------------\n \n This example shows how you can add a background basemap to plots created\n@@ -31,38 +31,31 @@\n df = df.to_crs(epsg=3857)\n \n ###############################################################################\n-# Contextily helper function\n-# ==========================\n-#\n-# We define a small helper function that uses\n-# `contextily <https://github.com/darribas/contextily>`__ to add a map\n-# as background to an existing plot: \n \n import contextily as ctx\n \n-def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):\n- xmin, xmax, ymin, ymax = ax.axis()\n- basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)\n- ax.imshow(basemap, extent=extent, interpolation='bilinear')\n- # restore original x/y limits\n- ax.axis((xmin, xmax, ymin, ymax))\n-\n ###############################################################################\n # Add background tiles to plot\n # ============================\n #\n-# Now we can use the above function to easily add a background map to our\n-# plot. The `zoom` keyword is required and let's you specify the detail of the\n-# map tiles (be careful to not specify a too high `zoom` level, as this can\n-# result in a large download):\n+# We can use `add_basemap` function of contextily to easily add a background\n+# map to our plot. :\n+\n+ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n+ctx.add_basemap(ax)\n+\n+###############################################################################\n+# We can control the detail of the map tiles using the optional `zoom` keyword\n+# (be careful to not specify a too high `zoom` level,\n+# as this can result in a large download).:\n \n ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n-add_basemap(ax, zoom=10)\n+ctx.add_basemap(ax, zoom=12)\n \n ###############################################################################\n # By default, contextily uses the Stamen Terrain style. We can specify a\n-# different style using ``ctx.sources``:\n+# different style using ``ctx.providers``:\n \n ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n-add_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)\n+ctx.add_basemap(ax, url=ctx.providers.Stamen.TonerLite)\n ax.set_axis_off()\n", "issue": "DOC: update the contextily example in the gallery\nThe example about adding a background map to your plot using contextily at https://geopandas.readthedocs.io/en/latest/gallery/plotting_basemap_background.html#sphx-glr-gallery-plotting-basemap-background-py can be updated with the latest contextily (https://github.com/darribas/contextily/releases/tag/v1.0rc2).\r\n\r\nFor example, the `add_basemap` function is now available in contextily itself.\n", "before_files": [{"content": "\"\"\"\nAdding a background map to plots \n--------------------------------\n\nThis example shows how you can add a background basemap to plots created\nwith the geopandas ``.plot()`` method. 
This makes use of the\n`contextily <https://github.com/darribas/contextily>`__ package to retrieve\nweb map tiles from several sources (OpenStreetMap, Stamen).\n\n\"\"\"\n# sphinx_gallery_thumbnail_number = 3\nimport geopandas\n\n###############################################################################\n# Let's use the NYC borough boundary data that is available in geopandas\n# datasets. Plotting this gives the following result:\n\ndf = geopandas.read_file(geopandas.datasets.get_path('nybb'))\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\n\n###############################################################################\n# Convert the data to Web Mercator\n# ================================\n#\n# Web map tiles are typically provided in\n# `Web Mercator <https://en.wikipedia.org/wiki/Web_Mercator>`__\n# (`EPSG 3857 <https://epsg.io/3857>`__), so we need to make sure to convert\n# our data first to the same CRS to combine our polygons and background tiles\n# in the same map:\n\ndf = df.to_crs(epsg=3857)\n\n###############################################################################\n# Contextily helper function\n# ==========================\n#\n# We define a small helper function that uses\n# `contextily <https://github.com/darribas/contextily>`__ to add a map\n# as background to an existing plot: \n\nimport contextily as ctx\n\ndef add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):\n xmin, xmax, ymin, ymax = ax.axis()\n basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)\n ax.imshow(basemap, extent=extent, interpolation='bilinear')\n # restore original x/y limits\n ax.axis((xmin, xmax, ymin, ymax))\n\n###############################################################################\n# Add background tiles to plot\n# ============================\n#\n# Now we can use the above function to easily add a background map to our\n# plot. The `zoom` keyword is required and let's you specify the detail of the\n# map tiles (be careful to not specify a too high `zoom` level, as this can\n# result in a large download):\n\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\nadd_basemap(ax, zoom=10)\n\n###############################################################################\n# By default, contextily uses the Stamen Terrain style. We can specify a\n# different style using ``ctx.sources``:\n\nax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')\nadd_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)\nax.set_axis_off()\n", "path": "examples/plotting_basemap_background.py"}]} | 1,425 | 647 |
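For readers trying the updated gallery code outside the docs build, the whole flow condenses to a few lines. This sketch assumes contextily >= 1.0rc2 (where `add_basemap` and the `providers` registry ship with the library) and network access for the tile download; later contextily releases renamed the `url` keyword to `source`.

```python
import geopandas
import contextily as ctx

df = geopandas.read_file(geopandas.datasets.get_path('nybb'))
df = df.to_crs(epsg=3857)  # web tiles are served in Web Mercator

ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
ctx.add_basemap(ax, zoom=12, url=ctx.providers.Stamen.TonerLite)
ax.set_axis_off()
```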
gh_patches_debug_17208 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2704 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/ChatGPT/setup.py]
1 from setuptools import find_packages, setup
2
3
4 def fetch_requirements(path):
5 with open(path, 'r') as fd:
6 return [r.strip() for r in fd.readlines()]
7
8
9 def fetch_readme():
10 with open('README.md', encoding='utf-8') as f:
11 return f.read()
12
13
14 def fetch_version():
15 with open('version.txt', 'r') as f:
16 return f.read().strip()
17
18
19 setup(
20 name='chatgpt',
21 version=fetch_version(),
22 packages=find_packages(exclude=(
23 'tests',
24 'benchmarks',
25 'requirements',
26 '*.egg-info',
27 )),
28 description='A RLFH implementation (ChatGPT) powered by ColossalAI',
29 long_description=fetch_readme(),
30 long_description_content_type='text/markdown',
31 license='Apache Software License 2.0',
32 url='https://github.com/hpcaitech/ChatGPT',
33 install_requires=fetch_requirements('requirements/requirements.txt'),
34 python_requires='>=3.6',
35 classifiers=[
36 'Programming Language :: Python :: 3',
37 'License :: OSI Approved :: Apache Software License',
38 'Environment :: GPU :: NVIDIA CUDA',
39 'Topic :: Scientific/Engineering :: Artificial Intelligence',
40 'Topic :: System :: Distributed Computing',
41 ],
42 )
43
[end of applications/ChatGPT/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/ChatGPT/setup.py b/applications/ChatGPT/setup.py
--- a/applications/ChatGPT/setup.py
+++ b/applications/ChatGPT/setup.py
@@ -22,7 +22,6 @@
packages=find_packages(exclude=(
'tests',
'benchmarks',
- 'requirements',
'*.egg-info',
)),
description='A RLFH implementation (ChatGPT) powered by ColossalAI',
@@ -30,7 +29,7 @@
long_description_content_type='text/markdown',
license='Apache Software License 2.0',
url='https://github.com/hpcaitech/ChatGPT',
- install_requires=fetch_requirements('requirements/requirements.txt'),
+ install_requires=fetch_requirements('requirements.txt'),
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
| {"golden_diff": "diff --git a/applications/ChatGPT/setup.py b/applications/ChatGPT/setup.py\n--- a/applications/ChatGPT/setup.py\n+++ b/applications/ChatGPT/setup.py\n@@ -22,7 +22,6 @@\n packages=find_packages(exclude=(\n 'tests',\n 'benchmarks',\n- 'requirements',\n '*.egg-info',\n )),\n description='A RLFH implementation (ChatGPT) powered by ColossalAI',\n@@ -30,7 +29,7 @@\n long_description_content_type='text/markdown',\n license='Apache Software License 2.0',\n url='https://github.com/hpcaitech/ChatGPT',\n- install_requires=fetch_requirements('requirements/requirements.txt'),\n+ install_requires=fetch_requirements('requirements.txt'),\n python_requires='>=3.6',\n classifiers=[\n 'Programming Language :: Python :: 3',\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ndef fetch_readme():\n with open('README.md', encoding='utf-8') as f:\n return f.read()\n\n\ndef fetch_version():\n with open('version.txt', 'r') as f:\n return f.read().strip()\n\n\nsetup(\n name='chatgpt',\n version=fetch_version(),\n packages=find_packages(exclude=(\n 'tests',\n 'benchmarks',\n 'requirements',\n '*.egg-info',\n )),\n description='A RLFH implementation (ChatGPT) powered by ColossalAI',\n long_description=fetch_readme(),\n long_description_content_type='text/markdown',\n license='Apache Software License 2.0',\n url='https://github.com/hpcaitech/ChatGPT',\n install_requires=fetch_requirements('requirements/requirements.txt'),\n python_requires='>=3.6',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: Apache Software License',\n 'Environment :: GPU :: NVIDIA CUDA',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: System :: Distributed Computing',\n ],\n)\n", "path": "applications/ChatGPT/setup.py"}]} | 918 | 199 |
gh_patches_debug_49256 | rasdani/github-patches | git_diff | pyro-ppl__pyro-388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test batch_size parameter is correctly applied for all distributions
With #323, we are now supporting `batch_size` parameter for both the class and functional forms of both the distributions. Let us add a simple test to validate that this is working as expected.
</issue>
<code>
[start of pyro/distributions/log_normal.py]
1 import numpy as np
2 import torch
3 from torch.autograd import Variable
4
5 from pyro.distributions.distribution import Distribution
6
7
8 class LogNormal(Distribution):
9 """
10 :param mu: mean *(vector)*
11 :param sigma: standard deviations *(vector (0, Infinity))*
12
13 A distribution over probability vectors obtained by exp-transforming a random
14 variable drawn from ``Normal({mu: mu, sigma: sigma})``.
15 """
16 reparameterized = True
17
18 def __init__(self, mu, sigma, batch_size=None, *args, **kwargs):
19 """
20 Params:
21 `mu` - mean
22 `sigma` - root variance
23 """
24 self.mu = mu
25 self.sigma = sigma
26 if mu.size() != sigma.size():
27 raise ValueError("Expected mu.size() == sigma.size(), but got {} vs {}"
28 .format(mu.size(), sigma.size()))
29 if mu.dim() == 1 and batch_size is not None:
30 self.mu = mu.expand(batch_size, mu.size(0))
31 self.sigma = sigma.expand(batch_size, sigma.size(0))
32 super(LogNormal, self).__init__(*args, **kwargs)
33
34 def batch_shape(self, x=None):
35 event_dim = 1
36 mu = self.mu
37 if x is not None and x.size() != mu.size():
38 mu = self.mu.expand(x.size()[:-event_dim] + self.event_shape())
39 return mu.size()[:-event_dim]
40
41 def event_shape(self):
42 event_dim = 1
43 return self.mu.size()[-event_dim:]
44
45 def shape(self, x=None):
46 return self.batch_shape(x) + self.event_shape()
47
48 def sample(self):
49 """
50 Reparameterized log-normal sampler.
51 """
52 eps = Variable(torch.randn(1).type_as(self.mu.data))
53 z = self.mu + self.sigma * eps
54 return torch.exp(z)
55
56 def batch_log_pdf(self, x):
57 """
58 log-normal log-likelihood
59 """
60 mu = self.mu.expand(self.shape(x))
61 sigma = self.sigma.expand(self.shape(x))
62 ll_1 = Variable(torch.Tensor([-0.5 * np.log(2.0 * np.pi)])
63 .type_as(mu.data).expand_as(x))
64 ll_2 = -torch.log(sigma * x)
65 ll_3 = -0.5 * torch.pow((torch.log(x) - mu) / sigma, 2.0)
66 batch_log_pdf = torch.sum(ll_1 + ll_2 + ll_3, -1)
67 batch_log_pdf_shape = self.batch_shape(x) + (1,)
68 return batch_log_pdf.contiguous().view(batch_log_pdf_shape)
69
70 def analytic_mean(self):
71 return torch.exp(self.mu + 0.5 * torch.pow(self.sigma, 2.0))
72
73 def analytic_var(self):
74 return (torch.exp(torch.pow(self.sigma, 2.0)) - Variable(torch.ones(1))) * \
75 torch.pow(self.analytic_mean(), 2)
76
[end of pyro/distributions/log_normal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/distributions/log_normal.py b/pyro/distributions/log_normal.py
--- a/pyro/distributions/log_normal.py
+++ b/pyro/distributions/log_normal.py
@@ -49,7 +49,7 @@
"""
Reparameterized log-normal sampler.
"""
- eps = Variable(torch.randn(1).type_as(self.mu.data))
+ eps = Variable(torch.randn(self.mu.size()).type_as(self.mu.data))
z = self.mu + self.sigma * eps
return torch.exp(z)
| {"golden_diff": "diff --git a/pyro/distributions/log_normal.py b/pyro/distributions/log_normal.py\n--- a/pyro/distributions/log_normal.py\n+++ b/pyro/distributions/log_normal.py\n@@ -49,7 +49,7 @@\n \"\"\"\n Reparameterized log-normal sampler.\n \"\"\"\n- eps = Variable(torch.randn(1).type_as(self.mu.data))\n+ eps = Variable(torch.randn(self.mu.size()).type_as(self.mu.data))\n z = self.mu + self.sigma * eps\n return torch.exp(z)\n", "issue": "Test batch_size parameter is correctly applied for all distributions\nWith #323, we are now supporting `batch_size` parameter for both the class and functional forms of both the distributions. Let us add a simple test to validate that this is working as expected. \n", "before_files": [{"content": "import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\n\n\nclass LogNormal(Distribution):\n \"\"\"\n :param mu: mean *(vector)*\n :param sigma: standard deviations *(vector (0, Infinity))*\n\n A distribution over probability vectors obtained by exp-transforming a random\n variable drawn from ``Normal({mu: mu, sigma: sigma})``.\n \"\"\"\n reparameterized = True\n\n def __init__(self, mu, sigma, batch_size=None, *args, **kwargs):\n \"\"\"\n Params:\n `mu` - mean\n `sigma` - root variance\n \"\"\"\n self.mu = mu\n self.sigma = sigma\n if mu.size() != sigma.size():\n raise ValueError(\"Expected mu.size() == sigma.size(), but got {} vs {}\"\n .format(mu.size(), sigma.size()))\n if mu.dim() == 1 and batch_size is not None:\n self.mu = mu.expand(batch_size, mu.size(0))\n self.sigma = sigma.expand(batch_size, sigma.size(0))\n super(LogNormal, self).__init__(*args, **kwargs)\n\n def batch_shape(self, x=None):\n event_dim = 1\n mu = self.mu\n if x is not None and x.size() != mu.size():\n mu = self.mu.expand(x.size()[:-event_dim] + self.event_shape())\n return mu.size()[:-event_dim]\n\n def event_shape(self):\n event_dim = 1\n return self.mu.size()[-event_dim:]\n\n def shape(self, x=None):\n return self.batch_shape(x) + self.event_shape()\n\n def sample(self):\n \"\"\"\n Reparameterized log-normal sampler.\n \"\"\"\n eps = Variable(torch.randn(1).type_as(self.mu.data))\n z = self.mu + self.sigma * eps\n return torch.exp(z)\n\n def batch_log_pdf(self, x):\n \"\"\"\n log-normal log-likelihood\n \"\"\"\n mu = self.mu.expand(self.shape(x))\n sigma = self.sigma.expand(self.shape(x))\n ll_1 = Variable(torch.Tensor([-0.5 * np.log(2.0 * np.pi)])\n .type_as(mu.data).expand_as(x))\n ll_2 = -torch.log(sigma * x)\n ll_3 = -0.5 * torch.pow((torch.log(x) - mu) / sigma, 2.0)\n batch_log_pdf = torch.sum(ll_1 + ll_2 + ll_3, -1)\n batch_log_pdf_shape = self.batch_shape(x) + (1,)\n return batch_log_pdf.contiguous().view(batch_log_pdf_shape)\n\n def analytic_mean(self):\n return torch.exp(self.mu + 0.5 * torch.pow(self.sigma, 2.0))\n\n def analytic_var(self):\n return (torch.exp(torch.pow(self.sigma, 2.0)) - Variable(torch.ones(1))) * \\\n torch.pow(self.analytic_mean(), 2)\n", "path": "pyro/distributions/log_normal.py"}]} | 1,386 | 116 |
gh_patches_debug_32700 | rasdani/github-patches | git_diff | mne-tools__mne-python-6252 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot pickle mne.preprocessing.ica.ICA anymore
With the current master (installed via `pip install git+https://github.com/mne-tools/mne-python.git#egg=mne`), I get an error when pickling an `mne.preprocessing.ica.ICA` object:
```
import pickle
import mne
print(mne.__version__)
ica = mne.preprocessing.ICA()
raw = mne.io.read_raw_edf("/Users/clemens/Downloads/testfiles/S001R04.edf", preload=True)
ica.fit(raw)
pickle.loads(pickle.dumps(ica))
# Traceback (most recent call last):
# File "/Users/clemens/Repositories/mnelab/.venv/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3296, in run_code
# exec(code_obj, self.user_global_ns, self.user_ns)
# File "<ipython-input-8-a056623ee708>", line 1, in <module>
# pickle.loads(pickle.dumps(ica))
# TypeError: __new__() missing 1 required positional argument: 'val'
```
The same code works with the latest released version 0.17.2. I didn't see any change to the `ICA` object that might be causing this (but maybe I'm missing something). If indeed nothing changed, could this behavior be due to how pip installs stuff from GitHub (vs. "proper" packages from PyPI)? If so, how can I fix this (I need to pickle because I want to use the `multiprocessing` module)?
</issue>
<code>
[start of mne/utils/_bunch.py]
1 # -*- coding: utf-8 -*-
2 """Bunch-related classes."""
3 # Authors: Alexandre Gramfort <[email protected]>
4 # Eric Larson <[email protected]>
5 #
6 # License: BSD (3-clause)
7
8 from copy import deepcopy
9
10
11 ###############################################################################
12 # Create a Bunch class that acts like a struct (mybunch.key = val )
13
14 class Bunch(dict):
15 """Dictionnary-like object thatexposes its keys as attributes."""
16
17 def __init__(self, **kwargs): # noqa: D102
18 dict.__init__(self, kwargs)
19 self.__dict__ = self
20
21
22 ###############################################################################
23 # A protected version that prevents overwriting
24
25 class BunchConst(Bunch):
26 """Class to prevent us from re-defining constants (DRY)."""
27
28 def __setattr__(self, attr, val): # noqa: D105
29 if attr != '__dict__' and hasattr(self, attr):
30 raise AttributeError('Attribute "%s" already set' % attr)
31 super().__setattr__(attr, val)
32
33
34 ###############################################################################
35 # A version that tweaks the __repr__ of its values based on keys
36
37 class BunchConstNamed(BunchConst):
38 """Class to provide nice __repr__ for our integer constants.
39
40 Only supports string keys and int or float values.
41 """
42
43 def __setattr__(self, attr, val): # noqa: D105
44 assert isinstance(attr, str)
45 if isinstance(val, int):
46 val = NamedInt(attr, val)
47 elif isinstance(val, float):
48 val = NamedFloat(attr, val)
49 else:
50 assert isinstance(val, BunchConstNamed), type(val)
51 super().__setattr__(attr, val)
52
53
54 class Named(object):
55 """Provide shared methods for giving named-representation subclasses."""
56
57 def __str__(self): # noqa: D105
58 return '%s (%s)' % (super().__str__(), self._name)
59
60 __repr__ = __str__
61
62 # see https://stackoverflow.com/a/15774013/2175965
63 def __copy__(self): # noqa: D105
64 cls = self.__class__
65 result = cls.__new__(cls)
66 result.__dict__.update(self.__dict__)
67 return result
68
69 def __deepcopy__(self, memo): # noqa: D105
70 cls = self.__class__
71 result = cls.__new__(cls, self._name, self)
72 memo[id(self)] = result
73 for k, v in self.__dict__.items():
74 setattr(result, k, deepcopy(v, memo))
75 return result
76
77
78 class NamedInt(Named, int):
79 """Int with a name in __repr__."""
80
81 def __new__(cls, name, val): # noqa: D102,D105
82 out = int.__new__(cls, val)
83 out._name = name
84 return out
85
86
87 class NamedFloat(Named, float):
88 """Float with a name in __repr__."""
89
90 def __new__(cls, name, val): # noqa: D102,D105
91 out = float.__new__(cls, val)
92 out._name = name
93 return out
94
[end of mne/utils/_bunch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mne/utils/_bunch.py b/mne/utils/_bunch.py
--- a/mne/utils/_bunch.py
+++ b/mne/utils/_bunch.py
@@ -9,10 +9,10 @@
###############################################################################
-# Create a Bunch class that acts like a struct (mybunch.key = val )
+# Create a Bunch class that acts like a struct (mybunch.key = val)
class Bunch(dict):
- """Dictionnary-like object thatexposes its keys as attributes."""
+ """Dictionary-like object that exposes its keys as attributes."""
def __init__(self, **kwargs): # noqa: D102
dict.__init__(self, kwargs)
@@ -51,9 +51,14 @@
super().__setattr__(attr, val)
-class Named(object):
+class _Named(object):
"""Provide shared methods for giving named-representation subclasses."""
+ def __new__(cls, name, val): # noqa: D102,D105
+ out = _named_subclass(cls).__new__(cls, val)
+ out._name = name
+ return out
+
def __str__(self): # noqa: D105
return '%s (%s)' % (super().__str__(), self._name)
@@ -74,20 +79,25 @@
setattr(result, k, deepcopy(v, memo))
return result
+ def __getnewargs__(self): # noqa: D105
+ return self._name, _named_subclass(self)(self)
+
+
+def _named_subclass(klass):
+ if not isinstance(klass, type):
+ klass = klass.__class__
+ subklass = klass.mro()[-2]
+ assert subklass in (int, float)
+ return subklass
+
-class NamedInt(Named, int):
+class NamedInt(_Named, int):
"""Int with a name in __repr__."""
- def __new__(cls, name, val): # noqa: D102,D105
- out = int.__new__(cls, val)
- out._name = name
- return out
+ pass
-class NamedFloat(Named, float):
+class NamedFloat(_Named, float):
"""Float with a name in __repr__."""
- def __new__(cls, name, val): # noqa: D102,D105
- out = float.__new__(cls, val)
- out._name = name
- return out
+ pass
| {"golden_diff": "diff --git a/mne/utils/_bunch.py b/mne/utils/_bunch.py\n--- a/mne/utils/_bunch.py\n+++ b/mne/utils/_bunch.py\n@@ -9,10 +9,10 @@\n \n \n ###############################################################################\n-# Create a Bunch class that acts like a struct (mybunch.key = val )\n+# Create a Bunch class that acts like a struct (mybunch.key = val)\n \n class Bunch(dict):\n- \"\"\"Dictionnary-like object thatexposes its keys as attributes.\"\"\"\n+ \"\"\"Dictionary-like object that exposes its keys as attributes.\"\"\"\n \n def __init__(self, **kwargs): # noqa: D102\n dict.__init__(self, kwargs)\n@@ -51,9 +51,14 @@\n super().__setattr__(attr, val)\n \n \n-class Named(object):\n+class _Named(object):\n \"\"\"Provide shared methods for giving named-representation subclasses.\"\"\"\n \n+ def __new__(cls, name, val): # noqa: D102,D105\n+ out = _named_subclass(cls).__new__(cls, val)\n+ out._name = name\n+ return out\n+\n def __str__(self): # noqa: D105\n return '%s (%s)' % (super().__str__(), self._name)\n \n@@ -74,20 +79,25 @@\n setattr(result, k, deepcopy(v, memo))\n return result\n \n+ def __getnewargs__(self): # noqa: D105\n+ return self._name, _named_subclass(self)(self)\n+\n+\n+def _named_subclass(klass):\n+ if not isinstance(klass, type):\n+ klass = klass.__class__\n+ subklass = klass.mro()[-2]\n+ assert subklass in (int, float)\n+ return subklass\n+\n \n-class NamedInt(Named, int):\n+class NamedInt(_Named, int):\n \"\"\"Int with a name in __repr__.\"\"\"\n \n- def __new__(cls, name, val): # noqa: D102,D105\n- out = int.__new__(cls, val)\n- out._name = name\n- return out\n+ pass\n \n \n-class NamedFloat(Named, float):\n+class NamedFloat(_Named, float):\n \"\"\"Float with a name in __repr__.\"\"\"\n \n- def __new__(cls, name, val): # noqa: D102,D105\n- out = float.__new__(cls, val)\n- out._name = name\n- return out\n+ pass\n", "issue": "Cannot pickle mne.preprocessing.ica.ICA anymore\nWith the current master (installed via `pip install git+https://github.com/mne-tools/mne-python.git#egg=mne`), I get an error when pickling an `mne.preprocessing.ica.ICA` object:\r\n\r\n```\r\nimport pickle\r\nimport mne\r\n\r\n\r\nprint(mne.__version__)\r\nica = mne.preprocessing.ICA()\r\nraw = mne.io.read_raw_edf(\"/Users/clemens/Downloads/testfiles/S001R04.edf\", preload=True)\r\nica.fit(raw)\r\npickle.loads(pickle.dumps(ica))\r\n# Traceback (most recent call last):\r\n# File \"/Users/clemens/Repositories/mnelab/.venv/lib/python3.7/site-packages/IPython/core/interactiveshell.py\", line 3296, in run_code\r\n# exec(code_obj, self.user_global_ns, self.user_ns)\r\n# File \"<ipython-input-8-a056623ee708>\", line 1, in <module>\r\n# pickle.loads(pickle.dumps(ica))\r\n# TypeError: __new__() missing 1 required positional argument: 'val'\r\n```\r\n\r\nThe same code works with the latest released version 0.17.2. I didn't see any change to the `ICA` object that might be causing this (but maybe I'm missing something). If indeed nothing changed, could this behavior be due to how pip installs stuff from GitHub (vs. \"proper\" packages from PyPI)? 
If so, how can I fix this (I need to pickle because I want to use the `multiprocessing` module)?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Bunch-related classes.\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom copy import deepcopy\n\n\n###############################################################################\n# Create a Bunch class that acts like a struct (mybunch.key = val )\n\nclass Bunch(dict):\n \"\"\"Dictionnary-like object thatexposes its keys as attributes.\"\"\"\n\n def __init__(self, **kwargs): # noqa: D102\n dict.__init__(self, kwargs)\n self.__dict__ = self\n\n\n###############################################################################\n# A protected version that prevents overwriting\n\nclass BunchConst(Bunch):\n \"\"\"Class to prevent us from re-defining constants (DRY).\"\"\"\n\n def __setattr__(self, attr, val): # noqa: D105\n if attr != '__dict__' and hasattr(self, attr):\n raise AttributeError('Attribute \"%s\" already set' % attr)\n super().__setattr__(attr, val)\n\n\n###############################################################################\n# A version that tweaks the __repr__ of its values based on keys\n\nclass BunchConstNamed(BunchConst):\n \"\"\"Class to provide nice __repr__ for our integer constants.\n\n Only supports string keys and int or float values.\n \"\"\"\n\n def __setattr__(self, attr, val): # noqa: D105\n assert isinstance(attr, str)\n if isinstance(val, int):\n val = NamedInt(attr, val)\n elif isinstance(val, float):\n val = NamedFloat(attr, val)\n else:\n assert isinstance(val, BunchConstNamed), type(val)\n super().__setattr__(attr, val)\n\n\nclass Named(object):\n \"\"\"Provide shared methods for giving named-representation subclasses.\"\"\"\n\n def __str__(self): # noqa: D105\n return '%s (%s)' % (super().__str__(), self._name)\n\n __repr__ = __str__\n\n # see https://stackoverflow.com/a/15774013/2175965\n def __copy__(self): # noqa: D105\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n def __deepcopy__(self, memo): # noqa: D105\n cls = self.__class__\n result = cls.__new__(cls, self._name, self)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, deepcopy(v, memo))\n return result\n\n\nclass NamedInt(Named, int):\n \"\"\"Int with a name in __repr__.\"\"\"\n\n def __new__(cls, name, val): # noqa: D102,D105\n out = int.__new__(cls, val)\n out._name = name\n return out\n\n\nclass NamedFloat(Named, float):\n \"\"\"Float with a name in __repr__.\"\"\"\n\n def __new__(cls, name, val): # noqa: D102,D105\n out = float.__new__(cls, val)\n out._name = name\n return out\n", "path": "mne/utils/_bunch.py"}]} | 1,793 | 584 |
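The root cause is generic Python behavior, not mne-specific: with pickle protocol 2 and above, an `int` or `float` subclass whose `__new__` takes extra arguments is recreated via `cls.__new__(cls)` with no arguments unless it defines `__getnewargs__`, which is exactly the hook the patch adds. A standalone distillation of that mechanism (the constant name is hypothetical):

```python
import pickle

class NamedInt(int):
    def __new__(cls, name, val):
        out = int.__new__(cls, val)
        out._name = name
        return out

    def __getnewargs__(self):
        # pickle will call NamedInt(*these_args) when loading
        return (self._name, int(self))

x = NamedInt('FIFF_EXAMPLE', 42)  # hypothetical constant name
y = pickle.loads(pickle.dumps(x))
assert y == 42 and y._name == 'FIFF_EXAMPLE'
# Without __getnewargs__, loading raises:
# TypeError: __new__() missing 1 required positional argument: 'val'
```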
gh_patches_debug_14974 | rasdani/github-patches | git_diff | enthought__chaco-423 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Demo world_map.py does not work on python 2
**Problem Description**
urllib.request deprecated in Python 2
**Reproduction Steps:**
Run world_map.py
**Expected behavior:**
Traceback (most recent call last):
File "world_map.py", line 15, in <module>
import urllib.request, urllib.parse, urllib.error
ImportError: No module named request
**OS, Python version:** [Enter OS name and Python version]
OSX, Python 2.7
xref: #385
</issue>
<code>
[start of examples/demo/world_map.py]
1 #!/usr/bin/env python
2 """
3 Displays a world map with locations plotted on top. Locations are expected to
4 be tuples of latitude, longitude where West and South are expressed as
5 negative values.
6 - Mousewheel up and down zooms the plot in and out.
7 - Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
8 region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
9 and alt-right-arrow moves you forwards and backwards through the "zoom
10 history".
11 """
12
13 # Standard library imports
14 import os.path
15 import urllib.request, urllib.parse, urllib.error
16
17 # Major library imports
18 import numpy
19
20 # ETS imports
21 from chaco.api import Plot, ArrayPlotData, ImageData
22 from chaco.tools.api import ZoomTool
23 from enable.component_editor import ComponentEditor
24 from traits.api import HasTraits, Instance, Str
25 from traitsui.api import Item, View
26
27 class WorldMapPlot(HasTraits):
28
29 ### Public Traits ##########################################################
30
31 # The plot which will be displayed
32 plot = Instance(Plot)
33
34 # The URL which points to the world map image to be downloaded
35 image_url = Str("http://eoimages.gsfc.nasa.gov/ve//2433/land_shallow_topo_2048.jpg")
36
37
38 ### Private Traits #########################################################
39
40 # The path to where the image exists on the filesystem
41 image_path = Str()
42
43 # The view
44 traits_view = View(Item('plot', editor=ComponentEditor(),
45 width=800, height=400, show_label=False),
46 resizable=True)
47
48 #---------------------------------------------------------------------------
49 # Public interface
50 #---------------------------------------------------------------------------
51
52 def __init__(self, **kw):
53 super(WorldMapPlot, self).__init__(**kw)
54
55 self._download_map_image()
56 image = ImageData.fromfile(self.image_path)
57
58 # For now, the locations are hardcoded, though this can be changed
59 # eassily to take command line args, read from a file, or by other
60 # means
61 austin_loc = (30.16, -97.44)
62
63 locations_x = numpy.array([austin_loc[1]])
64 locations_y = numpy.array([austin_loc[0]])
65
66 # transform each of the locations to the image data space, including
67 # moving the origin from bottom left to top left
68 locations_x = (locations_x + 180) * image.data.shape[1]/360
69 locations_y = (locations_y*-1 + 90) * image.data.shape[0]/180
70
71 # Create the plott data, adding the image and the locations
72 plot_data = ArrayPlotData()
73 plot_data.set_data("imagedata", image._data)
74 plot_data.set_data("locations_x", locations_x)
75 plot_data.set_data("locations_y", locations_y)
76
77 # Create the plot with the origin as top left, which matches
78 # how the image data is aligned
79 self.plot = Plot(plot_data, default_origin="top left")
80 self.plot.img_plot('imagedata')
81
82 # Plot the locations as a scatter plot to be overlayed on top
83 # of the map
84 loc_plot = self.plot.plot(('locations_x', 'locations_y'),
85 type='scatter', size=3, color='yellow',
86 marker='dot')[0]
87
88 loc_plot.x_mapper.range.high = image.data.shape[1]
89 loc_plot.x_mapper.range.low = 0
90 loc_plot.y_mapper.range.high = image.data.shape[0]
91 loc_plot.y_mapper.range.low = -0
92
93 # set up any tools, in this case just the zoom tool
94 zoom = ZoomTool(component=self.plot, tool_mode="box", always_on=False)
95 self.plot.overlays.append(zoom)
96
97 #---------------------------------------------------------------------------
98 # Protected interface
99 #---------------------------------------------------------------------------
100
101 def _download_map_image(self):
102 """ Downloads a map from the image_url attribute. This is done
103 primarily to keep the redistributable Chaco package as small
104 as possible
105 """
106 example_dir = os.path.dirname(__file__)
107 self.image_path = os.path.join(example_dir, 'data',
108 os.path.split(self.image_url)[1])
109
110 if not os.path.exists(self.image_path):
111 print("Downloading map image")
112 urllib.request.urlretrieve(self.image_url, self.image_path)
113
114 #===============================================================================
115 # demo object that is used by the demo.py application.
116 #===============================================================================
117 demo = WorldMapPlot()
118
119 if __name__ == "__main__":
120 demo.configure_traits()
121
[end of examples/demo/world_map.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/demo/world_map.py b/examples/demo/world_map.py
--- a/examples/demo/world_map.py
+++ b/examples/demo/world_map.py
@@ -12,7 +12,7 @@
# Standard library imports
import os.path
-import urllib.request, urllib.parse, urllib.error
+from six.moves.urllib import request
# Major library imports
import numpy
@@ -109,7 +109,7 @@
if not os.path.exists(self.image_path):
print("Downloading map image")
- urllib.request.urlretrieve(self.image_url, self.image_path)
+ request.urlretrieve(self.image_url, self.image_path)
#===============================================================================
# demo object that is used by the demo.py application.
| {"golden_diff": "diff --git a/examples/demo/world_map.py b/examples/demo/world_map.py\n--- a/examples/demo/world_map.py\n+++ b/examples/demo/world_map.py\n@@ -12,7 +12,7 @@\n \n # Standard library imports\n import os.path\n-import urllib.request, urllib.parse, urllib.error\n+from six.moves.urllib import request\n \n # Major library imports\n import numpy\n@@ -109,7 +109,7 @@\n \n if not os.path.exists(self.image_path):\n print(\"Downloading map image\")\n- urllib.request.urlretrieve(self.image_url, self.image_path)\n+ request.urlretrieve(self.image_url, self.image_path)\n \n #===============================================================================\n # demo object that is used by the demo.py application.\n", "issue": "Demo world_map.py does not work on python 2\n**Problem Description**\r\nurllib.request deprecated in Python 2\r\n**Reproduction Steps:**\r\nRun world_map.py\r\n\r\n**Expected behavior:**\r\n\r\nTraceback (most recent call last):\r\n File \"world_map.py\", line 15, in <module>\r\n import urllib.request, urllib.parse, urllib.error\r\nImportError: No module named request\r\n\r\n**OS, Python version:** [Enter OS name and Python version]\r\nOSX, Python 2.7\r\n\r\nxref: #385\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nDisplays a world map with locations plotted on top. Locations are expected to\nbe tuples of latitude, longitude where West and South are expressed as\nnegative values.\n - Mousewheel up and down zooms the plot in and out.\n - Pressing \"z\" brings up the Zoom Box, and you can click-drag a rectangular\n region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow\n and alt-right-arrow moves you forwards and backwards through the \"zoom\n history\".\n\"\"\"\n\n# Standard library imports\nimport os.path\nimport urllib.request, urllib.parse, urllib.error\n\n# Major library imports\nimport numpy\n\n# ETS imports\nfrom chaco.api import Plot, ArrayPlotData, ImageData\nfrom chaco.tools.api import ZoomTool\nfrom enable.component_editor import ComponentEditor\nfrom traits.api import HasTraits, Instance, Str\nfrom traitsui.api import Item, View\n\nclass WorldMapPlot(HasTraits):\n\n ### Public Traits ##########################################################\n\n # The plot which will be displayed\n plot = Instance(Plot)\n\n # The URL which points to the world map image to be downloaded\n image_url = Str(\"http://eoimages.gsfc.nasa.gov/ve//2433/land_shallow_topo_2048.jpg\")\n\n\n ### Private Traits #########################################################\n\n # The path to where the image exists on the filesystem\n image_path = Str()\n\n # The view\n traits_view = View(Item('plot', editor=ComponentEditor(),\n width=800, height=400, show_label=False),\n resizable=True)\n\n #---------------------------------------------------------------------------\n # Public interface\n #---------------------------------------------------------------------------\n\n def __init__(self, **kw):\n super(WorldMapPlot, self).__init__(**kw)\n\n self._download_map_image()\n image = ImageData.fromfile(self.image_path)\n\n # For now, the locations are hardcoded, though this can be changed\n # eassily to take command line args, read from a file, or by other\n # means\n austin_loc = (30.16, -97.44)\n\n locations_x = numpy.array([austin_loc[1]])\n locations_y = numpy.array([austin_loc[0]])\n\n # transform each of the locations to the image data space, including\n # moving the origin from bottom left to top left\n locations_x = (locations_x + 180) * 
image.data.shape[1]/360\n locations_y = (locations_y*-1 + 90) * image.data.shape[0]/180\n\n # Create the plott data, adding the image and the locations\n plot_data = ArrayPlotData()\n plot_data.set_data(\"imagedata\", image._data)\n plot_data.set_data(\"locations_x\", locations_x)\n plot_data.set_data(\"locations_y\", locations_y)\n\n # Create the plot with the origin as top left, which matches\n # how the image data is aligned\n self.plot = Plot(plot_data, default_origin=\"top left\")\n self.plot.img_plot('imagedata')\n\n # Plot the locations as a scatter plot to be overlayed on top\n # of the map\n loc_plot = self.plot.plot(('locations_x', 'locations_y'),\n type='scatter', size=3, color='yellow',\n marker='dot')[0]\n\n loc_plot.x_mapper.range.high = image.data.shape[1]\n loc_plot.x_mapper.range.low = 0\n loc_plot.y_mapper.range.high = image.data.shape[0]\n loc_plot.y_mapper.range.low = -0\n\n # set up any tools, in this case just the zoom tool\n zoom = ZoomTool(component=self.plot, tool_mode=\"box\", always_on=False)\n self.plot.overlays.append(zoom)\n\n #---------------------------------------------------------------------------\n # Protected interface\n #---------------------------------------------------------------------------\n\n def _download_map_image(self):\n \"\"\" Downloads a map from the image_url attribute. This is done\n primarily to keep the redistributable Chaco package as small\n as possible\n \"\"\"\n example_dir = os.path.dirname(__file__)\n self.image_path = os.path.join(example_dir, 'data',\n os.path.split(self.image_url)[1])\n\n if not os.path.exists(self.image_path):\n print(\"Downloading map image\")\n urllib.request.urlretrieve(self.image_url, self.image_path)\n\n#===============================================================================\n# demo object that is used by the demo.py application.\n#===============================================================================\ndemo = WorldMapPlot()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/demo/world_map.py"}]} | 1,908 | 157 |
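The `six.moves` indirection in the fix is worth seeing in isolation: the same import line resolves to `urllib.urlretrieve` on Python 2 and to `urllib.request.urlretrieve` on Python 3. A minimal sketch using the URL already hard-coded in the demo (the output filename is a placeholder):

```python
from six.moves.urllib import request

image_url = 'http://eoimages.gsfc.nasa.gov/ve//2433/land_shallow_topo_2048.jpg'
request.urlretrieve(image_url, 'land_shallow_topo_2048.jpg')  # identical call on py2 and py3
```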
gh_patches_debug_37650 | rasdani/github-patches | git_diff | mirumee__ariadne-30 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
make_executable_schema resolvers arg should accept dict of dicts or list of dicts of dicts
The `make_executable_schema` utility should optionally take list of dicts of dicts (AKA "resolvers map"), this would allow larger projects to easily split and compose resolvers as needed:
```python
from ariadne import make_executable_schema
from products.graphql import resolvers as products_resolvers
from users.graphql import resolvers as users_resolvers
typedefs = "..."
resolvers = [products_resolvers, users_resolvers]
schema = make_executable_schema(typedefs, resolvers)
```
This task will likely require #13 to be done first, so we are 100% certain that all resolver mappings are dicts.
</issue>
<code>
[start of ariadne/executable_schema.py]
1 from typing import List, Union
2
3 from graphql import GraphQLSchema
4
5 from .build_schema import build_schema_from_type_definitions
6 from .resolvers import add_resolve_functions_to_schema
7
8
9 def make_executable_schema(
10 type_defs: Union[str, List[str]], resolvers: dict
11 ) -> GraphQLSchema:
12 schema = build_schema_from_type_definitions(type_defs)
13 add_resolve_functions_to_schema(schema, resolvers)
14 return schema
15
[end of ariadne/executable_schema.py]
[start of ariadne/build_schema.py]
1 from typing import List, Union
2
3 from graphql import GraphQLSchema, parse
4 from graphql.language.ast import (
5 Document,
6 ObjectTypeDefinition,
7 OperationTypeDefinition,
8 Name,
9 NamedType,
10 SchemaDefinition,
11 )
12 from graphql.utils.build_ast_schema import build_ast_schema
13
14
15 def build_schema_from_type_definitions(
16 type_defs: Union[str, List[str]]
17 ) -> GraphQLSchema:
18 if isinstance(type_defs, list):
19 type_defs = concatenate_type_defs(type_defs)
20
21 document = parse(type_defs)
22
23 if not document_has_schema(document):
24 schema_definition = build_default_schema(document)
25 document.definitions.append(schema_definition)
26
27 return build_ast_schema(document)
28
29
30 def concatenate_type_defs(type_defs: List[str]) -> str:
31 resolved_type_defs = []
32 for type_def in type_defs:
33 resolved_type_defs.append(type_def.strip())
34 return "\n\n".join(resolved_type_defs)
35
36
37 def build_default_schema(document: Document) -> SchemaDefinition:
38 defined_types = [
39 td.name.value
40 for td in document.definitions
41 if isinstance(td, ObjectTypeDefinition)
42 ]
43 operations = []
44 if "Query" in defined_types:
45 operations.append(
46 OperationTypeDefinition("query", type=NamedType(name=Name("Query")))
47 )
48 if "Mutation" in defined_types:
49 operations.append(
50 OperationTypeDefinition("mutation", type=NamedType(name=Name("Mutation")))
51 )
52 if "Subscription" in defined_types:
53 operations.append(
54 OperationTypeDefinition(
55 "subscription", type=NamedType(name=Name("Subscription"))
56 )
57 )
58 return SchemaDefinition(operation_types=operations, directives=[])
59
60
61 def document_has_schema(document: Document) -> bool:
62 return any(isinstance(td, SchemaDefinition) for td in document.definitions)
63
[end of ariadne/build_schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/build_schema.py b/ariadne/build_schema.py
--- a/ariadne/build_schema.py
+++ b/ariadne/build_schema.py
@@ -1,5 +1,3 @@
-from typing import List, Union
-
from graphql import GraphQLSchema, parse
from graphql.language.ast import (
Document,
@@ -12,12 +10,7 @@
from graphql.utils.build_ast_schema import build_ast_schema
-def build_schema_from_type_definitions(
- type_defs: Union[str, List[str]]
-) -> GraphQLSchema:
- if isinstance(type_defs, list):
- type_defs = concatenate_type_defs(type_defs)
-
+def build_schema_from_type_definitions(type_defs: str) -> GraphQLSchema:
document = parse(type_defs)
if not document_has_schema(document):
@@ -27,13 +20,6 @@
return build_ast_schema(document)
-def concatenate_type_defs(type_defs: List[str]) -> str:
- resolved_type_defs = []
- for type_def in type_defs:
- resolved_type_defs.append(type_def.strip())
- return "\n\n".join(resolved_type_defs)
-
-
def build_default_schema(document: Document) -> SchemaDefinition:
defined_types = [
td.name.value
diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py
--- a/ariadne/executable_schema.py
+++ b/ariadne/executable_schema.py
@@ -1,4 +1,6 @@
-from typing import List, Union
+from collections import defaultdict
+from itertools import chain
+from typing import Iterator, List, Union
from graphql import GraphQLSchema
@@ -6,9 +8,39 @@
from .resolvers import add_resolve_functions_to_schema
+def decompose_maps(resolvers_maps: List[dict]) -> Iterator[tuple]:
+ def flatten(rm):
+ for key, value in rm.items():
+ for resolver_name, resolver in value.items():
+ yield (key, resolver_name, resolver)
+
+ return chain.from_iterable(flatten(m) for m in resolvers_maps)
+
+
+def merge_resolvers(resolver_list: Iterator[tuple]) -> dict:
+ output = defaultdict(dict) # type: dict
+ for key, resolver_name, resolver in resolver_list:
+ output[key][resolver_name] = resolver
+ return output
+
+
+def join_type_defs(type_defs: List[str]) -> str:
+ return "\n\n".join(t.strip() for t in type_defs)
+
+
def make_executable_schema(
- type_defs: Union[str, List[str]], resolvers: dict
+ type_defs: Union[str, List[str]], resolvers: Union[dict, List[dict]]
) -> GraphQLSchema:
+ if isinstance(type_defs, list):
+ type_defs = join_type_defs(type_defs)
+
schema = build_schema_from_type_definitions(type_defs)
- add_resolve_functions_to_schema(schema, resolvers)
+
+ if isinstance(resolvers, list):
+ add_resolve_functions_to_schema(
+ schema, merge_resolvers(decompose_maps(resolvers))
+ )
+ elif isinstance(resolvers, dict):
+ add_resolve_functions_to_schema(schema, resolvers)
+
return schema
| {"golden_diff": "diff --git a/ariadne/build_schema.py b/ariadne/build_schema.py\n--- a/ariadne/build_schema.py\n+++ b/ariadne/build_schema.py\n@@ -1,5 +1,3 @@\n-from typing import List, Union\n-\n from graphql import GraphQLSchema, parse\n from graphql.language.ast import (\n Document,\n@@ -12,12 +10,7 @@\n from graphql.utils.build_ast_schema import build_ast_schema\n \n \n-def build_schema_from_type_definitions(\n- type_defs: Union[str, List[str]]\n-) -> GraphQLSchema:\n- if isinstance(type_defs, list):\n- type_defs = concatenate_type_defs(type_defs)\n-\n+def build_schema_from_type_definitions(type_defs: str) -> GraphQLSchema:\n document = parse(type_defs)\n \n if not document_has_schema(document):\n@@ -27,13 +20,6 @@\n return build_ast_schema(document)\n \n \n-def concatenate_type_defs(type_defs: List[str]) -> str:\n- resolved_type_defs = []\n- for type_def in type_defs:\n- resolved_type_defs.append(type_def.strip())\n- return \"\\n\\n\".join(resolved_type_defs)\n-\n-\n def build_default_schema(document: Document) -> SchemaDefinition:\n defined_types = [\n td.name.value\ndiff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -1,4 +1,6 @@\n-from typing import List, Union\n+from collections import defaultdict\n+from itertools import chain\n+from typing import Iterator, List, Union\n \n from graphql import GraphQLSchema\n \n@@ -6,9 +8,39 @@\n from .resolvers import add_resolve_functions_to_schema\n \n \n+def decompose_maps(resolvers_maps: List[dict]) -> Iterator[tuple]:\n+ def flatten(rm):\n+ for key, value in rm.items():\n+ for resolver_name, resolver in value.items():\n+ yield (key, resolver_name, resolver)\n+\n+ return chain.from_iterable(flatten(m) for m in resolvers_maps)\n+\n+\n+def merge_resolvers(resolver_list: Iterator[tuple]) -> dict:\n+ output = defaultdict(dict) # type: dict\n+ for key, resolver_name, resolver in resolver_list:\n+ output[key][resolver_name] = resolver\n+ return output\n+\n+\n+def join_type_defs(type_defs: List[str]) -> str:\n+ return \"\\n\\n\".join(t.strip() for t in type_defs)\n+\n+\n def make_executable_schema(\n- type_defs: Union[str, List[str]], resolvers: dict\n+ type_defs: Union[str, List[str]], resolvers: Union[dict, List[dict]]\n ) -> GraphQLSchema:\n+ if isinstance(type_defs, list):\n+ type_defs = join_type_defs(type_defs)\n+\n schema = build_schema_from_type_definitions(type_defs)\n- add_resolve_functions_to_schema(schema, resolvers)\n+\n+ if isinstance(resolvers, list):\n+ add_resolve_functions_to_schema(\n+ schema, merge_resolvers(decompose_maps(resolvers))\n+ )\n+ elif isinstance(resolvers, dict):\n+ add_resolve_functions_to_schema(schema, resolvers)\n+\n return schema\n", "issue": "make_executable_schema resolvers arg should accept dict of dicts or list of dicts of dicts\nThe `make_executable_schema` utility should optionally take list of dicts of dicts (AKA \"resolvers map\"), this would allow larger projects to easily split and compose resolvers as needed:\r\n\r\n```python\r\nfrom ariadne import make_executable_schema\r\n\r\nfrom products.graphql import resolvers as products_resolvers\r\nfrom users.graphql import resolvers as users_resolvers\r\n\r\n\r\ntypedefs = \"...\"\r\nresolvers = [products_resolvers, users_resolvers]\r\n\r\nschema = make_executable_schema(typedefs, resolvers)\r\n```\r\n\r\nThis task will likely require #13 to be done first, so we are 100% certain that all resolver mappings are dicts.\n", "before_files": [{"content": "from typing 
import List, Union\n\nfrom graphql import GraphQLSchema\n\nfrom .build_schema import build_schema_from_type_definitions\nfrom .resolvers import add_resolve_functions_to_schema\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]], resolvers: dict\n) -> GraphQLSchema:\n schema = build_schema_from_type_definitions(type_defs)\n add_resolve_functions_to_schema(schema, resolvers)\n return schema\n", "path": "ariadne/executable_schema.py"}, {"content": "from typing import List, Union\n\nfrom graphql import GraphQLSchema, parse\nfrom graphql.language.ast import (\n Document,\n ObjectTypeDefinition,\n OperationTypeDefinition,\n Name,\n NamedType,\n SchemaDefinition,\n)\nfrom graphql.utils.build_ast_schema import build_ast_schema\n\n\ndef build_schema_from_type_definitions(\n type_defs: Union[str, List[str]]\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = concatenate_type_defs(type_defs)\n\n document = parse(type_defs)\n\n if not document_has_schema(document):\n schema_definition = build_default_schema(document)\n document.definitions.append(schema_definition)\n\n return build_ast_schema(document)\n\n\ndef concatenate_type_defs(type_defs: List[str]) -> str:\n resolved_type_defs = []\n for type_def in type_defs:\n resolved_type_defs.append(type_def.strip())\n return \"\\n\\n\".join(resolved_type_defs)\n\n\ndef build_default_schema(document: Document) -> SchemaDefinition:\n defined_types = [\n td.name.value\n for td in document.definitions\n if isinstance(td, ObjectTypeDefinition)\n ]\n operations = []\n if \"Query\" in defined_types:\n operations.append(\n OperationTypeDefinition(\"query\", type=NamedType(name=Name(\"Query\")))\n )\n if \"Mutation\" in defined_types:\n operations.append(\n OperationTypeDefinition(\"mutation\", type=NamedType(name=Name(\"Mutation\")))\n )\n if \"Subscription\" in defined_types:\n operations.append(\n OperationTypeDefinition(\n \"subscription\", type=NamedType(name=Name(\"Subscription\"))\n )\n )\n return SchemaDefinition(operation_types=operations, directives=[])\n\n\ndef document_has_schema(document: Document) -> bool:\n return any(isinstance(td, SchemaDefinition) for td in document.definitions)\n", "path": "ariadne/build_schema.py"}]} | 1,322 | 710 |
gh_patches_debug_10331 | rasdani/github-patches | git_diff | pytorch__vision-2086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pretrain models for FCN and DeepLabV3
Is it possible to provide the pretrain weight for deeplabv3 and FCN?
right now when i do:
`model = torchvision.models.segmentation.fcn_resnet50(pretrained=True)`
I receive this:
> NotImplementedError: pretrained fcn_resnet50_coco is not supported as of now
</issue>
<code>
[start of torchvision/models/segmentation/segmentation.py]
1 from .._utils import IntermediateLayerGetter
2 from ..utils import load_state_dict_from_url
3 from .. import resnet
4 from .deeplabv3 import DeepLabHead, DeepLabV3
5 from .fcn import FCN, FCNHead
6
7
8 __all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101']
9
10
11 model_urls = {
12 'fcn_resnet50_coco': None,
13 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
14 'deeplabv3_resnet50_coco': None,
15 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
16 }
17
18
19 def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):
20 backbone = resnet.__dict__[backbone_name](
21 pretrained=pretrained_backbone,
22 replace_stride_with_dilation=[False, True, True])
23
24 return_layers = {'layer4': 'out'}
25 if aux:
26 return_layers['layer3'] = 'aux'
27 backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
28
29 aux_classifier = None
30 if aux:
31 inplanes = 1024
32 aux_classifier = FCNHead(inplanes, num_classes)
33
34 model_map = {
35 'deeplabv3': (DeepLabHead, DeepLabV3),
36 'fcn': (FCNHead, FCN),
37 }
38 inplanes = 2048
39 classifier = model_map[name][0](inplanes, num_classes)
40 base_model = model_map[name][1]
41
42 model = base_model(backbone, classifier, aux_classifier)
43 return model
44
45
46 def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):
47 if pretrained:
48 aux_loss = True
49 model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)
50 if pretrained:
51 arch = arch_type + '_' + backbone + '_coco'
52 model_url = model_urls[arch]
53 if model_url is None:
54 raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
55 else:
56 state_dict = load_state_dict_from_url(model_url, progress=progress)
57 model.load_state_dict(state_dict)
58 return model
59
60
61 def fcn_resnet50(pretrained=False, progress=True,
62 num_classes=21, aux_loss=None, **kwargs):
63 """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.
64
65 Args:
66 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
67 contains the same classes as Pascal VOC
68 progress (bool): If True, displays a progress bar of the download to stderr
69 """
70 return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
71
72
73 def fcn_resnet101(pretrained=False, progress=True,
74 num_classes=21, aux_loss=None, **kwargs):
75 """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.
76
77 Args:
78 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
79 contains the same classes as Pascal VOC
80 progress (bool): If True, displays a progress bar of the download to stderr
81 """
82 return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
83
84
85 def deeplabv3_resnet50(pretrained=False, progress=True,
86 num_classes=21, aux_loss=None, **kwargs):
87 """Constructs a DeepLabV3 model with a ResNet-50 backbone.
88
89 Args:
90 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
91 contains the same classes as Pascal VOC
92 progress (bool): If True, displays a progress bar of the download to stderr
93 """
94 return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)
95
96
97 def deeplabv3_resnet101(pretrained=False, progress=True,
98 num_classes=21, aux_loss=None, **kwargs):
99 """Constructs a DeepLabV3 model with a ResNet-101 backbone.
100
101 Args:
102 pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
103 contains the same classes as Pascal VOC
104 progress (bool): If True, displays a progress bar of the download to stderr
105 """
106 return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
107
[end of torchvision/models/segmentation/segmentation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py
--- a/torchvision/models/segmentation/segmentation.py
+++ b/torchvision/models/segmentation/segmentation.py
@@ -9,9 +9,9 @@
model_urls = {
- 'fcn_resnet50_coco': None,
+ 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',
'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
- 'deeplabv3_resnet50_coco': None,
+ 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',
'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
}
| {"golden_diff": "diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py\n--- a/torchvision/models/segmentation/segmentation.py\n+++ b/torchvision/models/segmentation/segmentation.py\n@@ -9,9 +9,9 @@\n \n \n model_urls = {\n- 'fcn_resnet50_coco': None,\n+ 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',\n 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',\n- 'deeplabv3_resnet50_coco': None,\n+ 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',\n 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',\n }\n", "issue": "pretrain models for FCN and DeepLabV3\nIs it possible to provide the pretrain weight for deeplabv3 and FCN?\r\nright now when i do:\r\n`model = torchvision.models.segmentation.fcn_resnet50(pretrained=True)`\r\nI receive this:\r\n\r\n> NotImplementedError: pretrained fcn_resnet50_coco is not supported as of now\n", "before_files": [{"content": "from .._utils import IntermediateLayerGetter\nfrom ..utils import load_state_dict_from_url\nfrom .. import resnet\nfrom .deeplabv3 import DeepLabHead, DeepLabV3\nfrom .fcn import FCN, FCNHead\n\n\n__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101']\n\n\nmodel_urls = {\n 'fcn_resnet50_coco': None,\n 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',\n 'deeplabv3_resnet50_coco': None,\n 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',\n}\n\n\ndef _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained_backbone,\n replace_stride_with_dilation=[False, True, True])\n\n return_layers = {'layer4': 'out'}\n if aux:\n return_layers['layer3'] = 'aux'\n backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n aux_classifier = None\n if aux:\n inplanes = 1024\n aux_classifier = FCNHead(inplanes, num_classes)\n\n model_map = {\n 'deeplabv3': (DeepLabHead, DeepLabV3),\n 'fcn': (FCNHead, FCN),\n }\n inplanes = 2048\n classifier = model_map[name][0](inplanes, num_classes)\n base_model = model_map[name][1]\n\n model = base_model(backbone, classifier, aux_classifier)\n return model\n\n\ndef _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):\n if pretrained:\n aux_loss = True\n model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)\n if pretrained:\n arch = arch_type + '_' + backbone + '_coco'\n model_url = model_urls[arch]\n if model_url is None:\n raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))\n else:\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef fcn_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef fcn_resnet101(pretrained=False, progress=True,\n num_classes=21, 
aux_loss=None, **kwargs):\n \"\"\"Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet50(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-50 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)\n\n\ndef deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=21, aux_loss=None, **kwargs):\n \"\"\"Constructs a DeepLabV3 model with a ResNet-101 backbone.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017 which\n contains the same classes as Pascal VOC\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)\n", "path": "torchvision/models/segmentation/segmentation.py"}]} | 1,967 | 269 |
gh_patches_debug_28995 | rasdani/github-patches | git_diff | vacanza__python-holidays-1020 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update MANIFEST.in
Clean up and make sure `sdist` contains all needed files.
</issue>
<code>
[start of setup.py]
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2023
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 import os
13 import subprocess
14 import sys
15 from pathlib import Path
16
17 from setuptools import setup
18
19
20 def generate_mo_files():
21 """Looks up for .po files and generates respective .mo files."""
22 for po_path in Path(os.path.join("holidays", "locale")).rglob("*.po"):
23 po_file = str(po_path)
24 mo_file = po_file.replace(".po", ".mo")
25
26 if os.path.exists(mo_file):
27 os.unlink(mo_file)
28 subprocess.run(
29 (
30 sys.executable,
31 os.path.join("scripts", "l10n", "msgfmt.py"),
32 "-o",
33 mo_file,
34 po_file,
35 ),
36 check=True,
37 )
38
39
40 if __name__ == "__main__":
41 generate_mo_files()
42 setup()
43
[end of setup.py]
[start of scripts/l10n/generate_mo_files.py]
1 #!/usr/bin/env python3
2
3 # python-holidays
4 # ---------------
5 # A fast, efficient Python library for generating country, province and state
6 # specific sets of holidays on the fly. It aims to make determining whether a
7 # specific date is a holiday as fast and flexible as possible.
8 #
9 # Authors: dr-prodigy <[email protected]> (c) 2017-2023
10 # ryanss <[email protected]> (c) 2014-2017
11 # Website: https://github.com/dr-prodigy/python-holidays
12 # License: MIT (see LICENSE file)
13
14 import os
15 import subprocess
16 import sys
17 from pathlib import Path
18
19
20 class MOGenerator:
21 """Creates .mo files for supported country/market entities."""
22
23 def run(self):
24 """Runs the .mo files generation process."""
25 # Delete old files.
26 for mo_file in Path(os.path.join("holidays", "locale")).rglob("*.mo"):
27 os.unlink(str(mo_file))
28
29 # Create new files.
30 for po_path in Path(os.path.join("holidays", "locale")).rglob("*.po"):
31 po_file = str(po_path)
32 mo_file = po_file.replace(".po", ".mo")
33 subprocess.run(
34 (
35 sys.executable,
36 os.path.join("scripts", "l10n", "msgfmt.py"),
37 "-o",
38 mo_file,
39 po_file,
40 ),
41 check=True,
42 )
43
44
45 if __name__ == "__main__":
46 MOGenerator().run()
47
[end of scripts/l10n/generate_mo_files.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/l10n/generate_mo_files.py b/scripts/l10n/generate_mo_files.py
--- a/scripts/l10n/generate_mo_files.py
+++ b/scripts/l10n/generate_mo_files.py
@@ -22,14 +22,12 @@
def run(self):
"""Runs the .mo files generation process."""
- # Delete old files.
- for mo_file in Path(os.path.join("holidays", "locale")).rglob("*.mo"):
- os.unlink(str(mo_file))
-
- # Create new files.
for po_path in Path(os.path.join("holidays", "locale")).rglob("*.po"):
po_file = str(po_path)
mo_file = po_file.replace(".po", ".mo")
+
+ if os.path.exists(mo_file):
+ os.unlink(mo_file)
subprocess.run(
(
sys.executable,
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,34 +9,7 @@
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
-import os
-import subprocess
-import sys
-from pathlib import Path
-
from setuptools import setup
-
-def generate_mo_files():
- """Looks up for .po files and generates respective .mo files."""
- for po_path in Path(os.path.join("holidays", "locale")).rglob("*.po"):
- po_file = str(po_path)
- mo_file = po_file.replace(".po", ".mo")
-
- if os.path.exists(mo_file):
- os.unlink(mo_file)
- subprocess.run(
- (
- sys.executable,
- os.path.join("scripts", "l10n", "msgfmt.py"),
- "-o",
- mo_file,
- po_file,
- ),
- check=True,
- )
-
-
if __name__ == "__main__":
- generate_mo_files()
setup()
| {"golden_diff": "diff --git a/scripts/l10n/generate_mo_files.py b/scripts/l10n/generate_mo_files.py\n--- a/scripts/l10n/generate_mo_files.py\n+++ b/scripts/l10n/generate_mo_files.py\n@@ -22,14 +22,12 @@\n \n def run(self):\n \"\"\"Runs the .mo files generation process.\"\"\"\n- # Delete old files.\n- for mo_file in Path(os.path.join(\"holidays\", \"locale\")).rglob(\"*.mo\"):\n- os.unlink(str(mo_file))\n-\n- # Create new files.\n for po_path in Path(os.path.join(\"holidays\", \"locale\")).rglob(\"*.po\"):\n po_file = str(po_path)\n mo_file = po_file.replace(\".po\", \".mo\")\n+\n+ if os.path.exists(mo_file):\n+ os.unlink(mo_file)\n subprocess.run(\n (\n sys.executable,\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,34 +9,7 @@\n # Website: https://github.com/dr-prodigy/python-holidays\n # License: MIT (see LICENSE file)\n \n-import os\n-import subprocess\n-import sys\n-from pathlib import Path\n-\n from setuptools import setup\n \n-\n-def generate_mo_files():\n- \"\"\"Looks up for .po files and generates respective .mo files.\"\"\"\n- for po_path in Path(os.path.join(\"holidays\", \"locale\")).rglob(\"*.po\"):\n- po_file = str(po_path)\n- mo_file = po_file.replace(\".po\", \".mo\")\n-\n- if os.path.exists(mo_file):\n- os.unlink(mo_file)\n- subprocess.run(\n- (\n- sys.executable,\n- os.path.join(\"scripts\", \"l10n\", \"msgfmt.py\"),\n- \"-o\",\n- mo_file,\n- po_file,\n- ),\n- check=True,\n- )\n-\n-\n if __name__ == \"__main__\":\n- generate_mo_files()\n setup()\n", "issue": "Update MANIFEST.in\nClean up and make sure `sdist` contains all needed files.\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef generate_mo_files():\n \"\"\"Looks up for .po files and generates respective .mo files.\"\"\"\n for po_path in Path(os.path.join(\"holidays\", \"locale\")).rglob(\"*.po\"):\n po_file = str(po_path)\n mo_file = po_file.replace(\".po\", \".mo\")\n\n if os.path.exists(mo_file):\n os.unlink(mo_file)\n subprocess.run(\n (\n sys.executable,\n os.path.join(\"scripts\", \"l10n\", \"msgfmt.py\"),\n \"-o\",\n mo_file,\n po_file,\n ),\n check=True,\n )\n\n\nif __name__ == \"__main__\":\n generate_mo_files()\n setup()\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python3\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\n\nclass MOGenerator:\n \"\"\"Creates .mo files for supported country/market entities.\"\"\"\n\n def run(self):\n \"\"\"Runs the .mo files generation process.\"\"\"\n # Delete old files.\n for mo_file in Path(os.path.join(\"holidays\", \"locale\")).rglob(\"*.mo\"):\n os.unlink(str(mo_file))\n\n # Create new files.\n for po_path in Path(os.path.join(\"holidays\", \"locale\")).rglob(\"*.po\"):\n po_file = str(po_path)\n mo_file = po_file.replace(\".po\", \".mo\")\n subprocess.run(\n (\n sys.executable,\n os.path.join(\"scripts\", \"l10n\", \"msgfmt.py\"),\n \"-o\",\n mo_file,\n po_file,\n ),\n check=True,\n )\n\n\nif __name__ == \"__main__\":\n MOGenerator().run()\n", "path": "scripts/l10n/generate_mo_files.py"}]} | 1,408 | 446 |
gh_patches_debug_35484 | rasdani/github-patches | git_diff | DDMAL__CantusDB-271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"My sources" panel should appear on homepage
In OldCantus on the homepage when a user is logged in, a list of sources appears in a "My sources" box in the left sidebar. This box replaces the "jump to source" and "what's new" boxes that are visible when a user is not logged in.
In NewCantus, the "jump to source" and "what's new" boxes appear whether or not a user is logged in. Behavior on NewCantus should be changed to match that of OldCantus
</issue>
<code>
[start of django/cantusdb_project/main_app/templatetags/helper_tags.py]
1 import calendar
2 from typing import Union, Optional
3 from django.utils.http import urlencode
4 from django import template
5 from main_app.models import Source
6 from articles.models import Article
7 from django.utils.safestring import mark_safe
8
9 register = template.Library()
10
11
12 @register.simple_tag(takes_context=False)
13 def recent_articles():
14 articles = Article.objects.order_by("-date_created")[:5]
15 list_item_template = '<li><a href="{url}">{title}</a><br><small>{date}</small></li>'
16 list_items = [
17 list_item_template.format(
18 url=a.get_absolute_url(),
19 title=a.title,
20 date=a.date_created.strftime("%x"),
21 )
22 for a
23 in articles
24 ]
25 list_items_string = "".join(list_items)
26 recent_articles_string = "<ul>{lis}</ul>".format(lis=list_items_string)
27 return mark_safe(recent_articles_string)
28
29
30 @register.filter(name="month_to_string")
31 def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:
32 """Converts month number to textual representation, 3 letters (Jan, Mar, etc)"""
33 if type(value) == int and value in range(1, 13):
34 return calendar.month_abbr[value]
35 else:
36 return value
37
38
39 @register.simple_tag(takes_context=True)
40 def url_add_get_params(context, **kwargs):
41 query = context["request"].GET.copy()
42 # accounts for the situations where there may be two paginations in one page
43 if "page" in kwargs:
44 query.pop("page", None)
45 if "page2" in kwargs:
46 query.pop("page2", None)
47 query.update(kwargs)
48 return query.urlencode()
49
50
51 @register.simple_tag(takes_context=False)
52 def source_links():
53 sources = (
54 Source.objects.filter(public=True, visible=True, segment__id=4063)
55 .exclude(siglum=None)
56 .values("siglum", "id")
57 .order_by("siglum")
58 )
59 options = ""
60 # <option value="source1">Source 1</option>
61 # <option value="source2">Source 2</option>
62 # <option value="source3">Source 3</option>
63 for source in sources:
64 option_str = (
65 f"<option value=source/{source['id']}>{source['siglum']}</option>\n"
66 )
67 options += option_str
68
69 return mark_safe(options)
70
71
72
73 @register.filter
74 def classname(obj):
75 """
76 Returns the name of the object's class
77 A use-case is: {% if object|classname == "Notation" %}
78 """
79 return obj.__class__.__name__
80
81 @register.filter
82 def admin_url_name(class_name, action):
83 """
84 Accepts a class name and an action (either "change" or "delete") as arguments.
85 Returns the name of the URL for changing/deleting an object in the admin interface.
86 """
87 class_name = class_name.lower()
88 action = action.lower()
89
90 return f"admin:main_app_{class_name}_{action}"
91
92 @register.filter(name='has_group')
93 def has_group(user, group_name):
94 return user.groups.filter(name=group_name).exists()
95
[end of django/cantusdb_project/main_app/templatetags/helper_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py
--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py
+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py
@@ -5,6 +5,7 @@
from main_app.models import Source
from articles.models import Article
from django.utils.safestring import mark_safe
+from django.urls import reverse
register = template.Library()
@@ -27,6 +28,57 @@
return mark_safe(recent_articles_string)
[email protected]_tag(takes_context=False)
+def my_sources(user):
+ def make_source_detail_link_with_siglum(source):
+ id = source.id
+ siglum = source.rism_siglum
+ url = reverse("source-detail", args=[id])
+ link = '<a href="{}">{}</a>'.format(url, siglum)
+ return link
+ def make_source_detail_link_with_title(source):
+ id = source.id
+ title = source.title
+ url = reverse("source-detail", args=[id])
+ link = '<a href="{}">{}</a>'.format(url, title)
+ return link
+ def make_add_new_chants_link(source):
+ id = source.id
+ url = reverse("chant-create", args=[id])
+ link = '<a href="{}">+ Add new chant</a>'.format(url)
+ return link
+ def make_edit_chants_link(source):
+ id = source.id
+ url = reverse("source-edit-volpiano", args=[id])
+ link = '<a href="{}">Edit chants (Fulltext & Volpiano editor)</a>'.format(url)
+ return link
+ def make_links_for_source(source):
+ link_with_siglum = make_source_detail_link_with_siglum(source)
+ link_with_title = make_source_detail_link_with_title(source)
+ add_new_chants_link = make_add_new_chants_link(source)
+ edit_chants_link = make_edit_chants_link(source)
+ template = """{sigl}<br>
+ <small>
+ <b>{title}</b><br>
+ {add}<br>
+ {edit}<br>
+ </small>
+ """
+ links_string = template.format(
+ sigl=link_with_siglum,
+ title=link_with_title,
+ add=add_new_chants_link,
+ edit=edit_chants_link,
+ )
+ return links_string
+ MAX_SOURCES_TO_DISPLAY = 6
+ sources = list(user.sources_user_can_edit.all())[:MAX_SOURCES_TO_DISPLAY]
+ source_links = [make_links_for_source(source) for source in sources]
+ list_items = ["<li>{}</li>".format(link) for link in source_links]
+ joined_list_items = "".join(list_items)
+ links_ul = "<ul>{}</ul>".format(joined_list_items)
+ return mark_safe(links_ul)
+
@register.filter(name="month_to_string")
def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:
"""Converts month number to textual representation, 3 letters (Jan, Mar, etc)"""
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py\n+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n@@ -5,6 +5,7 @@\n from main_app.models import Source\n from articles.models import Article\n from django.utils.safestring import mark_safe\n+from django.urls import reverse\n \n register = template.Library()\n \n@@ -27,6 +28,57 @@\n return mark_safe(recent_articles_string)\n \n \[email protected]_tag(takes_context=False)\n+def my_sources(user):\n+ def make_source_detail_link_with_siglum(source):\n+ id = source.id\n+ siglum = source.rism_siglum\n+ url = reverse(\"source-detail\", args=[id])\n+ link = '<a href=\"{}\">{}</a>'.format(url, siglum)\n+ return link\n+ def make_source_detail_link_with_title(source):\n+ id = source.id\n+ title = source.title\n+ url = reverse(\"source-detail\", args=[id])\n+ link = '<a href=\"{}\">{}</a>'.format(url, title) \n+ return link\n+ def make_add_new_chants_link(source):\n+ id = source.id\n+ url = reverse(\"chant-create\", args=[id])\n+ link = '<a href=\"{}\">+ Add new chant</a>'.format(url) \n+ return link\n+ def make_edit_chants_link(source):\n+ id = source.id\n+ url = reverse(\"source-edit-volpiano\", args=[id])\n+ link = '<a href=\"{}\">Edit chants (Fulltext & Volpiano editor)</a>'.format(url) \n+ return link\n+ def make_links_for_source(source):\n+ link_with_siglum = make_source_detail_link_with_siglum(source)\n+ link_with_title = make_source_detail_link_with_title(source)\n+ add_new_chants_link = make_add_new_chants_link(source)\n+ edit_chants_link = make_edit_chants_link(source)\n+ template = \"\"\"{sigl}<br>\n+ <small>\n+ <b>{title}</b><br>\n+ {add}<br>\n+ {edit}<br>\n+ </small>\n+ \"\"\"\n+ links_string = template.format(\n+ sigl=link_with_siglum,\n+ title=link_with_title,\n+ add=add_new_chants_link,\n+ edit=edit_chants_link,\n+ )\n+ return links_string\n+ MAX_SOURCES_TO_DISPLAY = 6\n+ sources = list(user.sources_user_can_edit.all())[:MAX_SOURCES_TO_DISPLAY]\n+ source_links = [make_links_for_source(source) for source in sources]\n+ list_items = [\"<li>{}</li>\".format(link) for link in source_links]\n+ joined_list_items = \"\".join(list_items)\n+ links_ul = \"<ul>{}</ul>\".format(joined_list_items)\n+ return mark_safe(links_ul)\n+\n @register.filter(name=\"month_to_string\")\n def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"Converts month number to textual representation, 3 letters (Jan, Mar, etc)\"\"\"\n", "issue": "\"My sources\" panel should appear on homepage\nIn OldCantus on the homepage when a user is logged in, a list of sources appears in a \"My sources\" box in the left sidebar. This box replaces the \"jump to source\" and \"what's new\" boxes that are visible when a user is not logged in.\r\n\r\nIn NewCantus, the \"jump to source\" and \"what's new\" boxes appear whether or not a user is logged in. 
Behavior on NewCantus should be changed to match that of OldCantus\n", "before_files": [{"content": "import calendar\nfrom typing import Union, Optional\nfrom django.utils.http import urlencode\nfrom django import template\nfrom main_app.models import Source\nfrom articles.models import Article\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=False)\ndef recent_articles():\n articles = Article.objects.order_by(\"-date_created\")[:5]\n list_item_template = '<li><a href=\"{url}\">{title}</a><br><small>{date}</small></li>'\n list_items = [\n list_item_template.format(\n url=a.get_absolute_url(),\n title=a.title,\n date=a.date_created.strftime(\"%x\"),\n )\n for a\n in articles\n ]\n list_items_string = \"\".join(list_items)\n recent_articles_string = \"<ul>{lis}</ul>\".format(lis=list_items_string)\n return mark_safe(recent_articles_string)\n\n\[email protected](name=\"month_to_string\")\ndef month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"Converts month number to textual representation, 3 letters (Jan, Mar, etc)\"\"\"\n if type(value) == int and value in range(1, 13):\n return calendar.month_abbr[value]\n else:\n return value\n\n\[email protected]_tag(takes_context=True)\ndef url_add_get_params(context, **kwargs):\n query = context[\"request\"].GET.copy()\n # accounts for the situations where there may be two paginations in one page\n if \"page\" in kwargs:\n query.pop(\"page\", None)\n if \"page2\" in kwargs:\n query.pop(\"page2\", None)\n query.update(kwargs)\n return query.urlencode()\n\n\[email protected]_tag(takes_context=False)\ndef source_links():\n sources = (\n Source.objects.filter(public=True, visible=True, segment__id=4063)\n .exclude(siglum=None)\n .values(\"siglum\", \"id\")\n .order_by(\"siglum\")\n )\n options = \"\"\n # <option value=\"source1\">Source 1</option>\n # <option value=\"source2\">Source 2</option>\n # <option value=\"source3\">Source 3</option>\n for source in sources:\n option_str = (\n f\"<option value=source/{source['id']}>{source['siglum']}</option>\\n\"\n )\n options += option_str\n\n return mark_safe(options)\n\n\n\[email protected]\ndef classname(obj):\n \"\"\"\n Returns the name of the object's class\n A use-case is: {% if object|classname == \"Notation\" %}\n \"\"\"\n return obj.__class__.__name__\n\[email protected]\ndef admin_url_name(class_name, action):\n \"\"\"\n Accepts a class name and an action (either \"change\" or \"delete\") as arguments.\n Returns the name of the URL for changing/deleting an object in the admin interface.\n \"\"\"\n class_name = class_name.lower()\n action = action.lower()\n\n return f\"admin:main_app_{class_name}_{action}\"\n\[email protected](name='has_group') \ndef has_group(user, group_name):\n return user.groups.filter(name=group_name).exists() \n", "path": "django/cantusdb_project/main_app/templatetags/helper_tags.py"}]} | 1,556 | 752 |
gh_patches_debug_24026 | rasdani/github-patches | git_diff | mesonbuild__meson-4627 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
run_target improperly handles command if 'python' is in meson's installation path
My build environment is RHEL 7.6 with the python 3.6 Red Hat software collection enabled in my .bashrc. Meson was installed by running ``pip3.6 install .`` as root in the ``meson-0.49.0/`` folder from a fresh download. (This issue also happens in meson 48.)
I was setting up a custom run_target to run cppcheck with the command
```
run_target('cppcheck', command : ['cppcheck', '--project=' + join_paths(meson.build_root(), 'compile_commands.json')])
```
but noticed that it was failing to run with the following error message:
```
Could not execute command "--project=[build dir]/compile_commands.json". File not found.
FAILED: meson-cppcheck
/opt/rh/rh-python36/root/usr/bin/meson --internal commandrunner [src dir] [build dir] '' /opt/rh/rh-python36/root/usr/bin/meson cppcheck --project=[build dir]/compile_commands.json
ninja: build stopped: subcommand failed
```
I traced the issue for a bit and it the issue appears to be in ``commandrunner.py`` line 61 (ironically the line with the comment "Hack." haha): it looks for the presence of the string ``python`` in the meson path, presumably to catch instances of meson being launched with a ``python3 meson`` call. However it ignores the possibility that python is simply a part of the path to the meson executable. When this happens, the commandrunner script grabs the fifth argument and assumes that it's part of the meson command, which is wrong.
I'm currently working around the issue by inserting an empty argument into the run_target command, like this:
```
run_target('cppcheck', command : ['', 'cppcheck', '--project=' + join_paths(meson.build_root(), 'compile_commands.json')])
```
</issue>
<code>
[start of mesonbuild/scripts/commandrunner.py]
1 # Copyright 2014 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This program is a wrapper to run external commands. It determines
16 what to run, sets up the environment and executes the command."""
17
18 import sys, os, subprocess, shutil, shlex
19
20 def run_command(source_dir, build_dir, subdir, meson_command, command, arguments):
21 env = {'MESON_SOURCE_ROOT': source_dir,
22 'MESON_BUILD_ROOT': build_dir,
23 'MESON_SUBDIR': subdir,
24 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in meson_command + ['introspect']]),
25 }
26 cwd = os.path.join(source_dir, subdir)
27 child_env = os.environ.copy()
28 child_env.update(env)
29
30 # Is the command an executable in path?
31 exe = shutil.which(command)
32 if exe is not None:
33 command_array = [exe] + arguments
34 else:# No? Maybe it is a script in the source tree.
35 fullpath = os.path.join(source_dir, subdir, command)
36 command_array = [fullpath] + arguments
37 try:
38 return subprocess.Popen(command_array, env=child_env, cwd=cwd)
39 except FileNotFoundError:
40 print('Could not execute command "%s". File not found.' % command)
41 sys.exit(1)
42 except PermissionError:
43 print('Could not execute command "%s". File not executable.' % command)
44 sys.exit(1)
45 except OSError as err:
46 print('Could not execute command "{}": {}'.format(command, err))
47 sys.exit(1)
48 except subprocess.SubprocessError as err:
49 print('Could not execute command "{}": {}'.format(command, err))
50 sys.exit(1)
51
52
53 def run(args):
54 if len(args) < 4:
55 print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')
56 return 1
57 src_dir = args[0]
58 build_dir = args[1]
59 subdir = args[2]
60 meson_command = args[3]
61 if 'python' in meson_command: # Hack.
62 meson_command = [meson_command, args[4]]
63 command = args[5]
64 arguments = args[6:]
65 else:
66 meson_command = [meson_command]
67 command = args[4]
68 arguments = args[5:]
69 pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)
70 pc.wait()
71 return pc.returncode
72
73 if __name__ == '__main__':
74 sys.exit(run(sys.argv[1:]))
75
[end of mesonbuild/scripts/commandrunner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesonbuild/scripts/commandrunner.py b/mesonbuild/scripts/commandrunner.py
--- a/mesonbuild/scripts/commandrunner.py
+++ b/mesonbuild/scripts/commandrunner.py
@@ -16,6 +16,7 @@
what to run, sets up the environment and executes the command."""
import sys, os, subprocess, shutil, shlex
+import re
def run_command(source_dir, build_dir, subdir, meson_command, command, arguments):
env = {'MESON_SOURCE_ROOT': source_dir,
@@ -49,6 +50,9 @@
print('Could not execute command "{}": {}'.format(command, err))
sys.exit(1)
+def is_python_command(cmdname):
+ end_py_regex = r'python(3|3\.\d+)?(\.exe)?$'
+ return re.search(end_py_regex, cmdname) is not None
def run(args):
if len(args) < 4:
@@ -58,7 +62,7 @@
build_dir = args[1]
subdir = args[2]
meson_command = args[3]
- if 'python' in meson_command: # Hack.
+ if is_python_command(meson_command):
meson_command = [meson_command, args[4]]
command = args[5]
arguments = args[6:]
| {"golden_diff": "diff --git a/mesonbuild/scripts/commandrunner.py b/mesonbuild/scripts/commandrunner.py\n--- a/mesonbuild/scripts/commandrunner.py\n+++ b/mesonbuild/scripts/commandrunner.py\n@@ -16,6 +16,7 @@\n what to run, sets up the environment and executes the command.\"\"\"\n \n import sys, os, subprocess, shutil, shlex\n+import re\n \n def run_command(source_dir, build_dir, subdir, meson_command, command, arguments):\n env = {'MESON_SOURCE_ROOT': source_dir,\n@@ -49,6 +50,9 @@\n print('Could not execute command \"{}\": {}'.format(command, err))\n sys.exit(1)\n \n+def is_python_command(cmdname):\n+ end_py_regex = r'python(3|3\\.\\d+)?(\\.exe)?$'\n+ return re.search(end_py_regex, cmdname) is not None\n \n def run(args):\n if len(args) < 4:\n@@ -58,7 +62,7 @@\n build_dir = args[1]\n subdir = args[2]\n meson_command = args[3]\n- if 'python' in meson_command: # Hack.\n+ if is_python_command(meson_command):\n meson_command = [meson_command, args[4]]\n command = args[5]\n arguments = args[6:]\n", "issue": "run_target improperly handles command if 'python' is in meson's installation path\nMy build environment is RHEL 7.6 with the python 3.6 Red Hat software collection enabled in my .bashrc. Meson was installed by running ``pip3.6 install .`` as root in the ``meson-0.49.0/`` folder from a fresh download. (This issue also happens in meson 48.)\r\n\r\nI was setting up a custom run_target to run cppcheck with the command\r\n```\r\nrun_target('cppcheck', command : ['cppcheck', '--project=' + join_paths(meson.build_root(), 'compile_commands.json')])\r\n```\r\nbut noticed that it was failing to run with the following error message:\r\n```\r\nCould not execute command \"--project=[build dir]/compile_commands.json\". File not found.\r\nFAILED: meson-cppcheck\r\n/opt/rh/rh-python36/root/usr/bin/meson --internal commandrunner [src dir] [build dir] '' /opt/rh/rh-python36/root/usr/bin/meson cppcheck --project=[build dir]/compile_commands.json\r\nninja: build stopped: subcommand failed\r\n```\r\n\r\nI traced the issue for a bit and it the issue appears to be in ``commandrunner.py`` line 61 (ironically the line with the comment \"Hack.\" haha): it looks for the presence of the string ``python`` in the meson path, presumably to catch instances of meson being launched with a ``python3 meson`` call. However it ignores the possibility that python is simply a part of the path to the meson executable. When this happens, the commandrunner script grabs the fifth argument and assumes that it's part of the meson command, which is wrong.\r\n\r\nI'm currently working around the issue by inserting an empty argument into the run_target command, like this:\r\n```\r\nrun_target('cppcheck', command : ['', 'cppcheck', '--project=' + join_paths(meson.build_root(), 'compile_commands.json')])\r\n```\n", "before_files": [{"content": "# Copyright 2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This program is a wrapper to run external commands. 
It determines\nwhat to run, sets up the environment and executes the command.\"\"\"\n\nimport sys, os, subprocess, shutil, shlex\n\ndef run_command(source_dir, build_dir, subdir, meson_command, command, arguments):\n env = {'MESON_SOURCE_ROOT': source_dir,\n 'MESON_BUILD_ROOT': build_dir,\n 'MESON_SUBDIR': subdir,\n 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in meson_command + ['introspect']]),\n }\n cwd = os.path.join(source_dir, subdir)\n child_env = os.environ.copy()\n child_env.update(env)\n\n # Is the command an executable in path?\n exe = shutil.which(command)\n if exe is not None:\n command_array = [exe] + arguments\n else:# No? Maybe it is a script in the source tree.\n fullpath = os.path.join(source_dir, subdir, command)\n command_array = [fullpath] + arguments\n try:\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\n except FileNotFoundError:\n print('Could not execute command \"%s\". File not found.' % command)\n sys.exit(1)\n except PermissionError:\n print('Could not execute command \"%s\". File not executable.' % command)\n sys.exit(1)\n except OSError as err:\n print('Could not execute command \"{}\": {}'.format(command, err))\n sys.exit(1)\n except subprocess.SubprocessError as err:\n print('Could not execute command \"{}\": {}'.format(command, err))\n sys.exit(1)\n\n\ndef run(args):\n if len(args) < 4:\n print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')\n return 1\n src_dir = args[0]\n build_dir = args[1]\n subdir = args[2]\n meson_command = args[3]\n if 'python' in meson_command: # Hack.\n meson_command = [meson_command, args[4]]\n command = args[5]\n arguments = args[6:]\n else:\n meson_command = [meson_command]\n command = args[4]\n arguments = args[5:]\n pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)\n pc.wait()\n return pc.returncode\n\nif __name__ == '__main__':\n sys.exit(run(sys.argv[1:]))\n", "path": "mesonbuild/scripts/commandrunner.py"}]} | 1,791 | 296 |
gh_patches_debug_7604 | rasdani/github-patches | git_diff | robocorp__rpaframework-617 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`RPA.Windows.Get Elements` not copying the `robocorp_click_offset` attribute
Should make available this attribute in all siblings found elements, otherwise the `Click` keyword will fail when using offsets:
<img width="841" alt="Screen Shot 2022-08-26 at 4 48 38 PM" src="https://user-images.githubusercontent.com/709053/187146331-32d1ea86-7da7-4afe-b8ec-ea9b5289db70.png">
<img width="1391" alt="Screen Shot 2022-08-26 at 4 48 20 PM" src="https://user-images.githubusercontent.com/709053/187146342-b6ae653f-0ae7-4870-bdc9-edba1ef83533.png">
</issue>
<code>
[start of packages/windows/src/RPA/Windows/keywords/locators.py]
1 from typing import List, Optional
2
3 from RPA.core.windows.locators import (
4 Locator,
5 LocatorMethods,
6 WindowsElement,
7 )
8
9 from RPA.Windows.keywords import keyword
10 from RPA.Windows.keywords.context import with_timeout
11
12
13 class LocatorKeywords(LocatorMethods):
14 """Keywords for handling Windows locators"""
15
16 @keyword
17 @with_timeout
18 def get_element(
19 self,
20 locator: Optional[Locator] = None,
21 search_depth: int = 8,
22 root_element: Optional[WindowsElement] = None,
23 timeout: Optional[float] = None, # pylint: disable=unused-argument
24 ) -> WindowsElement:
25 """Get Control element defined by the locator.
26
27 Returned element can be used instead of a locator string for
28 keywords accepting `locator`.
29
30 Keyword ``Get Attribute`` can be used to read element attribute values.
31
32 If `locator` is *None* then returned `element` will be in order of preference:
33
34 1. anchor element if that has been set with `Set Anchor`
35 2. current active window if that has been set with `Control Window`
36 3. final option is the `Desktop`
37
38 :param locator: locator as a string or as an element
39 :param search_depth: how deep the element search will traverse (default 8)
40 :param root_element: can be used to set search root element
41 :param timeout: float value in seconds, see keyword
42 ``Set Global Timeout``
43 :return: WindowsElement object
44
45 Example:
46
47 .. code-block:: robotframework
48
49 ${element} = Get Element name:"RichEdit Control"
50 Set Value ${element} note to myself
51 """
52 # NOTE(cmiN): Timeout is automatically set to `None` in the upper call by the
53 # `with_timeout` decorator, so we alter the behaviour (context timeout
54 # setting) at this level only.
55 return super().get_element(
56 locator=locator,
57 search_depth=search_depth,
58 root_element=root_element,
59 timeout=timeout,
60 )
61
62 @keyword
63 @with_timeout
64 def get_elements(
65 self,
66 locator: Optional[Locator] = None,
67 search_depth: int = 8,
68 root_element: Optional[WindowsElement] = None,
69 timeout: Optional[float] = None, # pylint: disable=unused-argument
70 ) -> List[WindowsElement]:
71 """Get list of elements matching locator.
72
73 :param locator: locator as a string or as an element
74 :param search_depth: how deep the element search will traverse (default 8)
75 :param root_element: can be used to set search root element
76 :param timeout: float value in seconds, see keyword
77 ``Set Global Timeout``
78 :return: list of WindowsElement objects
79
80 Example:
81
82 .. code-block:: robotframework
83
84 Set Anchor id:DataGrid
85 ${elements}= Get Elements type:HeaderItem
86 FOR ${el} IN @{elements}
87 Log To Console ${el.Name}
88 END
89 """
90 elements = []
91 initial_element = element = self.get_element(
92 locator, search_depth, root_element
93 )
94 elements.append(initial_element)
95 while True:
96 next_control = element.item.GetNextSiblingControl()
97 if next_control:
98 element = WindowsElement(next_control, locator)
99 if initial_element.is_sibling(element):
100 elements.append(element)
101 else:
102 break
103 return elements
104
[end of packages/windows/src/RPA/Windows/keywords/locators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/windows/src/RPA/Windows/keywords/locators.py b/packages/windows/src/RPA/Windows/keywords/locators.py
--- a/packages/windows/src/RPA/Windows/keywords/locators.py
+++ b/packages/windows/src/RPA/Windows/keywords/locators.py
@@ -97,6 +97,9 @@
if next_control:
element = WindowsElement(next_control, locator)
if initial_element.is_sibling(element):
+ element.item.robocorp_click_offset = (
+ initial_element.item.robocorp_click_offset
+ )
elements.append(element)
else:
break
| {"golden_diff": "diff --git a/packages/windows/src/RPA/Windows/keywords/locators.py b/packages/windows/src/RPA/Windows/keywords/locators.py\n--- a/packages/windows/src/RPA/Windows/keywords/locators.py\n+++ b/packages/windows/src/RPA/Windows/keywords/locators.py\n@@ -97,6 +97,9 @@\n if next_control:\n element = WindowsElement(next_control, locator)\n if initial_element.is_sibling(element):\n+ element.item.robocorp_click_offset = (\n+ initial_element.item.robocorp_click_offset\n+ )\n elements.append(element)\n else:\n break\n", "issue": "`RPA.Windows.Get Elements` not copying the `robocorp_click_offset` attribute\nShould make available this attribute in all siblings found elements, otherwise the `Click` keyword will fail when using offsets:\r\n\r\n<img width=\"841\" alt=\"Screen Shot 2022-08-26 at 4 48 38 PM\" src=\"https://user-images.githubusercontent.com/709053/187146331-32d1ea86-7da7-4afe-b8ec-ea9b5289db70.png\">\r\n<img width=\"1391\" alt=\"Screen Shot 2022-08-26 at 4 48 20 PM\" src=\"https://user-images.githubusercontent.com/709053/187146342-b6ae653f-0ae7-4870-bdc9-edba1ef83533.png\">\r\n\n", "before_files": [{"content": "from typing import List, Optional\n\nfrom RPA.core.windows.locators import (\n Locator,\n LocatorMethods,\n WindowsElement,\n)\n\nfrom RPA.Windows.keywords import keyword\nfrom RPA.Windows.keywords.context import with_timeout\n\n\nclass LocatorKeywords(LocatorMethods):\n \"\"\"Keywords for handling Windows locators\"\"\"\n\n @keyword\n @with_timeout\n def get_element(\n self,\n locator: Optional[Locator] = None,\n search_depth: int = 8,\n root_element: Optional[WindowsElement] = None,\n timeout: Optional[float] = None, # pylint: disable=unused-argument\n ) -> WindowsElement:\n \"\"\"Get Control element defined by the locator.\n\n Returned element can be used instead of a locator string for\n keywords accepting `locator`.\n\n Keyword ``Get Attribute`` can be used to read element attribute values.\n\n If `locator` is *None* then returned `element` will be in order of preference:\n\n 1. anchor element if that has been set with `Set Anchor`\n 2. current active window if that has been set with `Control Window`\n 3. final option is the `Desktop`\n\n :param locator: locator as a string or as an element\n :param search_depth: how deep the element search will traverse (default 8)\n :param root_element: can be used to set search root element\n :param timeout: float value in seconds, see keyword\n ``Set Global Timeout``\n :return: WindowsElement object\n\n Example:\n\n .. 
code-block:: robotframework\n\n ${element} = Get Element name:\"RichEdit Control\"\n Set Value ${element} note to myself\n \"\"\"\n # NOTE(cmiN): Timeout is automatically set to `None` in the upper call by the\n # `with_timeout` decorator, so we alter the behaviour (context timeout\n # setting) at this level only.\n return super().get_element(\n locator=locator,\n search_depth=search_depth,\n root_element=root_element,\n timeout=timeout,\n )\n\n @keyword\n @with_timeout\n def get_elements(\n self,\n locator: Optional[Locator] = None,\n search_depth: int = 8,\n root_element: Optional[WindowsElement] = None,\n timeout: Optional[float] = None, # pylint: disable=unused-argument\n ) -> List[WindowsElement]:\n \"\"\"Get list of elements matching locator.\n\n :param locator: locator as a string or as an element\n :param search_depth: how deep the element search will traverse (default 8)\n :param root_element: can be used to set search root element\n :param timeout: float value in seconds, see keyword\n ``Set Global Timeout``\n :return: list of WindowsElement objects\n\n Example:\n\n .. code-block:: robotframework\n\n Set Anchor id:DataGrid\n ${elements}= Get Elements type:HeaderItem\n FOR ${el} IN @{elements}\n Log To Console ${el.Name}\n END\n \"\"\"\n elements = []\n initial_element = element = self.get_element(\n locator, search_depth, root_element\n )\n elements.append(initial_element)\n while True:\n next_control = element.item.GetNextSiblingControl()\n if next_control:\n element = WindowsElement(next_control, locator)\n if initial_element.is_sibling(element):\n elements.append(element)\n else:\n break\n return elements\n", "path": "packages/windows/src/RPA/Windows/keywords/locators.py"}]} | 1,738 | 138 |
gh_patches_debug_29378 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5018 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_338 does not account for 0 (keep forever) & has insufficient error message
**Describe the issue**
CKV_AWS_338 is a check for the number of retention days for a CloudWatch log group. However, the actual check source code requires the retention not just to be specified, but also to be at least 1 year.
1 - The message "Ensure that CloudWatch Log Group specifies retention days" does not reflect the need for the value to not only be specified, but also to be of sufficient length (or provide a reference to a web page with more detail on the check)
2 - 0 is a valid value and means that logs are always retained and never expire, which should result in passing the "must retain for at least 1 year" rule.
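For illustration, a sketch of a compliant check condition (not the actual Checkov source; it simply mirrors the two points above):

```python
def retention_is_compliant(retention_in_days: int) -> bool:
    # 0 means CloudWatch keeps the events forever, which satisfies any
    # minimum-retention rule; otherwise require at least one year.
    return retention_in_days == 0 or retention_in_days >= 365
```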
**Examples**
Please share an example code sample (in the IaC of your choice) + the expected outcomes.
**Version (please complete the following information):**
2.3.212
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py]
1 from __future__ import annotations
2
3 from typing import Any
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
7
8
9 class CloudWatchLogGroupRetentionYear(BaseResourceCheck):
10 def __init__(self) -> None:
11 """
12 NIST.800-53.r5 AU-10, NIST.800-53.r5 AU-11, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),
13 NIST.800-53.r5 CA-7, NIST.800-53.r5 SI-12
14 CloudWatch log groups should be retained for at least 1 year
15 """
16 name = "Ensure that CloudWatch Log Group specifies retention days"
17 id = "CKV_AWS_338"
18 supported_resource = ("aws_cloudwatch_log_group",)
19 categories = (CheckCategories.LOGGING,)
20 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
21
22 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
23 retention = conf.get("retention_in_days")
24 if retention and isinstance(retention, list):
25 retention = retention[0]
26 if not isinstance(retention, int):
27 # probably a dependent variable
28 return CheckResult.UNKNOWN
29
30 if retention >= 365:
31 return CheckResult.PASSED
32
33 return CheckResult.FAILED
34
35
36 check = CloudWatchLogGroupRetentionYear()
37
[end of checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py b/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py
--- a/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py
+++ b/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py
@@ -3,7 +3,8 @@
from typing import Any
from checkov.common.models.enums import CheckCategories, CheckResult
-from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+from checkov.terraform.checks.resource.base_resource_check import \
+ BaseResourceCheck
class CloudWatchLogGroupRetentionYear(BaseResourceCheck):
@@ -13,7 +14,7 @@
NIST.800-53.r5 CA-7, NIST.800-53.r5 SI-12
CloudWatch log groups should be retained for at least 1 year
"""
- name = "Ensure that CloudWatch Log Group specifies retention days"
+ name = "Ensure CloudWatch log groups retains logs for at least 1 year"
id = "CKV_AWS_338"
supported_resource = ("aws_cloudwatch_log_group",)
categories = (CheckCategories.LOGGING,)
@@ -26,8 +27,8 @@
if not isinstance(retention, int):
# probably a dependent variable
return CheckResult.UNKNOWN
-
- if retention >= 365:
+ # If you select 0, the events in the log group are always retained and never expire.
+ if retention == 0 or retention >= 365:
return CheckResult.PASSED
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py b/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py\n--- a/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py\n+++ b/checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py\n@@ -3,7 +3,8 @@\n from typing import Any\n \n from checkov.common.models.enums import CheckCategories, CheckResult\n-from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n+from checkov.terraform.checks.resource.base_resource_check import \\\n+ BaseResourceCheck\n \n \n class CloudWatchLogGroupRetentionYear(BaseResourceCheck):\n@@ -13,7 +14,7 @@\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SI-12\n CloudWatch log groups should be retained for at least 1 year\n \"\"\"\n- name = \"Ensure that CloudWatch Log Group specifies retention days\"\n+ name = \"Ensure CloudWatch log groups retains logs for at least 1 year\"\n id = \"CKV_AWS_338\"\n supported_resource = (\"aws_cloudwatch_log_group\",)\n categories = (CheckCategories.LOGGING,)\n@@ -26,8 +27,8 @@\n if not isinstance(retention, int):\n # probably a dependent variable\n return CheckResult.UNKNOWN\n-\n- if retention >= 365:\n+ # If you select 0, the events in the log group are always retained and never expire.\n+ if retention == 0 or retention >= 365:\n return CheckResult.PASSED\n \n return CheckResult.FAILED\n", "issue": "CKV_AWS_338 does not account for 0 (keep forever) & has insufficient error message\n**Describe the issue**\r\nCKV_AWS_338 is a check for the number of retention days for a Cloudwatch log group. However, the actual check source code requires not just being specified, but also to be at least 1 year.\r\n\r\n1 - The message \"Ensure that CloudWatch Log Group specifies retention days\" does not reflect the need to not only be specified, but also be of sufficient length (or provide ref to web page providing more detail on the check)\r\n\r\n2 - 0 is a valid value and means that logs are always retained and never expire, which should result in passing the \"must retain for at least 1 year\" rule.\r\n\r\n**Examples**\r\nPlease share an example code sample (in the IaC of your choice) + the expected outcomes.\r\n\r\n**Version (please complete the following information):**\r\n2.3.212\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass CloudWatchLogGroupRetentionYear(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n NIST.800-53.r5 AU-10, NIST.800-53.r5 AU-11, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SI-12\n CloudWatch log groups should be retained for at least 1 year\n \"\"\"\n name = \"Ensure that CloudWatch Log Group specifies retention days\"\n id = \"CKV_AWS_338\"\n supported_resource = (\"aws_cloudwatch_log_group\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n retention = conf.get(\"retention_in_days\")\n if retention and isinstance(retention, list):\n retention = retention[0]\n if not isinstance(retention, int):\n # probably a dependent variable\n return CheckResult.UNKNOWN\n\n if retention >= 365:\n return CheckResult.PASSED\n\n return 
CheckResult.FAILED\n\n\ncheck = CloudWatchLogGroupRetentionYear()\n", "path": "checkov/terraform/checks/resource/aws/CloudWatchLogGroupRetentionYear.py"}]} | 1,185 | 380 |
gh_patches_debug_35330 | rasdani/github-patches | git_diff | iterative__dvc-1531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
daemon: address dvc directly
Our daemon is using `sys.executable -m dvc` to launch a new dvc process, but a more robust approach would be to address dvc directly, dynamically computing its location ourselves from `__file__`. See the discussion at https://github.com/iterative/dvc/issues/1509#issuecomment-457415307.
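A rough sketch of that idea (illustrative only; the variable names are assumptions, not the final implementation):

```python
import os

# Locate the directory containing the `dvc` package from this module's
# own path, so the spawned `python -m dvc` imports the same copy.
file_path = os.path.abspath(__file__)
env = os.environ.copy()
env["PYTHONPATH"] = os.path.dirname(os.path.dirname(file_path))
```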
</issue>
<code>
[start of dvc/daemon.py]
1 """Launch `dvc daemon` command in a separate detached process."""
2
3 import os
4 import sys
5 from subprocess import Popen
6
7 import dvc.logger as logger
8 from dvc.utils import is_binary, fix_env
9
10
11 CREATE_NEW_PROCESS_GROUP = 0x00000200
12 DETACHED_PROCESS = 0x00000008
13
14
15 def _spawn_windows(cmd):
16 from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW
17
18 creationflags = CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS
19
20 startupinfo = STARTUPINFO()
21 startupinfo.dwFlags |= STARTF_USESHOWWINDOW
22
23 Popen(cmd,
24 env=fix_env(),
25 close_fds=True,
26 shell=False,
27 creationflags=creationflags,
28 startupinfo=startupinfo).communicate()
29
30
31 def _spawn_posix(cmd):
32 # NOTE: using os._exit instead of sys.exit, because dvc built
33 # with PyInstaller has trouble with SystemExit exeption and throws
34 # errors such as "[26338] Failed to execute script __main__"
35 try:
36 pid = os.fork()
37 if pid > 0:
38 return
39 except OSError:
40 logger.error("failed at first fork")
41 os._exit(1) # pylint: disable=protected-access
42
43 os.setsid()
44 os.umask(0)
45
46 try:
47 pid = os.fork()
48 if pid > 0:
49 os._exit(0) # pylint: disable=protected-access
50 except OSError:
51 logger.error("failed at second fork")
52 os._exit(1) # pylint: disable=protected-access
53
54 sys.stdin.close()
55 sys.stdout.close()
56 sys.stderr.close()
57
58 Popen(cmd, env=fix_env(), close_fds=True, shell=False).communicate()
59
60 os._exit(0) # pylint: disable=protected-access
61
62
63 def daemon(args):
64 """Launch a `dvc daemon` command in a detached process.
65
66 Args:
67 args (list): list of arguments to append to `dvc daemon` command.
68 """
69 cmd = [sys.executable]
70 if not is_binary():
71 cmd += ['-m', 'dvc']
72 cmd += ['daemon', '-q'] + args
73
74 logger.debug("Trying to spawn '{}'".format(cmd))
75
76 if os.name == 'nt':
77 _spawn_windows(cmd)
78 elif os.name == 'posix':
79 _spawn_posix(cmd)
80 else:
81 raise NotImplementedError
82
83 logger.debug("Spawned '{}'".format(cmd))
84
[end of dvc/daemon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/daemon.py b/dvc/daemon.py
--- a/dvc/daemon.py
+++ b/dvc/daemon.py
@@ -2,6 +2,7 @@
import os
import sys
+import inspect
from subprocess import Popen
import dvc.logger as logger
@@ -12,7 +13,7 @@
DETACHED_PROCESS = 0x00000008
-def _spawn_windows(cmd):
+def _spawn_windows(cmd, env):
from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW
creationflags = CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS
@@ -21,14 +22,14 @@
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
Popen(cmd,
- env=fix_env(),
+ env=env,
close_fds=True,
shell=False,
creationflags=creationflags,
startupinfo=startupinfo).communicate()
-def _spawn_posix(cmd):
+def _spawn_posix(cmd, env):
# NOTE: using os._exit instead of sys.exit, because dvc built
# with PyInstaller has trouble with SystemExit exeption and throws
# errors such as "[26338] Failed to execute script __main__"
@@ -55,7 +56,7 @@
sys.stdout.close()
sys.stderr.close()
- Popen(cmd, env=fix_env(), close_fds=True, shell=False).communicate()
+ Popen(cmd, env=env, close_fds=True, shell=False).communicate()
os._exit(0) # pylint: disable=protected-access
@@ -71,12 +72,16 @@
cmd += ['-m', 'dvc']
cmd += ['daemon', '-q'] + args
- logger.debug("Trying to spawn '{}'".format(cmd))
+ env = fix_env()
+ file_path = os.path.abspath(inspect.stack()[0][1])
+ env['PYTHONPATH'] = os.path.dirname(os.path.dirname(file_path))
+
+ logger.debug("Trying to spawn '{}' with env '{}'".format(cmd, env))
if os.name == 'nt':
- _spawn_windows(cmd)
+ _spawn_windows(cmd, env)
elif os.name == 'posix':
- _spawn_posix(cmd)
+ _spawn_posix(cmd, env)
else:
raise NotImplementedError
| {"golden_diff": "diff --git a/dvc/daemon.py b/dvc/daemon.py\n--- a/dvc/daemon.py\n+++ b/dvc/daemon.py\n@@ -2,6 +2,7 @@\n \n import os\n import sys\n+import inspect\n from subprocess import Popen\n \n import dvc.logger as logger\n@@ -12,7 +13,7 @@\n DETACHED_PROCESS = 0x00000008\n \n \n-def _spawn_windows(cmd):\n+def _spawn_windows(cmd, env):\n from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW\n \n creationflags = CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS\n@@ -21,14 +22,14 @@\n startupinfo.dwFlags |= STARTF_USESHOWWINDOW\n \n Popen(cmd,\n- env=fix_env(),\n+ env=env,\n close_fds=True,\n shell=False,\n creationflags=creationflags,\n startupinfo=startupinfo).communicate()\n \n \n-def _spawn_posix(cmd):\n+def _spawn_posix(cmd, env):\n # NOTE: using os._exit instead of sys.exit, because dvc built\n # with PyInstaller has trouble with SystemExit exeption and throws\n # errors such as \"[26338] Failed to execute script __main__\"\n@@ -55,7 +56,7 @@\n sys.stdout.close()\n sys.stderr.close()\n \n- Popen(cmd, env=fix_env(), close_fds=True, shell=False).communicate()\n+ Popen(cmd, env=env, close_fds=True, shell=False).communicate()\n \n os._exit(0) # pylint: disable=protected-access\n \n@@ -71,12 +72,16 @@\n cmd += ['-m', 'dvc']\n cmd += ['daemon', '-q'] + args\n \n- logger.debug(\"Trying to spawn '{}'\".format(cmd))\n+ env = fix_env()\n+ file_path = os.path.abspath(inspect.stack()[0][1])\n+ env['PYTHONPATH'] = os.path.dirname(os.path.dirname(file_path))\n+\n+ logger.debug(\"Trying to spawn '{}' with env '{}'\".format(cmd, env))\n \n if os.name == 'nt':\n- _spawn_windows(cmd)\n+ _spawn_windows(cmd, env)\n elif os.name == 'posix':\n- _spawn_posix(cmd)\n+ _spawn_posix(cmd, env)\n else:\n raise NotImplementedError\n", "issue": "daemon: address dvc directly\nOur daemon is using `sys.executable -m dvc` to launch a new dvc process, but a more proper way would be to address dvc directly, by dynamically computing its location ourselves from `__file__`. 
See discussion https://github.com/iterative/dvc/issues/1509#issuecomment-457415307 .\n", "before_files": [{"content": "\"\"\"Launch `dvc daemon` command in a separate detached process.\"\"\"\n\nimport os\nimport sys\nfrom subprocess import Popen\n\nimport dvc.logger as logger\nfrom dvc.utils import is_binary, fix_env\n\n\nCREATE_NEW_PROCESS_GROUP = 0x00000200\nDETACHED_PROCESS = 0x00000008\n\n\ndef _spawn_windows(cmd):\n from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW\n\n creationflags = CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS\n\n startupinfo = STARTUPINFO()\n startupinfo.dwFlags |= STARTF_USESHOWWINDOW\n\n Popen(cmd,\n env=fix_env(),\n close_fds=True,\n shell=False,\n creationflags=creationflags,\n startupinfo=startupinfo).communicate()\n\n\ndef _spawn_posix(cmd):\n # NOTE: using os._exit instead of sys.exit, because dvc built\n # with PyInstaller has trouble with SystemExit exeption and throws\n # errors such as \"[26338] Failed to execute script __main__\"\n try:\n pid = os.fork()\n if pid > 0:\n return\n except OSError:\n logger.error(\"failed at first fork\")\n os._exit(1) # pylint: disable=protected-access\n\n os.setsid()\n os.umask(0)\n\n try:\n pid = os.fork()\n if pid > 0:\n os._exit(0) # pylint: disable=protected-access\n except OSError:\n logger.error(\"failed at second fork\")\n os._exit(1) # pylint: disable=protected-access\n\n sys.stdin.close()\n sys.stdout.close()\n sys.stderr.close()\n\n Popen(cmd, env=fix_env(), close_fds=True, shell=False).communicate()\n\n os._exit(0) # pylint: disable=protected-access\n\n\ndef daemon(args):\n \"\"\"Launch a `dvc daemon` command in a detached process.\n\n Args:\n args (list): list of arguments to append to `dvc daemon` command.\n \"\"\"\n cmd = [sys.executable]\n if not is_binary():\n cmd += ['-m', 'dvc']\n cmd += ['daemon', '-q'] + args\n\n logger.debug(\"Trying to spawn '{}'\".format(cmd))\n\n if os.name == 'nt':\n _spawn_windows(cmd)\n elif os.name == 'posix':\n _spawn_posix(cmd)\n else:\n raise NotImplementedError\n\n logger.debug(\"Spawned '{}'\".format(cmd))\n", "path": "dvc/daemon.py"}]} | 1,339 | 526 |
gh_patches_debug_27533 | rasdani/github-patches | git_diff | akvo__akvo-rsr-5271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature Request: Document the task management
### What are you trying to do?
Understand how the task management functions
### Describe the solution you'd like
A technical documentation of how it works.
### Have you consider alternatives?
_No response_
### Additional context
_No response_
</issue>
<code>
[start of akvo/rsr/management/commands/django_q_probettp.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3
4 # Akvo Reporting is covered by the GNU Affero General Public License.
5 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
6 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
7
8 """
9 Provides a localhost HTTP server to query the local status of the django-q cluster
10 """
11 import logging
12 import signal
13 import socket
14 from http.server import BaseHTTPRequestHandler, HTTPServer
15
16 from django.core.management.base import BaseCommand
17 from django_q.conf import Conf
18 from django_q.status import Stat
19
20 logger = logging.getLogger(__name__)
21
22
23 class Command(BaseCommand):
24 help = __doc__
25
26 def handle(self, *args, **options):
27 server = HTTPServer(("localhost", 8080), DjangoQRequestHandler)
28
29 def handle_end(*_):
30 logger.info("Stopping server")
31 server.shutdown()
32
33 signal.signal(signal.SIGINT, handle_end)
34 signal.signal(signal.SIGTERM, handle_end)
35
36 logger.info("Starting server...")
37 server.serve_forever()
38
39
40 class DjangoQRequestHandler(BaseHTTPRequestHandler):
41
42 def do_GET(self):
43 hostname = socket.gethostname()
44
45 # Find local cluster
46 local_stat = next(iter(stat for stat in Stat.get_all() if stat.host == hostname), None)
47 if local_stat:
48 message = local_stat.status
49 else:
50 message = Conf.STOPPED
51 logger.info(f"Probe response {message}")
52 self.send_response(200)
53 self.end_headers()
54 self.wfile.write(message.encode())
55
56 def log_message(self, format: str, *args) -> None:
57 logger.debug(format, *args)
58
[end of akvo/rsr/management/commands/django_q_probettp.py]
[start of akvo/cache/heartbeat.py]
1 import datetime
2 import logging
3 from threading import Event, Thread
4 from typing import Union
5
6 from django.core.cache import cache
7
8
9 class CacheHeartbeat(Thread):
10 """
11 Thread to update set a cache key with a max life and refresh it as long as the thread is alive
12
13 The thread can be ended by setting the `event_end` flag
14 """
15
16 def __init__(self, cache_key: str, key_timeout: float = 30.0, beat_interval: int = 3):
17 """
18 :param cache_key: The cache key to keep alive
19 :param key_timeout: How long the cache key should live without the heartbeat thread
20 :param beat_interval: How often per timeout the key should "beat"
21 """
22 super().__init__()
23 self.cache_key = cache_key
24 self.event_end = Event()
25 self.key_timeout = key_timeout
26 self.beat_interval = beat_interval
27
28 def run(self) -> None:
29 logger = logging.getLogger("akvo.rsr.CacheHeartBeat")
30 logger.info("Starting cache heartbeat for '%s' with timeout %s", self.cache_key, self.key_timeout)
31 self.event_end.clear()
32 while not self.event_end.is_set():
33 # Refresh the heartbeat
34 self.set_cache_value()
35 self.event_end.wait(self.key_timeout / self.beat_interval)
36
37 cache.delete(self.cache_key)
38 logger.info("Ended cache heartbeat for '%s'", self.cache_key)
39
40 def set_cache_value(self):
41 cache.set(self.cache_key, self.get_calc_value(), self.key_timeout)
42
43 def get_calc_value(self) -> Union[str, int, float]:
44 return datetime.datetime.utcnow().timestamp()
45
[end of akvo/cache/heartbeat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/cache/heartbeat.py b/akvo/cache/heartbeat.py
--- a/akvo/cache/heartbeat.py
+++ b/akvo/cache/heartbeat.py
@@ -8,7 +8,7 @@
class CacheHeartbeat(Thread):
"""
- Thread to update set a cache key with a max life and refresh it as long as the thread is alive
+ Thread to set a cache key with a max life and refresh it as long as the thread is alive
The thread can be ended by setting the `event_end` flag
"""
@@ -38,7 +38,13 @@
logger.info("Ended cache heartbeat for '%s'", self.cache_key)
def set_cache_value(self):
- cache.set(self.cache_key, self.get_calc_value(), self.key_timeout)
+ """
+ Set the cache key and its value
+ """
+ cache.set(self.cache_key, self.calc_cache_value(), self.key_timeout)
- def get_calc_value(self) -> Union[str, int, float]:
+ def calc_cache_value(self) -> Union[str, int, float]:
+ """
+ Calculate a value to be used when setting the cache key
+ """
return datetime.datetime.utcnow().timestamp()
diff --git a/akvo/rsr/management/commands/django_q_probettp.py b/akvo/rsr/management/commands/django_q_probettp.py
--- a/akvo/rsr/management/commands/django_q_probettp.py
+++ b/akvo/rsr/management/commands/django_q_probettp.py
@@ -38,8 +38,14 @@
class DjangoQRequestHandler(BaseHTTPRequestHandler):
+ """
+ A handler to be used with HTTPServer to get the status of the local django-q cluster
+ """
def do_GET(self):
+ """
+ Handle GET requests to return a simple string indicating the status of the django-q cluster
+ """
hostname = socket.gethostname()
# Find local cluster
| {"golden_diff": "diff --git a/akvo/cache/heartbeat.py b/akvo/cache/heartbeat.py\n--- a/akvo/cache/heartbeat.py\n+++ b/akvo/cache/heartbeat.py\n@@ -8,7 +8,7 @@\n \n class CacheHeartbeat(Thread):\n \"\"\"\n- Thread to update set a cache key with a max life and refresh it as long as the thread is alive\n+ Thread to set a cache key with a max life and refresh it as long as the thread is alive\n \n The thread can be ended by setting the `event_end` flag\n \"\"\"\n@@ -38,7 +38,13 @@\n logger.info(\"Ended cache heartbeat for '%s'\", self.cache_key)\n \n def set_cache_value(self):\n- cache.set(self.cache_key, self.get_calc_value(), self.key_timeout)\n+ \"\"\"\n+ Set the cache key and its value\n+ \"\"\"\n+ cache.set(self.cache_key, self.calc_cache_value(), self.key_timeout)\n \n- def get_calc_value(self) -> Union[str, int, float]:\n+ def calc_cache_value(self) -> Union[str, int, float]:\n+ \"\"\"\n+ Calculate a value to be used when setting the cache key\n+ \"\"\"\n return datetime.datetime.utcnow().timestamp()\ndiff --git a/akvo/rsr/management/commands/django_q_probettp.py b/akvo/rsr/management/commands/django_q_probettp.py\n--- a/akvo/rsr/management/commands/django_q_probettp.py\n+++ b/akvo/rsr/management/commands/django_q_probettp.py\n@@ -38,8 +38,14 @@\n \n \n class DjangoQRequestHandler(BaseHTTPRequestHandler):\n+ \"\"\"\n+ A handler to be used with HTTPServer to get the status of the local django-q cluster\n+ \"\"\"\n \n def do_GET(self):\n+ \"\"\"\n+ Handle GET requests to return a simple string indicating the status of the django-q cluster\n+ \"\"\"\n hostname = socket.gethostname()\n \n # Find local cluster\n", "issue": "Feature Request: Document the task management\n### What are you trying to do?\n\nUnderstand how the task management functions\n\n### Describe the solution you'd like\n\nA technical documentation of how it works.\n\n### Have you consider alternatives?\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\"\"\"\nProvides a localhost HTTP server to query the local status of the django-q cluster\n\"\"\"\nimport logging\nimport signal\nimport socket\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\nfrom django.core.management.base import BaseCommand\nfrom django_q.conf import Conf\nfrom django_q.status import Stat\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = __doc__\n\n def handle(self, *args, **options):\n server = HTTPServer((\"localhost\", 8080), DjangoQRequestHandler)\n\n def handle_end(*_):\n logger.info(\"Stopping server\")\n server.shutdown()\n\n signal.signal(signal.SIGINT, handle_end)\n signal.signal(signal.SIGTERM, handle_end)\n\n logger.info(\"Starting server...\")\n server.serve_forever()\n\n\nclass DjangoQRequestHandler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n hostname = socket.gethostname()\n\n # Find local cluster\n local_stat = next(iter(stat for stat in Stat.get_all() if stat.host == hostname), None)\n if local_stat:\n message = local_stat.status\n else:\n message = Conf.STOPPED\n logger.info(f\"Probe response {message}\")\n self.send_response(200)\n self.end_headers()\n self.wfile.write(message.encode())\n\n def log_message(self, format: str, 
*args) -> None:\n logger.debug(format, *args)\n", "path": "akvo/rsr/management/commands/django_q_probettp.py"}, {"content": "import datetime\nimport logging\nfrom threading import Event, Thread\nfrom typing import Union\n\nfrom django.core.cache import cache\n\n\nclass CacheHeartbeat(Thread):\n \"\"\"\n Thread to update set a cache key with a max life and refresh it as long as the thread is alive\n\n The thread can be ended by setting the `event_end` flag\n \"\"\"\n\n def __init__(self, cache_key: str, key_timeout: float = 30.0, beat_interval: int = 3):\n \"\"\"\n :param cache_key: The cache key to keep alive\n :param key_timeout: How long the cache key should live without the heartbeat thread\n :param beat_interval: How often per timeout the key should \"beat\"\n \"\"\"\n super().__init__()\n self.cache_key = cache_key\n self.event_end = Event()\n self.key_timeout = key_timeout\n self.beat_interval = beat_interval\n\n def run(self) -> None:\n logger = logging.getLogger(\"akvo.rsr.CacheHeartBeat\")\n logger.info(\"Starting cache heartbeat for '%s' with timeout %s\", self.cache_key, self.key_timeout)\n self.event_end.clear()\n while not self.event_end.is_set():\n # Refresh the heartbeat\n self.set_cache_value()\n self.event_end.wait(self.key_timeout / self.beat_interval)\n\n cache.delete(self.cache_key)\n logger.info(\"Ended cache heartbeat for '%s'\", self.cache_key)\n\n def set_cache_value(self):\n cache.set(self.cache_key, self.get_calc_value(), self.key_timeout)\n\n def get_calc_value(self) -> Union[str, int, float]:\n return datetime.datetime.utcnow().timestamp()\n", "path": "akvo/cache/heartbeat.py"}]} | 1,554 | 443 |
gh_patches_debug_11081 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-635 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add an environment variable to skip nltk.download()
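One possible shape for this (a sketch; the variable name `DP_SKIP_NLTK_DOWNLOAD` matches the accepted diff further below, the rest is illustrative):

```python
import os

import nltk

# Let deployments that ship pre-downloaded NLTK data opt out.
if not os.environ.get("DP_SKIP_NLTK_DOWNLOAD"):
    nltk.download("punkt")
    nltk.download("stopwords")
```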
</issue>
<code>
[start of deeppavlov/models/__init__.py]
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import nltk
16
17 from deeppavlov.core.common.prints import RedirectedPrints
18
19
20 with RedirectedPrints():
21 nltk.download('punkt')
22 nltk.download('stopwords')
23 nltk.download('perluniprops')
24 nltk.download('nonbreaking_prefixes')
25
[end of deeppavlov/models/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deeppavlov/models/__init__.py b/deeppavlov/models/__init__.py
--- a/deeppavlov/models/__init__.py
+++ b/deeppavlov/models/__init__.py
@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
import nltk
from deeppavlov.core.common.prints import RedirectedPrints
-
-with RedirectedPrints():
- nltk.download('punkt')
- nltk.download('stopwords')
- nltk.download('perluniprops')
- nltk.download('nonbreaking_prefixes')
+if not os.environ.get('DP_SKIP_NLTK_DOWNLOAD'):
+ with RedirectedPrints():
+ nltk.download('punkt')
+ nltk.download('stopwords')
+ nltk.download('perluniprops')
+ nltk.download('nonbreaking_prefixes')
| {"golden_diff": "diff --git a/deeppavlov/models/__init__.py b/deeppavlov/models/__init__.py\n--- a/deeppavlov/models/__init__.py\n+++ b/deeppavlov/models/__init__.py\n@@ -12,13 +12,14 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import os\n import nltk\n \n from deeppavlov.core.common.prints import RedirectedPrints\n \n-\n-with RedirectedPrints():\n- nltk.download('punkt')\n- nltk.download('stopwords')\n- nltk.download('perluniprops')\n- nltk.download('nonbreaking_prefixes')\n+if not os.environ.get('DP_SKIP_NLTK_DOWNLOAD'):\n+ with RedirectedPrints():\n+ nltk.download('punkt')\n+ nltk.download('stopwords')\n+ nltk.download('perluniprops')\n+ nltk.download('nonbreaking_prefixes')\n", "issue": "Add an environment variable to skip nltk.download()\n\nAdd an environment variable to skip nltk.download()\n\n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nltk\n\nfrom deeppavlov.core.common.prints import RedirectedPrints\n\n\nwith RedirectedPrints():\n nltk.download('punkt')\n nltk.download('stopwords')\n nltk.download('perluniprops')\n nltk.download('nonbreaking_prefixes')\n", "path": "deeppavlov/models/__init__.py"}]} | 799 | 207 |
gh_patches_debug_2610 | rasdani/github-patches | git_diff | openai__openai-python-1007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing default value to logprobs in openai.types.chat.chat_completion.Choice
### Confirm this is an issue with the Python library and not an underlying OpenAI API
- [X] This is an issue with the Python library
### Describe the bug
#980 added token `logprobs` to chat completions of type `Optional[ChoiceLogprobs]` in [`openai.types.chat.chat_completion.Choice`](https://github.com/openai/openai-python/blob/3ad4e8bc9d89d7a81586bf598289ff62b0a339b9/src/openai/types/chat/chat_completion.py#L33) and [`openai.types.chat.chat_completion_chunk.Choice`](https://github.com/openai/openai-python/blob/3ad4e8bc9d89d7a81586bf598289ff62b0a339b9/src/openai/types/chat/chat_completion_chunk.py#L97). In the latter, the default value is set to `None`, while in the former it is not set. This causes backward compatibility problems with code written for versions prior to 1.5.0.
### To Reproduce
Execution of the following code fails:
```python
from openai.types.chat.chat_completion import ChatCompletionMessage, Choice
msg = ChatCompletionMessage(role="assistant", content="")
Choice(
index=0,
finish_reason="stop",
message=msg,
)
```
The output
```
----> 1 Choice(
2 index=0,
3 finish_reason="stop",
4 message=msg,
5 )
File /.venv-3.10/lib/python3.10/site-packages/pydantic/main.py:164, in BaseModel.__init__(__pydantic_self__, **data)
162 # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
163 __tracebackhide__ = True
--> 164 __pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)
ValidationError: 1 validation error for Choice
logprobs
Field required [type=missing, input_value={'index': 0, 'finish_reas...=None, tool_calls=None)}, input_type=dict]
For further information visit https://errors.pydantic.dev/2.5/v/missing
```
Setting `logprobs` to `None` fixes the problem.
```python
from openai.types.chat.chat_completion import ChatCompletionMessage, Choice
msg = ChatCompletionMessage(role="assistant", content="")
Choice(
index=0,
finish_reason="stop",
message=msg,
logprobs=None # added line
)
```
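The corresponding one-line change (matching the accepted diff further below) is to give the field a default:

```python
logprobs: Optional[ChoiceLogprobs] = None
```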
### Code snippets
```Python
see above
```
### OS
Linux
### Python version
Python 3.10.13
### Library version
openai 1.6.0
</issue>
<code>
[start of src/openai/types/chat/chat_completion.py]
1 # File generated from our OpenAPI spec by Stainless.
2
3 from typing import List, Optional
4 from typing_extensions import Literal
5
6 from ..._models import BaseModel
7 from ..completion_usage import CompletionUsage
8 from .chat_completion_message import ChatCompletionMessage
9 from .chat_completion_token_logprob import ChatCompletionTokenLogprob
10
11 __all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]
12
13
14 class ChoiceLogprobs(BaseModel):
15 content: Optional[List[ChatCompletionTokenLogprob]]
16 """A list of message content tokens with log probability information."""
17
18
19 class Choice(BaseModel):
20 finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
21 """The reason the model stopped generating tokens.
22
23 This will be `stop` if the model hit a natural stop point or a provided stop
24 sequence, `length` if the maximum number of tokens specified in the request was
25 reached, `content_filter` if content was omitted due to a flag from our content
26 filters, `tool_calls` if the model called a tool, or `function_call`
27 (deprecated) if the model called a function.
28 """
29
30 index: int
31 """The index of the choice in the list of choices."""
32
33 logprobs: Optional[ChoiceLogprobs]
34 """Log probability information for the choice."""
35
36 message: ChatCompletionMessage
37 """A chat completion message generated by the model."""
38
39
40 class ChatCompletion(BaseModel):
41 id: str
42 """A unique identifier for the chat completion."""
43
44 choices: List[Choice]
45 """A list of chat completion choices.
46
47 Can be more than one if `n` is greater than 1.
48 """
49
50 created: int
51 """The Unix timestamp (in seconds) of when the chat completion was created."""
52
53 model: str
54 """The model used for the chat completion."""
55
56 object: Literal["chat.completion"]
57 """The object type, which is always `chat.completion`."""
58
59 system_fingerprint: Optional[str] = None
60 """This fingerprint represents the backend configuration that the model runs with.
61
62 Can be used in conjunction with the `seed` request parameter to understand when
63 backend changes have been made that might impact determinism.
64 """
65
66 usage: Optional[CompletionUsage] = None
67 """Usage statistics for the completion request."""
68
[end of src/openai/types/chat/chat_completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py
--- a/src/openai/types/chat/chat_completion.py
+++ b/src/openai/types/chat/chat_completion.py
@@ -30,7 +30,7 @@
index: int
"""The index of the choice in the list of choices."""
- logprobs: Optional[ChoiceLogprobs]
+ logprobs: Optional[ChoiceLogprobs] = None
"""Log probability information for the choice."""
message: ChatCompletionMessage
| {"golden_diff": "diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py\n--- a/src/openai/types/chat/chat_completion.py\n+++ b/src/openai/types/chat/chat_completion.py\n@@ -30,7 +30,7 @@\n index: int\n \"\"\"The index of the choice in the list of choices.\"\"\"\n \n- logprobs: Optional[ChoiceLogprobs]\n+ logprobs: Optional[ChoiceLogprobs] = None\n \"\"\"Log probability information for the choice.\"\"\"\n \n message: ChatCompletionMessage\n", "issue": "Missing default value to logprobs in openai.types.chat.chat_completion.Choice\n### Confirm this is an issue with the Python library and not an underlying OpenAI API\r\n\r\n- [X] This is an issue with the Python library\r\n\r\n### Describe the bug\r\n\r\n#980 added token `logprobs` to chat completions of type `Optional[ChoiceLogprobs]` in [`openai.types.chat.chat_completion.Choice`](https://github.com/openai/openai-python/blob/3ad4e8bc9d89d7a81586bf598289ff62b0a339b9/src/openai/types/chat/chat_completion.py#L33) and [`openai.types.chat.chat_completion_chunk.Choice`](https://github.com/openai/openai-python/blob/3ad4e8bc9d89d7a81586bf598289ff62b0a339b9/src/openai/types/chat/chat_completion_chunk.py#L97). In the latter, the default value is set to `None`, while in the former it is not set. This causes backward compatibility problems with code written for versions prior to 1.5.0.\r\n\r\n\r\n\r\n### To Reproduce\r\n\r\nExecution of the following code fails:\r\n\r\n```python \r\nfrom openai.types.chat.chat_completion import ChatCompletionMessage, Choice\r\n\r\nmsg = ChatCompletionMessage(role=\"assistant\", content=\"\")\r\n\r\nChoice(\r\n index=0,\r\n finish_reason=\"stop\",\r\n message=msg,\r\n)\r\n```\r\nThe output\r\n```\r\n----> 1 Choice(\r\n 2 index=0,\r\n 3 finish_reason=\"stop\",\r\n 4 message=msg,\r\n 5 )\r\n\r\nFile /.venv-3.10/lib/python3.10/site-packages/pydantic/main.py:164, in BaseModel.__init__(__pydantic_self__, **data)\r\n 162 # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks\r\n 163 __tracebackhide__ = True\r\n--> 164 __pydantic_self__.__pydantic_validator__.validate_python(data, self_instance=__pydantic_self__)\r\n\r\nValidationError: 1 validation error for Choice\r\nlogprobs\r\n Field required [type=missing, input_value={'index': 0, 'finish_reas...=None, tool_calls=None)}, input_type=dict]\r\n For further information visit https://errors.pydantic.dev/2.5/v/missing\r\n```\r\nSetting `logprobs` to `None` fixes the problem.\r\n```python \r\nfrom openai.types.chat.chat_completion import ChatCompletionMessage, Choice\r\n\r\nmsg = ChatCompletionMessage(role=\"assistant\", content=\"\")\r\n\r\nChoice(\r\n index=0,\r\n finish_reason=\"stop\",\r\n message=msg,\r\n logprobs=None # added line\r\n)\r\n```\r\n\r\n### Code snippets\r\n\r\n```Python\r\nsee above\r\n```\r\n\r\n\r\n### OS\r\n\r\nLinux\r\n\r\n### Python version\r\n\r\nPython 3.10.13\r\n\r\n### Library version\r\n\r\nopenai 1.6.0\n", "before_files": [{"content": "# File generated from our OpenAPI spec by Stainless.\n\nfrom typing import List, Optional\nfrom typing_extensions import Literal\n\nfrom ..._models import BaseModel\nfrom ..completion_usage import CompletionUsage\nfrom .chat_completion_message import ChatCompletionMessage\nfrom .chat_completion_token_logprob import ChatCompletionTokenLogprob\n\n__all__ = [\"ChatCompletion\", \"Choice\", \"ChoiceLogprobs\"]\n\n\nclass ChoiceLogprobs(BaseModel):\n content: Optional[List[ChatCompletionTokenLogprob]]\n \"\"\"A list of message content tokens 
with log probability information.\"\"\"\n\n\nclass Choice(BaseModel):\n finish_reason: Literal[\"stop\", \"length\", \"tool_calls\", \"content_filter\", \"function_call\"]\n \"\"\"The reason the model stopped generating tokens.\n\n This will be `stop` if the model hit a natural stop point or a provided stop\n sequence, `length` if the maximum number of tokens specified in the request was\n reached, `content_filter` if content was omitted due to a flag from our content\n filters, `tool_calls` if the model called a tool, or `function_call`\n (deprecated) if the model called a function.\n \"\"\"\n\n index: int\n \"\"\"The index of the choice in the list of choices.\"\"\"\n\n logprobs: Optional[ChoiceLogprobs]\n \"\"\"Log probability information for the choice.\"\"\"\n\n message: ChatCompletionMessage\n \"\"\"A chat completion message generated by the model.\"\"\"\n\n\nclass ChatCompletion(BaseModel):\n id: str\n \"\"\"A unique identifier for the chat completion.\"\"\"\n\n choices: List[Choice]\n \"\"\"A list of chat completion choices.\n\n Can be more than one if `n` is greater than 1.\n \"\"\"\n\n created: int\n \"\"\"The Unix timestamp (in seconds) of when the chat completion was created.\"\"\"\n\n model: str\n \"\"\"The model used for the chat completion.\"\"\"\n\n object: Literal[\"chat.completion\"]\n \"\"\"The object type, which is always `chat.completion`.\"\"\"\n\n system_fingerprint: Optional[str] = None\n \"\"\"This fingerprint represents the backend configuration that the model runs with.\n\n Can be used in conjunction with the `seed` request parameter to understand when\n backend changes have been made that might impact determinism.\n \"\"\"\n\n usage: Optional[CompletionUsage] = None\n \"\"\"Usage statistics for the completion request.\"\"\"\n", "path": "src/openai/types/chat/chat_completion.py"}]} | 1,837 | 122 |
gh_patches_debug_13241 | rasdani/github-patches | git_diff | pypa__pip-12173 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
23.2: Unable to add zsh completion as documented
### Description
Attempting to follow the steps in the [documentation for adding zsh shell completion](https://pip.pypa.io/en/stable/user_guide/#command-completion) raises an error regarding the use of `compadd` outside of a completion function. This seems to be new with 23.2 and added in https://github.com/pypa/pip/pull/11417.
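The constraint behind the error is that `compadd` may only run inside a completion function, so an eval-able script has to register a function via `compdef` instead of calling `compadd` at the top level. A sketch of such a zsh snippet, written here as a Python string constant for consistency with the `COMPLETION_SCRIPTS` mapping below (illustrative; the function name is an assumption):

```python
ZSH_COMPLETION_SKETCH = """
__pip_complete() {
  compadd $( COMP_WORDS="$words[*]" \\
             COMP_CWORD=$((CURRENT-1)) \\
             PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
}
compdef __pip_complete -P 'pip[0-9.]#'
"""
```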
### Expected behavior
As in previous versions, following the documented steps should successfully add completion for pip.
### pip version
23.2
### Python version
3.11.4
### OS
macOS 12.6.8
### How to Reproduce
As documented:
1. Execute `python -m pip completion --zsh >> ~/.zprofile` or add ``eval "`pip completion --zsh`"`` to `.zprofile` or `.zshrc`
2. Start a new shell
### Output
From startup:
```
/dev/fd/16:compadd:4: can only be called from completion function
```
### Code of Conduct
- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).
</issue>
<code>
[start of src/pip/_internal/commands/completion.py]
1 import sys
2 import textwrap
3 from optparse import Values
4 from typing import List
5
6 from pip._internal.cli.base_command import Command
7 from pip._internal.cli.status_codes import SUCCESS
8 from pip._internal.utils.misc import get_prog
9
10 BASE_COMPLETION = """
11 # pip {shell} completion start{script}# pip {shell} completion end
12 """
13
14 COMPLETION_SCRIPTS = {
15 "bash": """
16 _pip_completion()
17 {{
18 COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\
19 COMP_CWORD=$COMP_CWORD \\
20 PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
21 }}
22 complete -o default -F _pip_completion {prog}
23 """,
24 "zsh": """
25 #compdef -P pip[0-9.]#
26 compadd $( COMP_WORDS="$words[*]" \\
27 COMP_CWORD=$((CURRENT-1)) \\
28 PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
29 """,
30 "fish": """
31 function __fish_complete_pip
32 set -lx COMP_WORDS (commandline -o) ""
33 set -lx COMP_CWORD ( \\
34 math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
35 )
36 set -lx PIP_AUTO_COMPLETE 1
37 string split \\ -- (eval $COMP_WORDS[1])
38 end
39 complete -fa "(__fish_complete_pip)" -c {prog}
40 """,
41 "powershell": """
42 if ((Test-Path Function:\\TabExpansion) -and -not `
43 (Test-Path Function:\\_pip_completeBackup)) {{
44 Rename-Item Function:\\TabExpansion _pip_completeBackup
45 }}
46 function TabExpansion($line, $lastWord) {{
47 $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
48 if ($lastBlock.StartsWith("{prog} ")) {{
49 $Env:COMP_WORDS=$lastBlock
50 $Env:COMP_CWORD=$lastBlock.Split().Length - 1
51 $Env:PIP_AUTO_COMPLETE=1
52 (& {prog}).Split()
53 Remove-Item Env:COMP_WORDS
54 Remove-Item Env:COMP_CWORD
55 Remove-Item Env:PIP_AUTO_COMPLETE
56 }}
57 elseif (Test-Path Function:\\_pip_completeBackup) {{
58 # Fall back on existing tab expansion
59 _pip_completeBackup $line $lastWord
60 }}
61 }}
62 """,
63 }
64
65
66 class CompletionCommand(Command):
67 """A helper command to be used for command completion."""
68
69 ignore_require_venv = True
70
71 def add_options(self) -> None:
72 self.cmd_opts.add_option(
73 "--bash",
74 "-b",
75 action="store_const",
76 const="bash",
77 dest="shell",
78 help="Emit completion code for bash",
79 )
80 self.cmd_opts.add_option(
81 "--zsh",
82 "-z",
83 action="store_const",
84 const="zsh",
85 dest="shell",
86 help="Emit completion code for zsh",
87 )
88 self.cmd_opts.add_option(
89 "--fish",
90 "-f",
91 action="store_const",
92 const="fish",
93 dest="shell",
94 help="Emit completion code for fish",
95 )
96 self.cmd_opts.add_option(
97 "--powershell",
98 "-p",
99 action="store_const",
100 const="powershell",
101 dest="shell",
102 help="Emit completion code for powershell",
103 )
104
105 self.parser.insert_option_group(0, self.cmd_opts)
106
107 def run(self, options: Values, args: List[str]) -> int:
108 """Prints the completion code of the given shell"""
109 shells = COMPLETION_SCRIPTS.keys()
110 shell_options = ["--" + shell for shell in sorted(shells)]
111 if options.shell in shells:
112 script = textwrap.dedent(
113 COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog())
114 )
115 print(BASE_COMPLETION.format(script=script, shell=options.shell))
116 return SUCCESS
117 else:
118 sys.stderr.write(
119 "ERROR: You must pass {}\n".format(" or ".join(shell_options))
120 )
121 return SUCCESS
122
[end of src/pip/_internal/commands/completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py
--- a/src/pip/_internal/commands/completion.py
+++ b/src/pip/_internal/commands/completion.py
@@ -23,9 +23,18 @@
""",
"zsh": """
#compdef -P pip[0-9.]#
- compadd $( COMP_WORDS="$words[*]" \\
- COMP_CWORD=$((CURRENT-1)) \\
- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
+ __pip() {{
+ compadd $( COMP_WORDS="$words[*]" \\
+ COMP_CWORD=$((CURRENT-1)) \\
+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
+ }}
+ if [[ $zsh_eval_context[-1] == loadautofunc ]]; then
+ # autoload from fpath, call function directly
+ __pip "$@"
+ else
+ # eval/source/. command, register function for later
+ compdef __pip -P 'pip[0-9.]#'
+ fi
""",
"fish": """
function __fish_complete_pip
| {"golden_diff": "diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py\n--- a/src/pip/_internal/commands/completion.py\n+++ b/src/pip/_internal/commands/completion.py\n@@ -23,9 +23,18 @@\n \"\"\",\n \"zsh\": \"\"\"\n #compdef -P pip[0-9.]#\n- compadd $( COMP_WORDS=\"$words[*]\" \\\\\n- COMP_CWORD=$((CURRENT-1)) \\\\\n- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n+ __pip() {{\n+ compadd $( COMP_WORDS=\"$words[*]\" \\\\\n+ COMP_CWORD=$((CURRENT-1)) \\\\\n+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n+ }}\n+ if [[ $zsh_eval_context[-1] == loadautofunc ]]; then\n+ # autoload from fpath, call function directly\n+ __pip \"$@\"\n+ else\n+ # eval/source/. command, register function for later\n+ compdef __pip -P 'pip[0-9.]#'\n+ fi\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n", "issue": "23.2: Unable to add zsh completion as documented\n### Description\r\n\r\nAttempting to follow the steps in the [documentation for adding zsh shell completion](https://pip.pypa.io/en/stable/user_guide/#command-completion) raises an error regarding the use of `compadd` outside of a completion function. This seems to be new with 23.2 and added in https://github.com/pypa/pip/pull/11417.\r\n\r\n### Expected behavior\r\n\r\nAs in previous versions, following the documented steps should successfully add completion for pip.\r\n\r\n### pip version\r\n\r\n23.2\r\n\r\n### Python version\r\n\r\n3.11.4\r\n\r\n### OS\r\n\r\nmacOS 12.6.8\r\n\r\n### How to Reproduce\r\n\r\nAs documented:\r\n1. Execute `python -m pip completion --zsh >> ~/.zprofile` or add eval \"`pip completion --zsh`\" to `.zprofile` or `.zshrc`\r\n2. Start a new shell\r\n\r\n### Output\r\n\r\nFrom startup:\r\n```\r\n/dev/fd/16:compadd:4: can only be called from completion function\r\n```\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).\n", "before_files": [{"content": "import sys\nimport textwrap\nfrom optparse import Values\nfrom typing import List\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nBASE_COMPLETION = \"\"\"\n# pip {shell} completion start{script}# pip {shell} completion end\n\"\"\"\n\nCOMPLETION_SCRIPTS = {\n \"bash\": \"\"\"\n _pip_completion()\n {{\n COMPREPLY=( $( COMP_WORDS=\"${{COMP_WORDS[*]}}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )\n }}\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n #compdef -P pip[0-9.]#\n compadd $( COMP_WORDS=\"$words[*]\" \\\\\n COMP_CWORD=$((CURRENT-1)) \\\\\n PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n set -lx COMP_WORDS (commandline -o) \"\"\n set -lx COMP_CWORD ( \\\\\n math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\\\\n )\n set -lx PIP_AUTO_COMPLETE 1\n string split \\\\ -- (eval $COMP_WORDS[1])\n end\n complete -fa \"(__fish_complete_pip)\" -c {prog}\n \"\"\",\n \"powershell\": \"\"\"\n if ((Test-Path Function:\\\\TabExpansion) -and -not `\n (Test-Path Function:\\\\_pip_completeBackup)) {{\n Rename-Item Function:\\\\TabExpansion _pip_completeBackup\n }}\n function TabExpansion($line, $lastWord) {{\n $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()\n if ($lastBlock.StartsWith(\"{prog} \")) {{\n $Env:COMP_WORDS=$lastBlock\n $Env:COMP_CWORD=$lastBlock.Split().Length - 1\n $Env:PIP_AUTO_COMPLETE=1\n (& {prog}).Split()\n Remove-Item 
Env:COMP_WORDS\n Remove-Item Env:COMP_CWORD\n Remove-Item Env:PIP_AUTO_COMPLETE\n }}\n elseif (Test-Path Function:\\\\_pip_completeBackup) {{\n # Fall back on existing tab expansion\n _pip_completeBackup $line $lastWord\n }}\n }}\n \"\"\",\n}\n\n\nclass CompletionCommand(Command):\n \"\"\"A helper command to be used for command completion.\"\"\"\n\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion code for fish\",\n )\n self.cmd_opts.add_option(\n \"--powershell\",\n \"-p\",\n action=\"store_const\",\n const=\"powershell\",\n dest=\"shell\",\n help=\"Emit completion code for powershell\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n \"\"\"Prints the completion code of the given shell\"\"\"\n shells = COMPLETION_SCRIPTS.keys()\n shell_options = [\"--\" + shell for shell in sorted(shells)]\n if options.shell in shells:\n script = textwrap.dedent(\n COMPLETION_SCRIPTS.get(options.shell, \"\").format(prog=get_prog())\n )\n print(BASE_COMPLETION.format(script=script, shell=options.shell))\n return SUCCESS\n else:\n sys.stderr.write(\n \"ERROR: You must pass {}\\n\".format(\" or \".join(shell_options))\n )\n return SUCCESS\n", "path": "src/pip/_internal/commands/completion.py"}]} | 1,978 | 274 |
gh_patches_debug_27686 | rasdani/github-patches | git_diff | xonsh__xonsh-861 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
COMPLETIONS_MENU_ROWS not working
Hello
My value for `COMPLETIONS_MENU_ROWS` is 5 (default). But it does not seem to work. My terminal grow until the bottom of the window and when it has reached the last line then I don't get completions anymore.
I use gnome-terminal on Ubuntu 14.04
Thank you for your help !
EDIT: I installed HEAD from the git repo. I still have the problem
EDIT2+3: I also have the problem with xterm and Terminator (it seems to be terminal-independent)
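
For anyone tracing this: the height reservation happens in `PromptToolkitCompleter.reserve_space` (full file below), which walks the prompt_toolkit layout tree to find the window whose height it overrides. That tree's shape changed between prompt_toolkit releases, so on some versions the lookup misses and `COMPLETIONS_MENU_ROWS` is silently ignored. A rough sketch of a version-gated lookup (assuming a `prompt_toolkit_version()` helper is available from `xonsh.shell`):

```python
import xonsh.shell

def find_window(cli):
    # Sketch only: the layout path to the window differs per ptk release,
    # so branch on the installed version before traversing the tree.
    if xonsh.shell.prompt_toolkit_version().startswith("1.0"):
        return cli.application.layout.children[0].content.children[1]
    try:
        # older layout
        return cli.application.layout.children[1].children[1].content
    except AttributeError:
        # newer layout
        return cli.application.layout.children[1].content
```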
</issue>
<code>
[start of xonsh/ptk/completer.py]
1 # -*- coding: utf-8 -*-
2 """Completer implementation to use with prompt_toolkit."""
3 import os
4 import builtins
5
6 from prompt_toolkit.layout.dimension import LayoutDimension
7 from prompt_toolkit.completion import Completer, Completion
8
9
10 class PromptToolkitCompleter(Completer):
11 """Simple prompt_toolkit Completer object.
12
13 It just redirects requests to normal Xonsh completer.
14 """
15
16 def __init__(self, completer, ctx):
17 """Takes instance of xonsh.completer.Completer and dict with context."""
18 self.completer = completer
19 self.ctx = ctx
20
21 def get_completions(self, document, complete_event):
22 """Returns a generator for list of completions."""
23
24 # Only generate completions when the user hits tab.
25 if complete_event.completion_requested:
26 line = document.current_line.lstrip()
27 endidx = document.cursor_position_col
28 begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0
29 prefix = line[begidx:endidx]
30 completions, l = self.completer.complete(prefix,
31 line,
32 begidx,
33 endidx,
34 self.ctx)
35 if len(completions) <= 1:
36 pass
37 elif len(os.path.commonprefix(completions)) <= len(prefix):
38 self.reserve_space()
39 for comp in completions:
40 yield Completion(comp, -l)
41
42 def reserve_space(self):
43 cli = builtins.__xonsh_shell__.shell.prompter.cli
44 #TODO remove after next prompt_toolkit release
45 try:
46 #old layout to be removed at next ptk release
47 window = cli.application.layout.children[1].children[1].content
48 except AttributeError:
49 #new layout to become default
50 window = cli.application.layout.children[1].content
51 if window and window.render_info:
52 h = window.render_info.content_height
53 r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')
54 size = h + r
55 def comp_height(cli):
56                 # If there is an autocompletion menu to be shown, make sure that our
57 # layout has at least a minimal height in order to display it.
58 if not cli.is_done:
59 return LayoutDimension(min=size)
60 else:
61 return LayoutDimension()
62 window._height = comp_height
63
[end of xonsh/ptk/completer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py
--- a/xonsh/ptk/completer.py
+++ b/xonsh/ptk/completer.py
@@ -2,6 +2,7 @@
"""Completer implementation to use with prompt_toolkit."""
import os
import builtins
+import xonsh.shell
from prompt_toolkit.layout.dimension import LayoutDimension
from prompt_toolkit.completion import Completer, Completion
@@ -41,13 +42,18 @@
def reserve_space(self):
cli = builtins.__xonsh_shell__.shell.prompter.cli
- #TODO remove after next prompt_toolkit release
- try:
- #old layout to be removed at next ptk release
- window = cli.application.layout.children[1].children[1].content
- except AttributeError:
- #new layout to become default
- window = cli.application.layout.children[1].content
+ if xonsh.shell.prompt_toolkit_version().startswith("1.0"):
+ # This is the layout for ptk 1.0
+ window = cli.application.layout.children[0].content.children[1]
+ else:
+ #TODO remove after next prompt_toolkit release
+ try:
+ #old layout to be removed at next ptk release
+ window = cli.application.layout.children[1].children[1].content
+ except AttributeError:
+ #new layout to become default
+ window = cli.application.layout.children[1].content
+
if window and window.render_info:
h = window.render_info.content_height
r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')
| {"golden_diff": "diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py\n--- a/xonsh/ptk/completer.py\n+++ b/xonsh/ptk/completer.py\n@@ -2,6 +2,7 @@\n \"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\n import os\n import builtins\n+import xonsh.shell\n \n from prompt_toolkit.layout.dimension import LayoutDimension\n from prompt_toolkit.completion import Completer, Completion\n@@ -41,13 +42,18 @@\n \n def reserve_space(self):\n cli = builtins.__xonsh_shell__.shell.prompter.cli\n- #TODO remove after next prompt_toolkit release\n- try:\n- #old layout to be removed at next ptk release\n- window = cli.application.layout.children[1].children[1].content\n- except AttributeError:\n- #new layout to become default\n- window = cli.application.layout.children[1].content\n+ if xonsh.shell.prompt_toolkit_version().startswith(\"1.0\"):\n+ # This is the layout for ptk 1.0\n+ window = cli.application.layout.children[0].content.children[1]\n+ else:\n+ #TODO remove after next prompt_toolkit release\n+ try:\n+ #old layout to be removed at next ptk release\n+ window = cli.application.layout.children[1].children[1].content\n+ except AttributeError:\n+ #new layout to become default\n+ window = cli.application.layout.children[1].content\n+\n if window and window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')\n", "issue": "COMPLETIONS_MENU_ROWS not working\nHello\n\nMy value for `COMPLETIONS_MENU_ROWS` is 5 (default). But it does not seem to work. My terminal grow until the bottom of the window and when it has reached the last line then I don't get completions anymore.\n\nI use gnome-terminal on Ubuntu 14.04\n\nThank you for your help !\n\nEDIT: I installed HEAD from the git repo. I still have the problem\n\nEDIT2+3: I also have the problem with xtrem and Terminator (It seems Terminal independant)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\nimport os\nimport builtins\n\nfrom prompt_toolkit.layout.dimension import LayoutDimension\nfrom prompt_toolkit.completion import Completer, Completion\n\n\nclass PromptToolkitCompleter(Completer):\n \"\"\"Simple prompt_toolkit Completer object.\n\n It just redirects requests to normal Xonsh completer.\n \"\"\"\n\n def __init__(self, completer, ctx):\n \"\"\"Takes instance of xonsh.completer.Completer and dict with context.\"\"\"\n self.completer = completer\n self.ctx = ctx\n\n def get_completions(self, document, complete_event):\n \"\"\"Returns a generator for list of completions.\"\"\"\n\n # Only generate completions when the user hits tab.\n if complete_event.completion_requested:\n line = document.current_line.lstrip()\n endidx = document.cursor_position_col\n begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0\n prefix = line[begidx:endidx]\n completions, l = self.completer.complete(prefix,\n line,\n begidx,\n endidx,\n self.ctx)\n if len(completions) <= 1:\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n for comp in completions:\n yield Completion(comp, -l)\n\n def reserve_space(self):\n cli = builtins.__xonsh_shell__.shell.prompter.cli\n #TODO remove after next prompt_toolkit release\n try:\n #old layout to be removed at next ptk release\n window = cli.application.layout.children[1].children[1].content\n except AttributeError:\n #new layout to become default\n window = cli.application.layout.children[1].content\n if window and 
window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')\n size = h + r\n def comp_height(cli):\n # If there is an autocompletion menu to be shown, make sure that o\n # layout has at least a minimal height in order to display it.\n if not cli.is_done:\n return LayoutDimension(min=size)\n else:\n return LayoutDimension()\n window._height = comp_height\n", "path": "xonsh/ptk/completer.py"}]} | 1,303 | 384 |
gh_patches_debug_37069 | rasdani/github-patches | git_diff | TheAlgorithms__Python-8674 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
There is a missing check for the "associativity" of the operators in the stacks/infix_to_postfix_conversion.py file.
### Repository commit
11582943a555ae3b6a22938df6d3645b0327562e
### Python version (python --version)
Python 3.10.6
### Dependencies version (pip freeze)
astroid==2.11.3
bcc==0.18.0
black==22.3.0
blinker==1.4
Brlapi==0.8.3
certifi==2020.6.20
chardet==4.0.0
### Expected behavior
Hi. In the [data_structures/stacks/infix_to_postfix_conversion.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_postfix_conversion.py) file, when we pass the `"2^3^2"` argument to the `infix_to_postfix` function, it should return `'2 3 2 ^ ^'`. Evaluating that postfix expression gives `512`, which is the correct result.
### Actual behavior
Currently `infix_to_postfix("2^3^2")` returns `'2 3 ^ 2 ^'`, which is wrong: evaluating it gives `64`.
The reason is that there is currently no check for the "associativity" of the operators. In particular, the `^` operator causes a problem here, because it has `Right-To-Left` associativity; see the snippet below.
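
To make the difference concrete, here is a small self-check comparing the two groupings (plain Python, no project code needed):

```python
# Postfix '2 3 ^ 2 ^' groups left-to-right: (2 ** 3) ** 2
assert (2 ** 3) ** 2 == 64
# Postfix '2 3 2 ^ ^' groups right-to-left: 2 ** (3 ** 2)
assert 2 ** (3 ** 2) == 512
# Python itself parses '**' as right-associative, matching math convention:
assert 2 ** 3 ** 2 == 512
```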
</issue>
<code>
[start of data_structures/stacks/infix_to_postfix_conversion.py]
1 """
2 https://en.wikipedia.org/wiki/Infix_notation
3 https://en.wikipedia.org/wiki/Reverse_Polish_notation
4 https://en.wikipedia.org/wiki/Shunting-yard_algorithm
5 """
6
7 from .balanced_parentheses import balanced_parentheses
8 from .stack import Stack
9
10
11 def precedence(char: str) -> int:
12 """
13 Return integer value representing an operator's precedence, or
14 order of operation.
15 https://en.wikipedia.org/wiki/Order_of_operations
16 """
17 return {"+": 1, "-": 1, "*": 2, "/": 2, "^": 3}.get(char, -1)
18
19
20 def infix_to_postfix(expression_str: str) -> str:
21 """
22 >>> infix_to_postfix("(1*(2+3)+4))")
23 Traceback (most recent call last):
24 ...
25 ValueError: Mismatched parentheses
26 >>> infix_to_postfix("")
27 ''
28 >>> infix_to_postfix("3+2")
29 '3 2 +'
30 >>> infix_to_postfix("(3+4)*5-6")
31 '3 4 + 5 * 6 -'
32 >>> infix_to_postfix("(1+2)*3/4-5")
33 '1 2 + 3 * 4 / 5 -'
34 >>> infix_to_postfix("a+b*c+(d*e+f)*g")
35 'a b c * + d e * f + g * +'
36 >>> infix_to_postfix("x^y/(5*z)+2")
37 'x y ^ 5 z * / 2 +'
38 """
39 if not balanced_parentheses(expression_str):
40 raise ValueError("Mismatched parentheses")
41 stack: Stack[str] = Stack()
42 postfix = []
43 for char in expression_str:
44 if char.isalpha() or char.isdigit():
45 postfix.append(char)
46 elif char == "(":
47 stack.push(char)
48 elif char == ")":
49 while not stack.is_empty() and stack.peek() != "(":
50 postfix.append(stack.pop())
51 stack.pop()
52 else:
53 while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):
54 postfix.append(stack.pop())
55 stack.push(char)
56 while not stack.is_empty():
57 postfix.append(stack.pop())
58 return " ".join(postfix)
59
60
61 if __name__ == "__main__":
62 from doctest import testmod
63
64 testmod()
65 expression = "a+b*(c^d-e)^(f+g*h)-i"
66
67 print("Infix to Postfix Notation demonstration:\n")
68 print("Infix notation: " + expression)
69 print("Postfix notation: " + infix_to_postfix(expression))
70
[end of data_structures/stacks/infix_to_postfix_conversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py
--- a/data_structures/stacks/infix_to_postfix_conversion.py
+++ b/data_structures/stacks/infix_to_postfix_conversion.py
@@ -4,9 +4,26 @@
https://en.wikipedia.org/wiki/Shunting-yard_algorithm
"""
+from typing import Literal
+
from .balanced_parentheses import balanced_parentheses
from .stack import Stack
+PRECEDENCES: dict[str, int] = {
+ "+": 1,
+ "-": 1,
+ "*": 2,
+ "/": 2,
+ "^": 3,
+}
+ASSOCIATIVITIES: dict[str, Literal["LR", "RL"]] = {
+ "+": "LR",
+ "-": "LR",
+ "*": "LR",
+ "/": "LR",
+ "^": "RL",
+}
+
def precedence(char: str) -> int:
"""
@@ -14,7 +31,15 @@
order of operation.
https://en.wikipedia.org/wiki/Order_of_operations
"""
- return {"+": 1, "-": 1, "*": 2, "/": 2, "^": 3}.get(char, -1)
+ return PRECEDENCES.get(char, -1)
+
+
+def associativity(char: str) -> Literal["LR", "RL"]:
+ """
+ Return the associativity of the operator `char`.
+ https://en.wikipedia.org/wiki/Operator_associativity
+ """
+ return ASSOCIATIVITIES[char]
def infix_to_postfix(expression_str: str) -> str:
@@ -35,6 +60,8 @@
'a b c * + d e * f + g * +'
>>> infix_to_postfix("x^y/(5*z)+2")
'x y ^ 5 z * / 2 +'
+ >>> infix_to_postfix("2^3^2")
+ '2 3 2 ^ ^'
"""
if not balanced_parentheses(expression_str):
raise ValueError("Mismatched parentheses")
@@ -50,9 +77,26 @@
postfix.append(stack.pop())
stack.pop()
else:
- while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):
+ while True:
+ if stack.is_empty():
+ stack.push(char)
+ break
+
+ char_precedence = precedence(char)
+ tos_precedence = precedence(stack.peek())
+
+ if char_precedence > tos_precedence:
+ stack.push(char)
+ break
+ if char_precedence < tos_precedence:
+ postfix.append(stack.pop())
+ continue
+ # Precedences are equal
+ if associativity(char) == "RL":
+ stack.push(char)
+ break
postfix.append(stack.pop())
- stack.push(char)
+
while not stack.is_empty():
postfix.append(stack.pop())
return " ".join(postfix)
| {"golden_diff": "diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py\n--- a/data_structures/stacks/infix_to_postfix_conversion.py\n+++ b/data_structures/stacks/infix_to_postfix_conversion.py\n@@ -4,9 +4,26 @@\n https://en.wikipedia.org/wiki/Shunting-yard_algorithm\n \"\"\"\n \n+from typing import Literal\n+\n from .balanced_parentheses import balanced_parentheses\n from .stack import Stack\n \n+PRECEDENCES: dict[str, int] = {\n+ \"+\": 1,\n+ \"-\": 1,\n+ \"*\": 2,\n+ \"/\": 2,\n+ \"^\": 3,\n+}\n+ASSOCIATIVITIES: dict[str, Literal[\"LR\", \"RL\"]] = {\n+ \"+\": \"LR\",\n+ \"-\": \"LR\",\n+ \"*\": \"LR\",\n+ \"/\": \"LR\",\n+ \"^\": \"RL\",\n+}\n+\n \n def precedence(char: str) -> int:\n \"\"\"\n@@ -14,7 +31,15 @@\n order of operation.\n https://en.wikipedia.org/wiki/Order_of_operations\n \"\"\"\n- return {\"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2, \"^\": 3}.get(char, -1)\n+ return PRECEDENCES.get(char, -1)\n+\n+\n+def associativity(char: str) -> Literal[\"LR\", \"RL\"]:\n+ \"\"\"\n+ Return the associativity of the operator `char`.\n+ https://en.wikipedia.org/wiki/Operator_associativity\n+ \"\"\"\n+ return ASSOCIATIVITIES[char]\n \n \n def infix_to_postfix(expression_str: str) -> str:\n@@ -35,6 +60,8 @@\n 'a b c * + d e * f + g * +'\n >>> infix_to_postfix(\"x^y/(5*z)+2\")\n 'x y ^ 5 z * / 2 +'\n+ >>> infix_to_postfix(\"2^3^2\")\n+ '2 3 2 ^ ^'\n \"\"\"\n if not balanced_parentheses(expression_str):\n raise ValueError(\"Mismatched parentheses\")\n@@ -50,9 +77,26 @@\n postfix.append(stack.pop())\n stack.pop()\n else:\n- while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):\n+ while True:\n+ if stack.is_empty():\n+ stack.push(char)\n+ break\n+\n+ char_precedence = precedence(char)\n+ tos_precedence = precedence(stack.peek())\n+\n+ if char_precedence > tos_precedence:\n+ stack.push(char)\n+ break\n+ if char_precedence < tos_precedence:\n+ postfix.append(stack.pop())\n+ continue\n+ # Precedences are equal\n+ if associativity(char) == \"RL\":\n+ stack.push(char)\n+ break\n postfix.append(stack.pop())\n- stack.push(char)\n+\n while not stack.is_empty():\n postfix.append(stack.pop())\n return \" \".join(postfix)\n", "issue": "There is a missing check for the \"associativity\" of the operators in the stacks/infix_to_postfix_conversion.py file.\n### Repository commit\r\n\r\n11582943a555ae3b6a22938df6d3645b0327562e\r\n\r\n### Python version (python --version)\r\n\r\nPython 3.10.6\r\n\r\n### Dependencies version (pip freeze)\r\n\r\nastroid==2.11.3\r\nbcc==0.18.0\r\nblack==22.3.0\r\nblinker==1.4\r\nBrlapi==0.8.3\r\ncertifi==2020.6.20\r\nchardet==4.0.0\r\n\r\n\r\n### Expected behavior\r\n\r\nHi, In the [data_structures/stacks/infix_to_postfix_conversion.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_postfix_conversion.py) file, when we pass `\"2^3^2\"` argument to the `infix_to_postfix` function, it should return `'2 3 2 ^ ^'` back. This results in number `512` which is the correct calculation.\r\n\r\n### Actual behavior\r\n\r\nCurrently `infix_to_postfix(\"2^3^2\")` returns `'2 3 ^ 2 ^'` which is wrong. If we calculate it, it returns number `64`.\r\n\r\nThe reason behind this is that currently we don't have any check for the \"associativity\" of the operators. In particular `^` operator causes problem here. It has `Right-To-Left` associativity. 
\nThere is a missing check for the \"associativity\" of the operators in the stacks/infix_to_postfix_conversion.py file.\n### Repository commit\r\n\r\n11582943a555ae3b6a22938df6d3645b0327562e\r\n\r\n### Python version (python --version)\r\n\r\nPython 3.10.6\r\n\r\n### Dependencies version (pip freeze)\r\n\r\nastroid==2.11.3\r\nbcc==0.18.0\r\nblack==22.3.0\r\nblinker==1.4\r\nBrlapi==0.8.3\r\ncertifi==2020.6.20\r\nchardet==4.0.0\r\n\r\n\r\n### Expected behavior\r\n\r\nHi, In the [data_structures/stacks/infix_to_postfix_conversion.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_postfix_conversion.py) file, when we pass `\"2^3^2\"` argument to the `infix_to_postfix` function, it should return `'2 3 2 ^ ^'` back. This results in number `512` which is the correct calculation.\r\n\r\n### Actual behavior\r\n\r\nCurrently `infix_to_postfix(\"2^3^2\")` returns `'2 3 ^ 2 ^'` which is wrong. If we calculate it, it returns number `64`.\r\n\r\nThe reason behind this is that currently we don't have any check for the \"associativity\" of the operators. In particular `^` operator causes problem here. It has `Right-To-Left` associativity. \n", "before_files": [{"content": "\"\"\"\nhttps://en.wikipedia.org/wiki/Infix_notation\nhttps://en.wikipedia.org/wiki/Reverse_Polish_notation\nhttps://en.wikipedia.org/wiki/Shunting-yard_algorithm\n\"\"\"\n\nfrom .balanced_parentheses import balanced_parentheses\nfrom .stack import Stack\n\n\ndef precedence(char: str) -> int:\n \"\"\"\n Return integer value representing an operator's precedence, or\n order of operation.\n https://en.wikipedia.org/wiki/Order_of_operations\n \"\"\"\n return {\"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2, \"^\": 3}.get(char, -1)\n\n\ndef infix_to_postfix(expression_str: str) -> str:\n \"\"\"\n >>> infix_to_postfix(\"(1*(2+3)+4))\")\n Traceback (most recent call last):\n ...\n ValueError: Mismatched parentheses\n >>> infix_to_postfix(\"\")\n ''\n >>> infix_to_postfix(\"3+2\")\n '3 2 +'\n >>> infix_to_postfix(\"(3+4)*5-6\")\n '3 4 + 5 * 6 -'\n >>> infix_to_postfix(\"(1+2)*3/4-5\")\n '1 2 + 3 * 4 / 5 -'\n >>> infix_to_postfix(\"a+b*c+(d*e+f)*g\")\n 'a b c * + d e * f + g * +'\n >>> infix_to_postfix(\"x^y/(5*z)+2\")\n 'x y ^ 5 z * / 2 +'\n \"\"\"\n if not balanced_parentheses(expression_str):\n raise ValueError(\"Mismatched parentheses\")\n stack: Stack[str] = Stack()\n postfix = []\n for char in expression_str:\n if char.isalpha() or char.isdigit():\n postfix.append(char)\n elif char == \"(\":\n stack.push(char)\n elif char == \")\":\n while not stack.is_empty() and stack.peek() != \"(\":\n postfix.append(stack.pop())\n stack.pop()\n else:\n while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):\n postfix.append(stack.pop())\n stack.push(char)\n while not stack.is_empty():\n postfix.append(stack.pop())\n return \" \".join(postfix)\n\n\nif __name__ == \"__main__\":\n from doctest import testmod\n\n testmod()\n expression = \"a+b*(c^d-e)^(f+g*h)-i\"\n\n print(\"Infix to Postfix Notation demonstration:\\n\")\n print(\"Infix notation: \" + expression)\n print(\"Postfix notation: \" + infix_to_postfix(expression))\n", "path": "data_structures/stacks/infix_to_postfix_conversion.py"}]} | 1,962 | 688 |
gh_patches_debug_24866 | rasdani/github-patches | git_diff | facebookresearch__hydra-914 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Example Launcher Plugin to mention restoring the singletons
The singletons should be restored when performing multi-process launching.
something like:
```python
# In the launcher's launch:
state = Singleton.get_state()
# In the remote process
Singleton.set_state(state)
```
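
For a slightly fuller picture, a launcher that spawns worker processes would capture the state once on the launcher side and hand it to every worker alongside the job parameters. A minimal sketch using `multiprocessing` (the worker wiring is illustrative, not Hydra API):

```python
import multiprocessing

from hydra.core.singleton import Singleton


def _remote_entry(singleton_state, task_function, sweep_config):
    # First thing in the child process: rehydrate the singletons,
    # otherwise HydraConfig and friends are empty over there.
    Singleton.set_state(singleton_state)
    return task_function(sweep_config)


def launch_one(task_function, sweep_config):
    state = Singleton.get_state()  # captured on the launcher process
    proc = multiprocessing.Process(
        target=_remote_entry, args=(state, task_function, sweep_config)
    )
    proc.start()
    proc.join()
```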
</issue>
<code>
[start of plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 from pathlib import Path
4 from typing import Optional, Sequence
5
6 from hydra.core.config_loader import ConfigLoader
7 from hydra.core.config_search_path import ConfigSearchPath
8 from hydra.core.hydra_config import HydraConfig
9 from hydra.core.utils import (
10 JobReturn,
11 configure_log,
12 filter_overrides,
13 run_job,
14 setup_globals,
15 )
16 from hydra.plugins.launcher import Launcher
17 from hydra.plugins.search_path_plugin import SearchPathPlugin
18 from hydra.types import TaskFunction
19 from omegaconf import DictConfig, open_dict
20
21 # IMPORTANT:
22 # If your plugin imports any module that takes more than a fraction of a second to import,
23 # Import the module lazily (typically inside launch()).
24 # Installed plugins are imported during Hydra initialization, and plugins that are slow to import will slow
25 # the startup of ALL hydra applications.
26 # Another approach is to place heavy includes in a file prefixed by _, such as _core.py:
27 # Hydra will not look for plugin in such files and will not import them during plugin discovery.
28
29
30 log = logging.getLogger(__name__)
31
32
33 class ExampleLauncherSearchPathPlugin(SearchPathPlugin):
34 """
35 This plugin is allowing configuration files provided by the ExampleLauncher plugin to be discovered
36 and used once the ExampleLauncher plugin is installed
37 """
38
39 def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
40 # Appends the search path for this plugin to the end of the search path
41 search_path.append(
42 "hydra-example-launcher", "pkg://hydra_plugins.example_launcher_plugin.conf"
43 )
44
45
46 class ExampleLauncher(Launcher):
47 def __init__(self, foo: str, bar: str) -> None:
48 self.config: Optional[DictConfig] = None
49 self.config_loader: Optional[ConfigLoader] = None
50 self.task_function: Optional[TaskFunction] = None
51
52         # foo and bar are coming from the plugin's configuration
53 self.foo = foo
54 self.bar = bar
55
56 def setup(
57 self,
58 config: DictConfig,
59 config_loader: ConfigLoader,
60 task_function: TaskFunction,
61 ) -> None:
62 self.config = config
63 self.config_loader = config_loader
64 self.task_function = task_function
65
66 def launch(
67 self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
68 ) -> Sequence[JobReturn]:
69 """
70 :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.
71 :param initial_job_idx: Initial job idx in batch.
72 :return: an array of return values from run_job with indexes corresponding to the input list indexes.
73 """
74 setup_globals()
75 assert self.config is not None
76 assert self.config_loader is not None
77 assert self.task_function is not None
78
79 configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
80 sweep_dir = Path(str(self.config.hydra.sweep.dir))
81 sweep_dir.mkdir(parents=True, exist_ok=True)
82 log.info(
83 f"Example Launcher(foo={self.foo}, bar={self.bar}) is launching {len(job_overrides)} jobs locally"
84 )
85 log.info(f"Sweep output dir : {sweep_dir}")
86 runs = []
87
88 for idx, overrides in enumerate(job_overrides):
89 idx = initial_job_idx + idx
90 lst = " ".join(filter_overrides(overrides))
91 log.info(f"\t#{idx} : {lst}")
92 sweep_config = self.config_loader.load_sweep_config(
93 self.config, list(overrides)
94 )
95 with open_dict(sweep_config):
96 # This typically coming from the underlying scheduler (SLURM_JOB_ID for instance)
97 # In that case, it will not be available here because we are still in the main process.
98 # but instead should be populated remotely before calling the task_function.
99 sweep_config.hydra.job.id = f"job_id_for_{idx}"
100 sweep_config.hydra.job.num = idx
101 HydraConfig.instance().set_config(sweep_config)
102
103 ret = run_job(
104 config=sweep_config,
105 task_function=self.task_function,
106 job_dir_key="hydra.sweep.dir",
107 job_subdir_key="hydra.sweep.subdir",
108 )
109 runs.append(ret)
110 # reconfigure the logging subsystem for Hydra as the run_job call configured it for the Job.
111 # This is needed for launchers that calls run_job in the same process and not spawn a new one.
112 configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)
113 return runs
114
[end of plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py b/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py
--- a/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py
+++ b/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py
@@ -6,6 +6,7 @@
from hydra.core.config_loader import ConfigLoader
from hydra.core.config_search_path import ConfigSearchPath
from hydra.core.hydra_config import HydraConfig
+from hydra.core.singleton import Singleton
from hydra.core.utils import (
JobReturn,
configure_log,
@@ -100,6 +101,17 @@
sweep_config.hydra.job.num = idx
HydraConfig.instance().set_config(sweep_config)
+ # If your launcher is executing code in a different process, it is important to restore
+ # the singleton state in the new process.
+ # To do this, you will likely need to serialize the singleton state along with the other
+ # parameters passed to the child process.
+
+ # happening on launcher process
+ state = Singleton.get_state()
+
+ # happening on the spawned process
+ Singleton.set_state(state)
+
ret = run_job(
config=sweep_config,
task_function=self.task_function,
| {"golden_diff": "diff --git a/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py b/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py\n--- a/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py\n+++ b/plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py\n@@ -6,6 +6,7 @@\n from hydra.core.config_loader import ConfigLoader\n from hydra.core.config_search_path import ConfigSearchPath\n from hydra.core.hydra_config import HydraConfig\n+from hydra.core.singleton import Singleton\n from hydra.core.utils import (\n JobReturn,\n configure_log,\n@@ -100,6 +101,17 @@\n sweep_config.hydra.job.num = idx\n HydraConfig.instance().set_config(sweep_config)\n \n+ # If your launcher is executing code in a different process, it is important to restore\n+ # the singleton state in the new process.\n+ # To do this, you will likely need to serialize the singleton state along with the other\n+ # parameters passed to the child process.\n+\n+ # happening on launcher process\n+ state = Singleton.get_state()\n+\n+ # happening on the spawned process\n+ Singleton.set_state(state)\n+\n ret = run_job(\n config=sweep_config,\n task_function=self.task_function,\n", "issue": "Update Example Launcher Plugin mention restoring the singletons\nThe singletons should be restored when performing multi-process launching.\r\nsomething \r\n\r\n```python\r\n# In the launcher's launch:\r\nstate = Singleton.get_state()\r\n\r\n# In the remote process\r\nSingleton.set_state(state)\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nfrom pathlib import Path\nfrom typing import Optional, Sequence\n\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.config_search_path import ConfigSearchPath\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.core.utils import (\n JobReturn,\n configure_log,\n filter_overrides,\n run_job,\n setup_globals,\n)\nfrom hydra.plugins.launcher import Launcher\nfrom hydra.plugins.search_path_plugin import SearchPathPlugin\nfrom hydra.types import TaskFunction\nfrom omegaconf import DictConfig, open_dict\n\n# IMPORTANT:\n# If your plugin imports any module that takes more than a fraction of a second to import,\n# Import the module lazily (typically inside launch()).\n# Installed plugins are imported during Hydra initialization and plugins that are slow to import plugins will slow\n# the startup of ALL hydra applications.\n# Another approach is to place heavy includes in a file prefixed by _, such as _core.py:\n# Hydra will not look for plugin in such files and will not import them during plugin discovery.\n\n\nlog = logging.getLogger(__name__)\n\n\nclass ExampleLauncherSearchPathPlugin(SearchPathPlugin):\n \"\"\"\n This plugin is allowing configuration files provided by the ExampleLauncher plugin to be discovered\n and used once the ExampleLauncher plugin is installed\n \"\"\"\n\n def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:\n # Appends the search path for this plugin to the end of the search path\n search_path.append(\n \"hydra-example-launcher\", \"pkg://hydra_plugins.example_launcher_plugin.conf\"\n )\n\n\nclass ExampleLauncher(Launcher):\n def __init__(self, foo: str, bar: str) -> None:\n self.config: Optional[DictConfig] = None\n self.config_loader: Optional[ConfigLoader] = None\n 
self.task_function: Optional[TaskFunction] = None\n\n # foo and var are coming from the the plugin's configuration\n self.foo = foo\n self.bar = bar\n\n def setup(\n self,\n config: DictConfig,\n config_loader: ConfigLoader,\n task_function: TaskFunction,\n ) -> None:\n self.config = config\n self.config_loader = config_loader\n self.task_function = task_function\n\n def launch(\n self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int\n ) -> Sequence[JobReturn]:\n \"\"\"\n :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.\n :param initial_job_idx: Initial job idx in batch.\n :return: an array of return values from run_job with indexes corresponding to the input list indexes.\n \"\"\"\n setup_globals()\n assert self.config is not None\n assert self.config_loader is not None\n assert self.task_function is not None\n\n configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)\n sweep_dir = Path(str(self.config.hydra.sweep.dir))\n sweep_dir.mkdir(parents=True, exist_ok=True)\n log.info(\n f\"Example Launcher(foo={self.foo}, bar={self.bar}) is launching {len(job_overrides)} jobs locally\"\n )\n log.info(f\"Sweep output dir : {sweep_dir}\")\n runs = []\n\n for idx, overrides in enumerate(job_overrides):\n idx = initial_job_idx + idx\n lst = \" \".join(filter_overrides(overrides))\n log.info(f\"\\t#{idx} : {lst}\")\n sweep_config = self.config_loader.load_sweep_config(\n self.config, list(overrides)\n )\n with open_dict(sweep_config):\n # This typically coming from the underlying scheduler (SLURM_JOB_ID for instance)\n # In that case, it will not be available here because we are still in the main process.\n # but instead should be populated remotely before calling the task_function.\n sweep_config.hydra.job.id = f\"job_id_for_{idx}\"\n sweep_config.hydra.job.num = idx\n HydraConfig.instance().set_config(sweep_config)\n\n ret = run_job(\n config=sweep_config,\n task_function=self.task_function,\n job_dir_key=\"hydra.sweep.dir\",\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n runs.append(ret)\n # reconfigure the logging subsystem for Hydra as the run_job call configured it for the Job.\n # This is needed for launchers that calls run_job in the same process and not spawn a new one.\n configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)\n return runs\n", "path": "plugins/examples/example_launcher_plugin/hydra_plugins/example_launcher_plugin/example_launcher.py"}]} | 1,840 | 293 |
gh_patches_debug_30573 | rasdani/github-patches | git_diff | wagtail__wagtail-1357 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StreamField EmbedBlock renders the url only
Given the following setup:
```
class EmbedOnImage(StructBlock):
embed = EmbedBlock()
background_image = ImageChooserBlock(required=False)
class Meta:
template = 'streamfield_blocks/embed_on_image.html'
```
`{{ self.embed }}` outputs only the embed URL in `streamfield_blocks/embed_on_image.html`.
And `EmbedBlock.render_basic` is never called: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailembeds/blocks.py#L8
Also, if I apply the embed filter too: `{{ self.embed|embed }}` then it outputs nothing because the embed filter won't call `embed_to_frontend_html` either, which would use the correct template to render it: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailembeds/templatetags/wagtailembeds_tags.py#L14
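
One direction that would address both symptoms is to make the block's native value an object that knows how to render itself, instead of a bare URL string, so that `{{ self.embed }}` produces the embed HTML whether or not `render_basic` is in the code path. A rough sketch of the idea (not the current API):

```python
from wagtail.wagtailembeds.format import embed_to_frontend_html


class EmbedValue(object):
    """Sketch: keep the URL around, but render as frontend embed HTML."""

    def __init__(self, url):
        self.url = url

    def __str__(self):
        # {{ self.embed }} calls str() in templates, so this emits the
        # embed markup rather than echoing the URL.
        return embed_to_frontend_html(self.url)
```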
</issue>
<code>
[start of wagtail/wagtailembeds/blocks.py]
1 from wagtail.wagtailcore import blocks
2
3 from wagtail.wagtailembeds.format import embed_to_frontend_html
4
5
6 class EmbedBlock(blocks.URLBlock):
7 def render_basic(self, value):
8 return embed_to_frontend_html(value)
9
[end of wagtail/wagtailembeds/blocks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailembeds/blocks.py b/wagtail/wagtailembeds/blocks.py
--- a/wagtail/wagtailembeds/blocks.py
+++ b/wagtail/wagtailembeds/blocks.py
@@ -1,8 +1,62 @@
+from django.utils.encoding import python_2_unicode_compatible
+
from wagtail.wagtailcore import blocks
from wagtail.wagtailembeds.format import embed_to_frontend_html
+@python_2_unicode_compatible
+class EmbedValue(object):
+ """
+ Native value of an EmbedBlock. Should, at minimum, have a 'url' property
+ and render as the embed HTML when rendered in a template.
+ NB We don't use a wagtailembeds.model.Embed object for this, because
+ we want to be able to do {{ value.url|embed:max_width=500 }} without
+ doing a redundant fetch of the embed at the default width.
+ """
+ def __init__(self, url):
+ self.url = url
+
+ def __str__(self):
+ return embed_to_frontend_html(self.url)
+
+
class EmbedBlock(blocks.URLBlock):
- def render_basic(self, value):
- return embed_to_frontend_html(value)
+ def get_default(self):
+ # Allow specifying the default for an EmbedBlock as either an EmbedValue or a string (or None).
+ if not self.meta.default:
+ return None
+ elif isinstance(self.meta.default, EmbedValue):
+ return self.meta.default
+ else:
+ # assume default has been passed as a string
+ return EmbedValue(self.meta.default)
+
+ def to_python(self, value):
+ # The JSON representation of an EmbedBlock's value is a URL string;
+ # this should be converted to an EmbedValue (or None).
+ if not value:
+ return None
+ else:
+ return EmbedValue(value)
+
+ def get_prep_value(self, value):
+ # serialisable value should be a URL string
+ if value is None:
+ return ''
+ else:
+ return value.url
+
+ def value_for_form(self, value):
+ # the value to be handled by the URLField is a plain URL string (or the empty string)
+ if value is None:
+ return ''
+ else:
+ return value.url
+
+ def value_from_form(self, value):
+ # convert the value returned from the form (a URL string) to an EmbedValue (or None)
+ if not value:
+ return None
+ else:
+ return EmbedValue(value)
| {"golden_diff": "diff --git a/wagtail/wagtailembeds/blocks.py b/wagtail/wagtailembeds/blocks.py\n--- a/wagtail/wagtailembeds/blocks.py\n+++ b/wagtail/wagtailembeds/blocks.py\n@@ -1,8 +1,62 @@\n+from django.utils.encoding import python_2_unicode_compatible\n+\n from wagtail.wagtailcore import blocks\n \n from wagtail.wagtailembeds.format import embed_to_frontend_html\n \n \n+@python_2_unicode_compatible\n+class EmbedValue(object):\n+ \"\"\"\n+ Native value of an EmbedBlock. Should, at minimum, have a 'url' property\n+ and render as the embed HTML when rendered in a template.\n+ NB We don't use a wagtailembeds.model.Embed object for this, because\n+ we want to be able to do {{ value.url|embed:max_width=500 }} without\n+ doing a redundant fetch of the embed at the default width.\n+ \"\"\"\n+ def __init__(self, url):\n+ self.url = url\n+\n+ def __str__(self):\n+ return embed_to_frontend_html(self.url)\n+\n+\n class EmbedBlock(blocks.URLBlock):\n- def render_basic(self, value):\n- return embed_to_frontend_html(value)\n+ def get_default(self):\n+ # Allow specifying the default for an EmbedBlock as either an EmbedValue or a string (or None).\n+ if not self.meta.default:\n+ return None\n+ elif isinstance(self.meta.default, EmbedValue):\n+ return self.meta.default\n+ else:\n+ # assume default has been passed as a string\n+ return EmbedValue(self.meta.default)\n+\n+ def to_python(self, value):\n+ # The JSON representation of an EmbedBlock's value is a URL string;\n+ # this should be converted to an EmbedValue (or None).\n+ if not value:\n+ return None\n+ else:\n+ return EmbedValue(value)\n+\n+ def get_prep_value(self, value):\n+ # serialisable value should be a URL string\n+ if value is None:\n+ return ''\n+ else:\n+ return value.url\n+\n+ def value_for_form(self, value):\n+ # the value to be handled by the URLField is a plain URL string (or the empty string)\n+ if value is None:\n+ return ''\n+ else:\n+ return value.url\n+\n+ def value_from_form(self, value):\n+ # convert the value returned from the form (a URL string) to an EmbedValue (or None)\n+ if not value:\n+ return None\n+ else:\n+ return EmbedValue(value)\n", "issue": "StreamField EmbedBlock renders the url only\nGiven the following setup:\n\n```\nclass EmbedOnImage(StructBlock):\n embed = EmbedBlock()\n background_image = ImageChooserBlock(required=False)\n\n class Meta:\n template = 'streamfield_blocks/embed_on_image.html'\n```\n\n`{{ self.embed }}` outputs the embed url only in `streamfield_blocks/embed_on_image.html`.\n\nAnd `EmbedBlock.render_basic` is never called: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailembeds/blocks.py#L8\n\nAlso, if I apply the embed filter too: `{{ self.embed|embed }}` then it outputs nothing because the embed filter won't call `embed_to_frontend_html` either, which would use the correct template to render it: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailembeds/templatetags/wagtailembeds_tags.py#L14\n\n", "before_files": [{"content": "from wagtail.wagtailcore import blocks\n\nfrom wagtail.wagtailembeds.format import embed_to_frontend_html\n\n\nclass EmbedBlock(blocks.URLBlock):\n def render_basic(self, value):\n return embed_to_frontend_html(value)\n", "path": "wagtail/wagtailembeds/blocks.py"}]} | 819 | 601 |
gh_patches_debug_30740 | rasdani/github-patches | git_diff | translate__translate-3520 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add traits and variants support in l20nfile
L20n traits allow storing value variants and additional information for a unit. We need to add such support when parsing `.ftl` files and converting resources.
```
file-menu =
[xul/label] File
[xul/accesskey] F
```
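
In the parsed AST, each entity carries its traits alongside the value, so the converter needs to serialize both when building a unit. A rough sketch of the idea (`dumpPattern`/`dumpMembers` follow the `FTLSerializer` interface used in this module; the exact signatures are assumptions):

```python
def dump_l20n_entity_value(entity):
    serializer = L20nSerializer()
    value = serializer.dumpPattern(entity['value'])
    if len(entity['traits']):
        # Serialize the trait members indented below the main value.
        traits = serializer.dumpMembers(entity['traits'], 2)
        return u'{}\n{}'.format(value, traits)
    return value
```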
</issue>
<code>
[start of translate/storage/l20n.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2016 Zuza Software Foundation
4 #
5 # This file is part of translate.
6 #
7 # translate is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # translate is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19
20 from __future__ import absolute_import
21
22 from codecs import iterencode
23
24 from l20n.format.parser import FTLParser as L20nParser
25 from l20n.format.serializer import FTLSerializer as L20nSerializer
26
27 from translate.storage import base
28
29
30 class l20nunit(base.TranslationUnit):
31 """Single L20n Entity"""
32
33 def __init__(self, source='', id='', comment=''):
34 super(l20nunit, self).__init__(source)
35 self.id = id
36 self.value = source
37 self.comment = comment
38
39 # Note that source and target are equivalent for monolingual units
40 def getsource(self):
41 return self.value
42
43 def setsource(self, source):
44 self.value = source
45
46 source = property(getsource, setsource)
47
48 def gettarget(self):
49 return self.value
50
51 def settarget(self, target):
52 self.value = target
53
54 target = property(gettarget, settarget)
55
56 def getid(self):
57 return self.id
58
59 def setid(self, new_id):
60 self.id = new_id
61
62 def getoutput(self):
63 return u"%s = %s\n" % (self.id, self.value)
64
65
66 class l20nfile(base.TranslationStore):
67 UnitClass = l20nunit
68 encoding = 'utf8'
69
70 def __init__(self, inputfile=None):
71 super(l20nfile, self).__init__(unitclass=self.UnitClass)
72 self.filename = getattr(inputfile, 'name', '')
73 if inputfile is not None:
74 l20nsrc = inputfile.read()
75 self.parse(l20nsrc)
76 self.makeindex()
77
78 def parse(self, l20nsrc):
79 text, encoding = self.detect_encoding(
80 l20nsrc, default_encodings=[self.encoding])
81 if not text:
82 raise IOError("Cannot detect encoding for %s." % (self.filename or
83 "given string"))
84 l20nsrc = text
85
86 parser = L20nParser()
87 ast, errors = parser.parseResource(l20nsrc)
88
89 for entry in ast['body']:
90 if entry['type'] == 'Entity':
91 translation = L20nSerializer().dumpPattern(entry['value'])
92 comment = ''
93 if entry['comment']:
94 comment = entry['comment']['content']
95
96 newl20n = l20nunit(
97 source=translation,
98 id=entry['id']['name'],
99 comment=comment
100 )
101 self.addunit(newl20n)
102
103 def serialize(self, out):
104 """Write the units back to file."""
105 # Thanks to iterencode, a possible BOM is written only once
106 for chunk in iterencode((unit.getoutput() for unit in self.units), self.encoding):
107 out.write(chunk)
108
[end of translate/storage/l20n.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/translate/storage/l20n.py b/translate/storage/l20n.py
--- a/translate/storage/l20n.py
+++ b/translate/storage/l20n.py
@@ -27,6 +27,17 @@
from translate.storage import base
+def dump_l20n_entity_value(entity):
+ serializer = L20nSerializer()
+ value = serializer.dumpPattern(entity['value'])
+
+ if len(entity['traits']):
+ traits = serializer.dumpMembers(entity['traits'], 2)
+ return u'{}\n{}'.format(value, traits)
+
+ return value
+
+
class l20nunit(base.TranslationUnit):
"""Single L20n Entity"""
@@ -75,6 +86,19 @@
self.parse(l20nsrc)
self.makeindex()
+ def parse_entity(self, entity):
+ translation = dump_l20n_entity_value(entity)
+ comment = ''
+ if entity['comment']:
+ comment = entity['comment']['content']
+
+ newl20n = l20nunit(
+ source=translation,
+ id=entity['id']['name'],
+ comment=comment
+ )
+ self.addunit(newl20n)
+
def parse(self, l20nsrc):
text, encoding = self.detect_encoding(
l20nsrc, default_encodings=[self.encoding])
@@ -88,17 +112,7 @@
for entry in ast['body']:
if entry['type'] == 'Entity':
- translation = L20nSerializer().dumpPattern(entry['value'])
- comment = ''
- if entry['comment']:
- comment = entry['comment']['content']
-
- newl20n = l20nunit(
- source=translation,
- id=entry['id']['name'],
- comment=comment
- )
- self.addunit(newl20n)
+ self.parse_entity(entry)
def serialize(self, out):
"""Write the units back to file."""
| {"golden_diff": "diff --git a/translate/storage/l20n.py b/translate/storage/l20n.py\n--- a/translate/storage/l20n.py\n+++ b/translate/storage/l20n.py\n@@ -27,6 +27,17 @@\n from translate.storage import base\n \n \n+def dump_l20n_entity_value(entity):\n+ serializer = L20nSerializer()\n+ value = serializer.dumpPattern(entity['value'])\n+\n+ if len(entity['traits']):\n+ traits = serializer.dumpMembers(entity['traits'], 2)\n+ return u'{}\\n{}'.format(value, traits)\n+\n+ return value\n+\n+\n class l20nunit(base.TranslationUnit):\n \"\"\"Single L20n Entity\"\"\"\n \n@@ -75,6 +86,19 @@\n self.parse(l20nsrc)\n self.makeindex()\n \n+ def parse_entity(self, entity):\n+ translation = dump_l20n_entity_value(entity)\n+ comment = ''\n+ if entity['comment']:\n+ comment = entity['comment']['content']\n+\n+ newl20n = l20nunit(\n+ source=translation,\n+ id=entity['id']['name'],\n+ comment=comment\n+ )\n+ self.addunit(newl20n)\n+\n def parse(self, l20nsrc):\n text, encoding = self.detect_encoding(\n l20nsrc, default_encodings=[self.encoding])\n@@ -88,17 +112,7 @@\n \n for entry in ast['body']:\n if entry['type'] == 'Entity':\n- translation = L20nSerializer().dumpPattern(entry['value'])\n- comment = ''\n- if entry['comment']:\n- comment = entry['comment']['content']\n-\n- newl20n = l20nunit(\n- source=translation,\n- id=entry['id']['name'],\n- comment=comment\n- )\n- self.addunit(newl20n)\n+ self.parse_entity(entry)\n \n def serialize(self, out):\n \"\"\"Write the units back to file.\"\"\"\n", "issue": "Add traits and variants support in l20nfile\nL20n traits allow storing value variants and additional information for a unit. We need to add such support when parsing `.ftl` and convert resources.\r\n\r\n```\r\nfile-menu =\r\n [xul/label] File\r\n [xul/accesskey] F\r\n```\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2016 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import absolute_import\n\nfrom codecs import iterencode\n\nfrom l20n.format.parser import FTLParser as L20nParser\nfrom l20n.format.serializer import FTLSerializer as L20nSerializer\n\nfrom translate.storage import base\n\n\nclass l20nunit(base.TranslationUnit):\n \"\"\"Single L20n Entity\"\"\"\n\n def __init__(self, source='', id='', comment=''):\n super(l20nunit, self).__init__(source)\n self.id = id\n self.value = source\n self.comment = comment\n\n # Note that source and target are equivalent for monolingual units\n def getsource(self):\n return self.value\n\n def setsource(self, source):\n self.value = source\n\n source = property(getsource, setsource)\n\n def gettarget(self):\n return self.value\n\n def settarget(self, target):\n self.value = target\n\n target = property(gettarget, settarget)\n\n def getid(self):\n return self.id\n\n def setid(self, new_id):\n self.id = new_id\n\n def getoutput(self):\n return u\"%s = %s\\n\" % (self.id, self.value)\n\n\nclass l20nfile(base.TranslationStore):\n UnitClass = l20nunit\n encoding = 'utf8'\n\n def __init__(self, inputfile=None):\n super(l20nfile, self).__init__(unitclass=self.UnitClass)\n self.filename = getattr(inputfile, 'name', '')\n if inputfile is not None:\n l20nsrc = inputfile.read()\n self.parse(l20nsrc)\n self.makeindex()\n\n def parse(self, l20nsrc):\n text, encoding = self.detect_encoding(\n l20nsrc, default_encodings=[self.encoding])\n if not text:\n raise IOError(\"Cannot detect encoding for %s.\" % (self.filename or\n \"given string\"))\n l20nsrc = text\n\n parser = L20nParser()\n ast, errors = parser.parseResource(l20nsrc)\n\n for entry in ast['body']:\n if entry['type'] == 'Entity':\n translation = L20nSerializer().dumpPattern(entry['value'])\n comment = ''\n if entry['comment']:\n comment = entry['comment']['content']\n\n newl20n = l20nunit(\n source=translation,\n id=entry['id']['name'],\n comment=comment\n )\n self.addunit(newl20n)\n\n def serialize(self, out):\n \"\"\"Write the units back to file.\"\"\"\n # Thanks to iterencode, a possible BOM is written only once\n for chunk in iterencode((unit.getoutput() for unit in self.units), self.encoding):\n out.write(chunk)\n", "path": "translate/storage/l20n.py"}]} | 1,625 | 466 |
gh_patches_debug_19703 | rasdani/github-patches | git_diff | certbot__certbot-5349 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't install new certificates with certbot-apache under Gentoo since version 0.20.0
It seems that version 0.20.0 is a major architectural revision of 0.19.0, in particular regarding the OS-specific overrides. The Gentoo overrides have a flaw when installing a new certificate: at one point certbot runs "apache2ctl -t -D DUMP_MODULES", which does not work under Gentoo because apache2ctl there is a dummy script that redirects to /etc/init.d/apache2. It produces this error:
`Error in checking parameter list: /etc/init.d/apache2: invalid option -- 't'`
## My operating system is (include version):
Gentoo
## I installed Certbot with (certbot-auto, OS package manager, pip, etc):
OS package manager (emerge) with testing (~amd64) flag
## I ran this command and it produced this output:
```
certbot -d test.example.com
Saving debug log to /var/log/letsencrypt/letsencrypt.log
Plugins selected: Authenticator apache, Installer apache
Obtaining a new certificate
Performing the following challenges:
tls-sni-01 challenge for test.example.com
Waiting for verification...
Cleaning up challenges
Error in checking parameter list: /etc/init.d/apache2: invalid option -- 't'
Apache is unable to check whether or not the module is loaded because Apache is misconfigured.
```
## Certbot's behavior differed from what I expected because:
Certbot did not install the certificate because it failed to run apache2ctl -t -D DUMP_MODULES
</issue>
<code>
[start of certbot-apache/certbot_apache/override_gentoo.py]
1 """ Distribution specific override class for Gentoo Linux """
2 import pkg_resources
3
4 import zope.interface
5
6 from certbot import interfaces
7
8 from certbot_apache import apache_util
9 from certbot_apache import configurator
10 from certbot_apache import parser
11
12 @zope.interface.provider(interfaces.IPluginFactory)
13 class GentooConfigurator(configurator.ApacheConfigurator):
14 """Gentoo specific ApacheConfigurator override class"""
15
16 OS_DEFAULTS = dict(
17 server_root="/etc/apache2",
18 vhost_root="/etc/apache2/vhosts.d",
19 vhost_files="*.conf",
20 logs_root="/var/log/apache2",
21 version_cmd=['/usr/sbin/apache2', '-v'],
22 apache_cmd="apache2ctl",
23 restart_cmd=['apache2ctl', 'graceful'],
24 conftest_cmd=['apache2ctl', 'configtest'],
25 enmod=None,
26 dismod=None,
27 le_vhost_ext="-le-ssl.conf",
28 handle_mods=False,
29 handle_sites=False,
30 challenge_location="/etc/apache2/vhosts.d",
31 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
32 "certbot_apache", "options-ssl-apache.conf")
33 )
34
35 def get_parser(self):
36 """Initializes the ApacheParser"""
37 return GentooParser(
38 self.aug, self.conf("server-root"), self.conf("vhost-root"),
39 self.version, configurator=self)
40
41
42 class GentooParser(parser.ApacheParser):
43 """Gentoo specific ApacheParser override class"""
44 def __init__(self, *args, **kwargs):
45 # Gentoo specific configuration file for Apache2
46 self.apacheconfig_filep = "/etc/conf.d/apache2"
47 super(GentooParser, self).__init__(*args, **kwargs)
48
49 def update_runtime_variables(self):
50 """ Override for update_runtime_variables for custom parsing """
51 self.parse_sysconfig_var()
52
53 def parse_sysconfig_var(self):
54 """ Parses Apache CLI options from Gentoo configuration file """
55 defines = apache_util.parse_define_file(self.apacheconfig_filep,
56 "APACHE2_OPTS")
57 for k in defines.keys():
58 self.variables[k] = defines[k]
59
[end of certbot-apache/certbot_apache/override_gentoo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot-apache/certbot_apache/override_gentoo.py b/certbot-apache/certbot_apache/override_gentoo.py
--- a/certbot-apache/certbot_apache/override_gentoo.py
+++ b/certbot-apache/certbot_apache/override_gentoo.py
@@ -49,6 +49,7 @@
def update_runtime_variables(self):
""" Override for update_runtime_variables for custom parsing """
self.parse_sysconfig_var()
+ self.update_modules()
def parse_sysconfig_var(self):
""" Parses Apache CLI options from Gentoo configuration file """
@@ -56,3 +57,10 @@
"APACHE2_OPTS")
for k in defines.keys():
self.variables[k] = defines[k]
+
+ def update_modules(self):
+ """Get loaded modules from httpd process, and add them to DOM"""
+ mod_cmd = [self.configurator.constant("apache_cmd"), "modules"]
+ matches = self.parse_from_subprocess(mod_cmd, r"(.*)_module")
+ for mod in matches:
+ self.add_mod(mod.strip())
| {"golden_diff": "diff --git a/certbot-apache/certbot_apache/override_gentoo.py b/certbot-apache/certbot_apache/override_gentoo.py\n--- a/certbot-apache/certbot_apache/override_gentoo.py\n+++ b/certbot-apache/certbot_apache/override_gentoo.py\n@@ -49,6 +49,7 @@\n def update_runtime_variables(self):\n \"\"\" Override for update_runtime_variables for custom parsing \"\"\"\n self.parse_sysconfig_var()\n+ self.update_modules()\n \n def parse_sysconfig_var(self):\n \"\"\" Parses Apache CLI options from Gentoo configuration file \"\"\"\n@@ -56,3 +57,10 @@\n \"APACHE2_OPTS\")\n for k in defines.keys():\n self.variables[k] = defines[k]\n+\n+ def update_modules(self):\n+ \"\"\"Get loaded modules from httpd process, and add them to DOM\"\"\"\n+ mod_cmd = [self.configurator.constant(\"apache_cmd\"), \"modules\"]\n+ matches = self.parse_from_subprocess(mod_cmd, r\"(.*)_module\")\n+ for mod in matches:\n+ self.add_mod(mod.strip())\n", "issue": "Can't install new certificates with certbot-apache under Gentoo since version 0.20.0\nIt seems that version 0.20.0 has a major architecture revision from 0.19.0, in particular regarding OS specific overrides. But the Gentoo overrides have a flaw when trying to install a new certificate : at one point it tries to run \"apache2ctl -t -D DUMP_MODULES\", which is no good at all under Gentoo because apache2ctl is a dummy script redirecting to /etc/init.d/apache2. It generates this error :\r\n\r\n`Error in checking parameter list: /etc/init.d/apache2: invalid option -- 't'`\r\n\r\n## My operating system is (include version):\r\nGentoo\r\n\r\n## I installed Certbot with (certbot-auto, OS package manager, pip, etc):\r\nOS package manager (emerge) with testing (~amd64) flag\r\n\r\n## I ran this command and it produced this output:\r\n```\r\ncertbot -d test.example.com\r\nSaving debug log to /var/log/letsencrypt/letsencrypt.log\r\nPlugins selected: Authenticator apache, Installer apache\r\nObtaining a new certificate\r\nPerforming the following challenges:\r\ntls-sni-01 challenge for test.example.com\r\nWaiting for verification...\r\nCleaning up challenges\r\nError in checking parameter list: /etc/init.d/apache2: invalid option -- 't'\r\n\r\nApache is unable to check whether or not the module is loaded because Apache is misconfigured.\r\n```\r\n\r\n## Certbot's behavior differed from what I expected because:\r\nCertbot did not install the certificate because it failed to run apache2ctl -t -D DUMP_MODULES\n", "before_files": [{"content": "\"\"\" Distribution specific override class for Gentoo Linux \"\"\"\nimport pkg_resources\n\nimport zope.interface\n\nfrom certbot import interfaces\n\nfrom certbot_apache import apache_util\nfrom certbot_apache import configurator\nfrom certbot_apache import parser\n\[email protected](interfaces.IPluginFactory)\nclass GentooConfigurator(configurator.ApacheConfigurator):\n \"\"\"Gentoo specific ApacheConfigurator override class\"\"\"\n\n OS_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['/usr/sbin/apache2', '-v'],\n apache_cmd=\"apache2ctl\",\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n )\n\n def get_parser(self):\n 
\"\"\"Initializes the ApacheParser\"\"\"\n return GentooParser(\n self.aug, self.conf(\"server-root\"), self.conf(\"vhost-root\"),\n self.version, configurator=self)\n\n\nclass GentooParser(parser.ApacheParser):\n \"\"\"Gentoo specific ApacheParser override class\"\"\"\n def __init__(self, *args, **kwargs):\n # Gentoo specific configuration file for Apache2\n self.apacheconfig_filep = \"/etc/conf.d/apache2\"\n super(GentooParser, self).__init__(*args, **kwargs)\n\n def update_runtime_variables(self):\n \"\"\" Override for update_runtime_variables for custom parsing \"\"\"\n self.parse_sysconfig_var()\n\n def parse_sysconfig_var(self):\n \"\"\" Parses Apache CLI options from Gentoo configuration file \"\"\"\n defines = apache_util.parse_define_file(self.apacheconfig_filep,\n \"APACHE2_OPTS\")\n for k in defines.keys():\n self.variables[k] = defines[k]\n", "path": "certbot-apache/certbot_apache/override_gentoo.py"}]} | 1,472 | 252 |
gh_patches_debug_2097 | rasdani/github-patches | git_diff | sosreport__sos-1100 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[rabbitmq] does not elide password in /etc/rabbitmq.config
Hi,
Sosreport (as shipped in sos-3.2-36.eo7ost.1.noarch) includes `default_user` and `default_pass` in `/etc/rabbitmq/rabbitmq.conf` without hiding them.
Regards,
Pablo
</issue>
<code>
[start of sos/plugins/rabbitmq.py]
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10
11 # You should have received a copy of the GNU General Public License along
12 # with this program; if not, write to the Free Software Foundation, Inc.,
13 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
14
15 from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
16
17
18 class RabbitMQ(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
19 """RabbitMQ messaging service
20 """
21 plugin_name = 'rabbitmq'
22 profiles = ('services',)
23 var_puppet_gen = "/var/lib/config-data/puppet-generated/rabbitmq"
24 files = (
25 '/etc/rabbitmq/rabbitmq.conf',
26 var_puppet_gen + '/etc/rabbitmq/rabbitmq.config'
27 )
28 packages = ('rabbitmq-server',)
29
30 def setup(self):
31 container_status = self.get_command_output(
32 "docker ps -a --format='{{ .Names }}'")
33
34 in_container = False
35 container_names = []
36 if container_status['status'] == 0:
37 for line in container_status['output'].splitlines():
38 if line.startswith("rabbitmq"):
39 in_container = True
40 container_names.append(line)
41
42 if in_container:
43 for container in container_names:
44 self.add_cmd_output('docker logs {0}'.format(container))
45 self.add_cmd_output(
46 'docker exec -t {0} rabbitmqctl report'
47 .format(container)
48 )
49 else:
50 self.add_cmd_output("rabbitmqctl report")
51
52 self.add_copy_spec([
53 "/etc/rabbitmq/*",
54 self.var_puppet_gen + "/etc/rabbitmq/*",
55 self.var_puppet_gen + "/etc/security/limits.d/",
56 self.var_puppet_gen + "/etc/systemd/"
57 ])
58 self.add_copy_spec([
59 "/var/log/rabbitmq/*",
60 "/var/log/containers/rabbitmq/*"
61 ], sizelimit=self.get_option('log_size'))
62
63
64 # vim: set et ts=4 sw=4 :
65
[end of sos/plugins/rabbitmq.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sos/plugins/rabbitmq.py b/sos/plugins/rabbitmq.py
--- a/sos/plugins/rabbitmq.py
+++ b/sos/plugins/rabbitmq.py
@@ -60,5 +60,8 @@
"/var/log/containers/rabbitmq/*"
], sizelimit=self.get_option('log_size'))
+ def postproc(self):
+ self.do_file_sub("/etc/rabbitmq/rabbitmq.conf",
+ r"(\s*default_pass\s*,\s*)\S+", r"\1<<***>>},")
# vim: set et ts=4 sw=4 :
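To see what the `postproc` substitution above does, here is a minimal stand-alone sketch (editor's illustration; the sample config line is an assumption about a typical Erlang-style rabbitmq.conf entry, not taken from the repository):

```python
import re

sample = "  {default_pass, \"s3cret\"},"
# Same pattern/replacement the patch passes to do_file_sub().
redacted = re.sub(r"(\s*default_pass\s*,\s*)\S+", r"\1<<***>>},", sample)
print(redacted)  # '  {default_pass, <<***>>},' -- password elided
```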
| {"golden_diff": "diff --git a/sos/plugins/rabbitmq.py b/sos/plugins/rabbitmq.py\n--- a/sos/plugins/rabbitmq.py\n+++ b/sos/plugins/rabbitmq.py\n@@ -60,5 +60,8 @@\n \"/var/log/containers/rabbitmq/*\"\n ], sizelimit=self.get_option('log_size'))\n \n+ def postproc(self):\n+ self.do_file_sub(\"/etc/rabbitmq/rabbitmq.conf\",\n+ r\"(\\s*default_pass\\s*,\\s*)\\S+\", r\"\\1<<***>>},\")\n \n # vim: set et ts=4 sw=4 :\n", "issue": "[rabbitmq] does not elide password in /etc/rabbitmq.config\nHi,\nSosreport as in sos-3.2-36.eo7ost.1.noarch includes in `/etc/rabbitmq/rabbitmq.conf` `default_user` and `default_pass` without being hidden.\n\nRegards,\nPablo\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin\n\n\nclass RabbitMQ(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):\n \"\"\"RabbitMQ messaging service\n \"\"\"\n plugin_name = 'rabbitmq'\n profiles = ('services',)\n var_puppet_gen = \"/var/lib/config-data/puppet-generated/rabbitmq\"\n files = (\n '/etc/rabbitmq/rabbitmq.conf',\n var_puppet_gen + '/etc/rabbitmq/rabbitmq.config'\n )\n packages = ('rabbitmq-server',)\n\n def setup(self):\n container_status = self.get_command_output(\n \"docker ps -a --format='{{ .Names }}'\")\n\n in_container = False\n container_names = []\n if container_status['status'] == 0:\n for line in container_status['output'].splitlines():\n if line.startswith(\"rabbitmq\"):\n in_container = True\n container_names.append(line)\n\n if in_container:\n for container in container_names:\n self.add_cmd_output('docker logs {0}'.format(container))\n self.add_cmd_output(\n 'docker exec -t {0} rabbitmqctl report'\n .format(container)\n )\n else:\n self.add_cmd_output(\"rabbitmqctl report\")\n\n self.add_copy_spec([\n \"/etc/rabbitmq/*\",\n self.var_puppet_gen + \"/etc/rabbitmq/*\",\n self.var_puppet_gen + \"/etc/security/limits.d/\",\n self.var_puppet_gen + \"/etc/systemd/\"\n ])\n self.add_copy_spec([\n \"/var/log/rabbitmq/*\",\n \"/var/log/containers/rabbitmq/*\"\n ], sizelimit=self.get_option('log_size'))\n\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/plugins/rabbitmq.py"}]} | 1,273 | 138 |
gh_patches_debug_62860 | rasdani/github-patches | git_diff | conan-io__conan-center-index-7774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] sigslot/1.2.1
### Package Details
* Package Name/Version: **sigslot/1.2.1**
* Changelog: **https://github.com/palacaze/sigslot/releases/tag/v1.2.1**
The above-mentioned version has just been released by the upstream project and is not yet available as a recipe. Please add this version.
</issue>
<code>
[start of recipes/sigslot/all/conanfile.py]
1 import os
2
3 from conans import ConanFile, tools
4 from conans.errors import ConanInvalidConfiguration
5
6
7 class SigslotConan(ConanFile):
8 name = "sigslot"
9 description = "Sigslot is a header-only, thread safe implementation of signal-slots for C++."
10 topics = ("signal", "slot", "c++14", "header-only")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://github.com/palacaze/sigslot"
13 license = "MIT"
14 settings = "compiler", "os"
15 no_copy_source = True
16
17 @property
18 def _source_subfolder(self):
19 return "source_subfolder"
20
21 def configure(self):
22 minimal_cpp_standard = "14"
23 if self.settings.compiler.cppstd:
24 tools.check_min_cppstd(self, minimal_cpp_standard)
25 minimal_version = {
26 "gcc": "5",
27 "clang": "3.4",
28 "apple-clang": "10",
29 "Visual Studio": "15" # 14 is not supported by the library
30 }
31 compiler = str(self.settings.compiler)
32 if compiler not in minimal_version:
33 self.output.warn(
34 "%s recipe lacks information about the %s compiler standard version support" % (self.name, compiler))
35 self.output.warn(
36 "%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))
37 return
38 version = tools.Version(self.settings.compiler.version)
39 if version < minimal_version[compiler]:
40 raise ConanInvalidConfiguration("%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))
41
42 def source(self):
43 tools.get(**self.conan_data["sources"][self.version])
44 extracted_dir = "sigslot-" + self.version
45 os.rename(extracted_dir, self._source_subfolder)
46
47 def package(self):
48 self.copy(pattern="LICENSE", src=self._source_subfolder, dst="licenses")
49 self.copy(pattern="signal.hpp", src=os.path.join(self._source_subfolder, "include", "sigslot"), dst=os.path.join("include", "sigslot"))
50
51 def package_id(self):
52 self.info.header_only()
53
54 def package_info(self):
55 self.cpp_info.filenames["cmake_find_package"] = "PalSigslot"
56 self.cpp_info.filenames["cmake_find_package_multi"] = "PalSigslot"
57 self.cpp_info.names["cmake_find_package"] = "Pal"
58 self.cpp_info.names["cmake_find_package_multi"] = "Pal"
59
60 self.cpp_info.components["_sigslot"].libs = []
61 self.cpp_info.components["_sigslot"].names["cmake_find_package"] = "Sigslot"
62 self.cpp_info.components["_sigslot"].names["cmake_find_package_multi"] = "Sigslot"
63
64 if self.settings.os == "Linux":
65 self.cpp_info.components["_sigslot"].system_libs.append("pthread")
66 if self.settings.os == "Windows":
67 if self.settings.compiler in ("Visual Studio", "clang"):
68 self.cpp_info.components["_sigslot"].exelinkflags.append('/OPT:NOICF')
69
[end of recipes/sigslot/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/sigslot/all/conanfile.py b/recipes/sigslot/all/conanfile.py
--- a/recipes/sigslot/all/conanfile.py
+++ b/recipes/sigslot/all/conanfile.py
@@ -65,4 +65,4 @@
self.cpp_info.components["_sigslot"].system_libs.append("pthread")
if self.settings.os == "Windows":
if self.settings.compiler in ("Visual Studio", "clang"):
- self.cpp_info.components["_sigslot"].exelinkflags.append('/OPT:NOICF')
+ self.cpp_info.components["_sigslot"].exelinkflags.append('-OPT:NOICF')
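A short note on the one-character fix above (editor's comment): the linker flag itself is unchanged, only its prefix. `/OPT:NOICF` is MSVC's native spelling, while the `-OPT:NOICF` form is also accepted by link.exe and avoids the leading slash being misread as a path by other tooling that consumes the flag list. A hedged sketch of how a consumer would see it, assuming standard Conan 1.x attributes:

```python
# Hypothetical inspection of the exposed link flags after the patch.
flags = ["-OPT:NOICF"]  # what cpp_info.components["_sigslot"].exelinkflags now holds
assert not any(f.startswith("/") for f in flags)  # no slash-prefixed flag to misparse
```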
| {"golden_diff": "diff --git a/recipes/sigslot/all/conanfile.py b/recipes/sigslot/all/conanfile.py\n--- a/recipes/sigslot/all/conanfile.py\n+++ b/recipes/sigslot/all/conanfile.py\n@@ -65,4 +65,4 @@\n self.cpp_info.components[\"_sigslot\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Windows\":\n if self.settings.compiler in (\"Visual Studio\", \"clang\"):\n- self.cpp_info.components[\"_sigslot\"].exelinkflags.append('/OPT:NOICF')\n+ self.cpp_info.components[\"_sigslot\"].exelinkflags.append('-OPT:NOICF')\n", "issue": "[request] sigslot/1.2.1\n### Package Details\r\n * Package Name/Version: **sigslot/1.2.1**\r\n * Changelog: **https://github.com/palacaze/sigslot/releases/tag/v1.2.1**\r\n\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.\r\n\n", "before_files": [{"content": "import os\n\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass SigslotConan(ConanFile):\n name = \"sigslot\"\n description = \"Sigslot is a header-only, thread safe implementation of signal-slots for C++.\"\n topics = (\"signal\", \"slot\", \"c++14\", \"header-only\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/palacaze/sigslot\"\n license = \"MIT\"\n settings = \"compiler\", \"os\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n minimal_cpp_standard = \"14\"\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, minimal_cpp_standard)\n minimal_version = {\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\",\n \"Visual Studio\": \"15\" # 14 is not supported by the library\n }\n compiler = str(self.settings.compiler)\n if compiler not in minimal_version:\n self.output.warn(\n \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n self.output.warn(\n \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n version = tools.Version(self.settings.compiler.version)\n if version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"sigslot-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(pattern=\"signal.hpp\", src=os.path.join(self._source_subfolder, \"include\", \"sigslot\"), dst=os.path.join(\"include\", \"sigslot\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"PalSigslot\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"PalSigslot\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Pal\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Pal\"\n\n self.cpp_info.components[\"_sigslot\"].libs = []\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package\"] = \"Sigslot\"\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package_multi\"] = \"Sigslot\"\n\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"_sigslot\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Windows\":\n if self.settings.compiler in (\"Visual Studio\", \"clang\"):\n 
self.cpp_info.components[\"_sigslot\"].exelinkflags.append('/OPT:NOICF')\n", "path": "recipes/sigslot/all/conanfile.py"}]} | 1,443 | 145 |
gh_patches_debug_11690 | rasdani/github-patches | git_diff | searxng__searxng-3369 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove ask engine
Reasons:
1. User Tracking via query parameters.
Example Query: [!ask best museums in Köln](https://searx.be/search?q=%21ask+best+museums+in+K%C3%B6ln&category_none=&language=auto&time_range=&safesearch=0&theme=simple)
   Notice the ```euid=some_id``` parameter at the end of most URLs.
2. Very bad results; something must have changed.
Example Query: [!ask "yt-dlp"](https://searx.be/search?q=%21ask+%22yt-dlp%22&category_none=&language=auto&time_range=&safesearch=0&theme=simple)
</issue>
<code>
[start of searx/engines/ask.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """Ask.com"""
3
4 from urllib.parse import urlencode
5 import dateutil
6 from lxml import html
7 from searx import utils
8
9 # Metadata
10 about = {
11 "website": "https://www.ask.com/",
12 "wikidata_id": 'Q847564',
13 "official_api_documentation": None,
14 "use_official_api": False,
15 "require_api_key": False,
16 "results": "HTML",
17 }
18
19 # Engine Configuration
20 categories = ['general']
21 paging = True
22
23 # Base URL
24 base_url = "https://www.ask.com/web"
25
26
27 def request(query, params):
28
29 query_params = {
30 "q": query,
31 "page": params["pageno"],
32 }
33
34 params["url"] = f"{base_url}?{urlencode(query_params)}"
35 return params
36
37
38 def response(resp):
39
40 start_tag = 'window.MESON.initialState = {'
41 end_tag = '}};'
42
43 dom = html.fromstring(resp.text)
44 script = utils.eval_xpath_getindex(dom, '//script', 0, default=None).text
45
46 pos = script.index(start_tag) + len(start_tag) - 1
47 script = script[pos:]
48 pos = script.index(end_tag) + len(end_tag) - 1
49 script = script[:pos]
50
51 json_resp = utils.js_variable_to_python(script)
52
53 results = []
54
55 for item in json_resp['search']['webResults']['results']:
56
57 pubdate_original = item.get('pubdate_original')
58 if pubdate_original:
59 pubdate_original = dateutil.parser.parse(pubdate_original)
60 metadata = [item.get(field) for field in ['category_l1', 'catsy'] if item.get(field)]
61
62 results.append(
63 {
64 "url": item['url'],
65 "title": item['title'],
66 "content": item['abstract'],
67 "publishedDate": pubdate_original,
68 # "img_src": item.get('image_url') or None, # these are not thumbs / to large
69 "metadata": ' | '.join(metadata),
70 }
71 )
72
73 return results
74
[end of searx/engines/ask.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/ask.py b/searx/engines/ask.py
--- a/searx/engines/ask.py
+++ b/searx/engines/ask.py
@@ -19,6 +19,7 @@
# Engine Configuration
categories = ['general']
paging = True
+max_page = 5
# Base URL
base_url = "https://www.ask.com/web"
@@ -61,7 +62,7 @@
results.append(
{
- "url": item['url'],
+ "url": item['url'].split('&ueid')[0],
"title": item['title'],
"content": item['abstract'],
"publishedDate": pubdate_original,
| {"golden_diff": "diff --git a/searx/engines/ask.py b/searx/engines/ask.py\n--- a/searx/engines/ask.py\n+++ b/searx/engines/ask.py\n@@ -19,6 +19,7 @@\n # Engine Configuration\n categories = ['general']\n paging = True\n+max_page = 5\n \n # Base URL\n base_url = \"https://www.ask.com/web\"\n@@ -61,7 +62,7 @@\n \n results.append(\n {\n- \"url\": item['url'],\n+ \"url\": item['url'].split('&ueid')[0],\n \"title\": item['title'],\n \"content\": item['abstract'],\n \"publishedDate\": pubdate_original,\n", "issue": "remove ask engine\nReasons:\r\n\r\n1. User Tracking via query parameters.\r\n\r\n Example Query: [!ask best museums in K\u00f6ln](https://searx.be/search?q=%21ask+best+museums+in+K%C3%B6ln&category_none=&language=auto&time_range=&safesearch=0&theme=simple)\r\n\r\n Notice the ```euid=some_id``` at the end for most urls.\r\n\r\n2. Very bad results - Something must have changed.\r\n\r\n Example Query: [!ask \"yt-dlp\"](https://searx.be/search?q=%21ask+%22yt-dlp%22&category_none=&language=auto&time_range=&safesearch=0&theme=simple)\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Ask.com\"\"\"\n\nfrom urllib.parse import urlencode\nimport dateutil\nfrom lxml import html\nfrom searx import utils\n\n# Metadata\nabout = {\n \"website\": \"https://www.ask.com/\",\n \"wikidata_id\": 'Q847564',\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"HTML\",\n}\n\n# Engine Configuration\ncategories = ['general']\npaging = True\n\n# Base URL\nbase_url = \"https://www.ask.com/web\"\n\n\ndef request(query, params):\n\n query_params = {\n \"q\": query,\n \"page\": params[\"pageno\"],\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n\n\ndef response(resp):\n\n start_tag = 'window.MESON.initialState = {'\n end_tag = '}};'\n\n dom = html.fromstring(resp.text)\n script = utils.eval_xpath_getindex(dom, '//script', 0, default=None).text\n\n pos = script.index(start_tag) + len(start_tag) - 1\n script = script[pos:]\n pos = script.index(end_tag) + len(end_tag) - 1\n script = script[:pos]\n\n json_resp = utils.js_variable_to_python(script)\n\n results = []\n\n for item in json_resp['search']['webResults']['results']:\n\n pubdate_original = item.get('pubdate_original')\n if pubdate_original:\n pubdate_original = dateutil.parser.parse(pubdate_original)\n metadata = [item.get(field) for field in ['category_l1', 'catsy'] if item.get(field)]\n\n results.append(\n {\n \"url\": item['url'],\n \"title\": item['title'],\n \"content\": item['abstract'],\n \"publishedDate\": pubdate_original,\n # \"img_src\": item.get('image_url') or None, # these are not thumbs / to large\n \"metadata\": ' | '.join(metadata),\n }\n )\n\n return results\n", "path": "searx/engines/ask.py"}]} | 1,313 | 165 |
gh_patches_debug_3277 | rasdani/github-patches | git_diff | spack__spack-11755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installing rclone leaves a go folder outside spack root
Spack version: commit a45c4ef33de (effectively the current head; nothing relevant has changed since).
OS distribution: Ubuntu server 18.04.
After `spack install rclone`, a folder named `go` is left outside the Spack installation root, at `~/go/` (the Spack root is `~/spack/`). I know little about the Go language, but this appears to be the Go package directory, denoted `$GOPATH`.

I wonder whether it is necessary to leave anything outside the Spack root, or whether it would be simple to move this GOPATH inside the Spack root by patching the relevant packages (such as go or rclone).
BTW,
```bash
module show go-1.11.5-gcc-7.4.0-gm
--------------------------------------------------------------------------------------------------------------------------------
/home/ubuntu/spack/share/spack/modules/linux-ubuntu18.04-x86_64/go-1.11.5-gcc-7.4.0-gm:
--------------------------------------------------------------------------------------------------------------------------------
whatis("The golang compiler and build environment ")
prepend_path("PATH","/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/bin")
prepend_path("LD_LIBRARY_PATH","/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/lib")
prepend_path("LIBRARY_PATH","/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/lib")
prepend_path("CMAKE_PREFIX_PATH","/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/")
help([[The golang compiler and build environment
]])
```
The module file that Spack auto-generates for go does not set `$GOPATH`.

Correct me if I misunderstand Go or its way of doing things, since I know almost nothing about it.
</issue>
<code>
[start of var/spack/repos/builtin/packages/rclone/package.py]
1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class Rclone(Package):
10 """Rclone is a command line program to sync files and directories
11 to and from various cloud storage providers"""
12
13 homepage = "http://rclone.org"
14 url = "https://github.com/ncw/rclone/releases/download/v1.43/rclone-v1.43.tar.gz"
15
16 version('1.43', sha256='d30527b00cecb4e5e7188dddb78e5cec62d67cf2422dab82190db58512b5a4e3')
17
18 depends_on("go", type='build')
19
20 def install(self, spec, prefix):
21 go('build')
22 mkdirp(prefix.bin)
23 install('rclone', prefix.bin)
24
[end of var/spack/repos/builtin/packages/rclone/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/rclone/package.py b/var/spack/repos/builtin/packages/rclone/package.py
--- a/var/spack/repos/builtin/packages/rclone/package.py
+++ b/var/spack/repos/builtin/packages/rclone/package.py
@@ -17,6 +17,10 @@
depends_on("go", type='build')
+ def setup_environment(self, spack_env, run_env):
+ # Point GOPATH at the top of the staging dir for the build step.
+ spack_env.prepend_path('GOPATH', self.stage.path)
+
def install(self, spec, prefix):
go('build')
mkdirp(prefix.bin)
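The fix above works because the `go` toolchain honors the `GOPATH` environment variable for its package/workspace directory; pointing it at the staging directory keeps build artifacts out of `~/go/`. A minimal sketch of the same idea outside Spack (editor's illustration; the staging path is hypothetical):

```python
import os
import subprocess

env = dict(os.environ)
# Equivalent of spack_env.prepend_path('GOPATH', self.stage.path):
env["GOPATH"] = "/tmp/spack-stage/rclone" + os.pathsep + env.get("GOPATH", "")
subprocess.run(["go", "build"], env=env, check=True)  # nothing lands in ~/go/
```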
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/rclone/package.py b/var/spack/repos/builtin/packages/rclone/package.py\n--- a/var/spack/repos/builtin/packages/rclone/package.py\n+++ b/var/spack/repos/builtin/packages/rclone/package.py\n@@ -17,6 +17,10 @@\n \n depends_on(\"go\", type='build')\n \n+ def setup_environment(self, spack_env, run_env):\n+ # Point GOPATH at the top of the staging dir for the build step.\n+ spack_env.prepend_path('GOPATH', self.stage.path)\n+\n def install(self, spec, prefix):\n go('build')\n mkdirp(prefix.bin)\n", "issue": "Installing rclone leaves a go folder outside spack root\nSpack version: commit a45c4ef33de (if no change on relevant part, it is basically the head).\r\nOS distribution: Ubuntu server 18.04.\r\n\r\nAfter `spack install rclone`, there is a folder named go outside spack installation root as `~/go/` (the spack root is `~/spack/`). I have no idea about go lang, but it seems to be the go packages directory, denoted as `$GOPATH`.\r\n\r\nI wonder whether it is necessary to leave something outside spack root or should it be simple to move such GOPATH within spack root by hacking some repos (such as go or rclone).\r\n\r\nBTW, \r\n```bash\r\nmodule show go-1.11.5-gcc-7.4.0-gm\r\n--------------------------------------------------------------------------------------------------------------------------------\r\n /home/ubuntu/spack/share/spack/modules/linux-ubuntu18.04-x86_64/go-1.11.5-gcc-7.4.0-gm:\r\n--------------------------------------------------------------------------------------------------------------------------------\r\nwhatis(\"The golang compiler and build environment \")\r\nprepend_path(\"PATH\",\"/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/bin\")\r\nprepend_path(\"LD_LIBRARY_PATH\",\"/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/lib\")\r\nprepend_path(\"LIBRARY_PATH\",\"/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/lib\")\r\nprepend_path(\"CMAKE_PREFIX_PATH\",\"/home/ubuntu/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.4.0/go-1.11.5-gmcvqdwr6az4hxbicr375if5fttk6sna/\")\r\nhelp([[The golang compiler and build environment\r\n]])\r\n```\r\nThe auto generated module file for go by spack has no modification on `$GOPATH`.\r\n\r\nCorrect me if I have any misunderstanding on go or its way of doing things, since literally I know nothing about it.\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Rclone(Package):\n \"\"\"Rclone is a command line program to sync files and directories\n to and from various cloud storage providers\"\"\"\n\n homepage = \"http://rclone.org\"\n url = \"https://github.com/ncw/rclone/releases/download/v1.43/rclone-v1.43.tar.gz\"\n\n version('1.43', sha256='d30527b00cecb4e5e7188dddb78e5cec62d67cf2422dab82190db58512b5a4e3')\n\n depends_on(\"go\", type='build')\n\n def install(self, spec, prefix):\n go('build')\n mkdirp(prefix.bin)\n install('rclone', prefix.bin)\n", "path": "var/spack/repos/builtin/packages/rclone/package.py"}]} | 1,401 | 150 |
gh_patches_debug_30001 | rasdani/github-patches | git_diff | getsentry__sentry-5722 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[plugin] Regression: Webhook Test Button
Somewhere during the transfer to the new plugin architecture, the webhook plugin's Test button disappeared.
</issue>
<code>
[start of src/sentry/api/endpoints/project_plugin_details.py]
1 from __future__ import absolute_import
2
3 import six
4
5 from django import forms
6 from django.core.urlresolvers import reverse
7 from rest_framework import serializers
8 from rest_framework.response import Response
9
10 from sentry.exceptions import PluginError, PluginIdentityRequired
11 from sentry.plugins import plugins
12 from sentry.api.bases.project import ProjectEndpoint
13 from sentry.api.exceptions import ResourceDoesNotExist
14 from sentry.api.serializers import serialize
15 from sentry.api.serializers.models.plugin import (
16 PluginSerializer, PluginWithConfigSerializer, serialize_field
17 )
18 from sentry.signals import plugin_enabled
19
20 ERR_ALWAYS_ENABLED = 'This plugin is always enabled.'
21 ERR_FIELD_REQUIRED = 'This field is required.'
22
23 OK_UPDATED = 'Successfully updated configuration.'
24
25
26 class ProjectPluginDetailsEndpoint(ProjectEndpoint):
27 def _get_plugin(self, plugin_id):
28 try:
29 return plugins.get(plugin_id)
30 except KeyError:
31 raise ResourceDoesNotExist
32
33 def get(self, request, project, plugin_id):
34 plugin = self._get_plugin(plugin_id)
35
36 try:
37 context = serialize(plugin, request.user, PluginWithConfigSerializer(project))
38 except PluginIdentityRequired as e:
39 context = serialize(plugin, request.user, PluginSerializer(project))
40 context['config_error'] = e.message
41 context['auth_url'] = reverse('socialauth_associate', args=[plugin.slug])
42
43 return Response(context)
44
45 def post(self, request, project, plugin_id):
46 """
47 Enable plugin
48 """
49 plugin = self._get_plugin(plugin_id)
50
51 if not plugin.can_disable:
52 return Response({'detail': ERR_ALWAYS_ENABLED}, status=400)
53
54 plugin.enable(project)
55
56 return Response(status=201)
57
58 def delete(self, request, project, plugin_id):
59 """
60 Disable plugin
61 """
62 plugin = self._get_plugin(plugin_id)
63
64 if not plugin.can_disable:
65 return Response({'detail': ERR_ALWAYS_ENABLED}, status=400)
66
67 plugin.disable(project)
68
69 return Response(status=204)
70
71 def put(self, request, project, plugin_id):
72 plugin = self._get_plugin(plugin_id)
73
74 config = [
75 serialize_field(project, plugin, c)
76 for c in plugin.get_config(
77 project=project,
78 user=request.user,
79 )
80 ]
81
82 cleaned = {}
83 errors = {}
84 for field in config:
85 key = field['name']
86 value = request.DATA.get(key)
87
88 if field.get('required') and not value:
89 errors[key] = ERR_FIELD_REQUIRED
90
91 try:
92 value = plugin.validate_config_field(
93 project=project,
94 name=key,
95 value=value,
96 actor=request.user,
97 )
98 except (forms.ValidationError, serializers.ValidationError, PluginError) as e:
99 errors[key] = e.message
100
101 if not errors.get(key):
102 cleaned[key] = value
103
104 if not errors:
105 try:
106 cleaned = plugin.validate_config(
107 project=project,
108 config=cleaned,
109 actor=request.user,
110 )
111 except PluginError as e:
112 errors['__all__'] = e.message
113
114 if errors:
115 return Response(
116 {
117 'errors': errors,
118 }, status=400
119 )
120
121 for key, value in six.iteritems(cleaned):
122 if value is None:
123 plugin.unset_option(
124 project=project,
125 key=key,
126 )
127 else:
128 plugin.set_option(
129 project=project,
130 key=key,
131 value=value,
132 )
133
134 context = serialize(plugin, request.user, PluginWithConfigSerializer(project))
135
136 plugin_enabled.send(plugin=plugin, project=project, user=request.user, sender=self)
137
138 return Response(context)
139
[end of src/sentry/api/endpoints/project_plugin_details.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/api/endpoints/project_plugin_details.py b/src/sentry/api/endpoints/project_plugin_details.py
--- a/src/sentry/api/endpoints/project_plugin_details.py
+++ b/src/sentry/api/endpoints/project_plugin_details.py
@@ -1,11 +1,13 @@
from __future__ import absolute_import
+import logging
import six
from django import forms
from django.core.urlresolvers import reverse
from rest_framework import serializers
from rest_framework.response import Response
+from requests.exceptions import HTTPError
from sentry.exceptions import PluginError, PluginIdentityRequired
from sentry.plugins import plugins
@@ -44,10 +46,26 @@
def post(self, request, project, plugin_id):
"""
- Enable plugin
+ Enable plugin or Test plugin
"""
plugin = self._get_plugin(plugin_id)
+ if request.DATA.get('test') and plugin.is_testable():
+ try:
+ test_results = plugin.test_configuration(project)
+ except Exception as exc:
+ if isinstance(exc, HTTPError):
+ test_results = '%s\n%s' % (exc, exc.response.text[:256])
+ elif hasattr(exc, 'read') and callable(exc.read):
+ test_results = '%s\n%s' % (exc, exc.read()[:256])
+ else:
+ logging.exception('Plugin(%s) raised an error during test',
+ plugin_id)
+ test_results = 'There was an internal error with the Plugin'
+ if not test_results:
+ test_results = 'No errors returned'
+ return Response({'detail': test_results}, status=200)
+
if not plugin.can_disable:
return Response({'detail': ERR_ALWAYS_ENABLED}, status=400)
| {"golden_diff": "diff --git a/src/sentry/api/endpoints/project_plugin_details.py b/src/sentry/api/endpoints/project_plugin_details.py\n--- a/src/sentry/api/endpoints/project_plugin_details.py\n+++ b/src/sentry/api/endpoints/project_plugin_details.py\n@@ -1,11 +1,13 @@\n from __future__ import absolute_import\n \n+import logging\n import six\n \n from django import forms\n from django.core.urlresolvers import reverse\n from rest_framework import serializers\n from rest_framework.response import Response\n+from requests.exceptions import HTTPError\n \n from sentry.exceptions import PluginError, PluginIdentityRequired\n from sentry.plugins import plugins\n@@ -44,10 +46,26 @@\n \n def post(self, request, project, plugin_id):\n \"\"\"\n- Enable plugin\n+ Enable plugin or Test plugin\n \"\"\"\n plugin = self._get_plugin(plugin_id)\n \n+ if request.DATA.get('test') and plugin.is_testable():\n+ try:\n+ test_results = plugin.test_configuration(project)\n+ except Exception as exc:\n+ if isinstance(exc, HTTPError):\n+ test_results = '%s\\n%s' % (exc, exc.response.text[:256])\n+ elif hasattr(exc, 'read') and callable(exc.read):\n+ test_results = '%s\\n%s' % (exc, exc.read()[:256])\n+ else:\n+ logging.exception('Plugin(%s) raised an error during test',\n+ plugin_id)\n+ test_results = 'There was an internal error with the Plugin'\n+ if not test_results:\n+ test_results = 'No errors returned'\n+ return Response({'detail': test_results}, status=200)\n+\n if not plugin.can_disable:\n return Response({'detail': ERR_ALWAYS_ENABLED}, status=400)\n", "issue": "[plugin] Regression: Webhook Test Button\nSomehow during transfer to the new plugin architecture, the webhook plugin's Test button has disappeared.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom django import forms\nfrom django.core.urlresolvers import reverse\nfrom rest_framework import serializers\nfrom rest_framework.response import Response\n\nfrom sentry.exceptions import PluginError, PluginIdentityRequired\nfrom sentry.plugins import plugins\nfrom sentry.api.bases.project import ProjectEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.serializers import serialize\nfrom sentry.api.serializers.models.plugin import (\n PluginSerializer, PluginWithConfigSerializer, serialize_field\n)\nfrom sentry.signals import plugin_enabled\n\nERR_ALWAYS_ENABLED = 'This plugin is always enabled.'\nERR_FIELD_REQUIRED = 'This field is required.'\n\nOK_UPDATED = 'Successfully updated configuration.'\n\n\nclass ProjectPluginDetailsEndpoint(ProjectEndpoint):\n def _get_plugin(self, plugin_id):\n try:\n return plugins.get(plugin_id)\n except KeyError:\n raise ResourceDoesNotExist\n\n def get(self, request, project, plugin_id):\n plugin = self._get_plugin(plugin_id)\n\n try:\n context = serialize(plugin, request.user, PluginWithConfigSerializer(project))\n except PluginIdentityRequired as e:\n context = serialize(plugin, request.user, PluginSerializer(project))\n context['config_error'] = e.message\n context['auth_url'] = reverse('socialauth_associate', args=[plugin.slug])\n\n return Response(context)\n\n def post(self, request, project, plugin_id):\n \"\"\"\n Enable plugin\n \"\"\"\n plugin = self._get_plugin(plugin_id)\n\n if not plugin.can_disable:\n return Response({'detail': ERR_ALWAYS_ENABLED}, status=400)\n\n plugin.enable(project)\n\n return Response(status=201)\n\n def delete(self, request, project, plugin_id):\n \"\"\"\n Disable plugin\n \"\"\"\n plugin = 
self._get_plugin(plugin_id)\n\n if not plugin.can_disable:\n return Response({'detail': ERR_ALWAYS_ENABLED}, status=400)\n\n plugin.disable(project)\n\n return Response(status=204)\n\n def put(self, request, project, plugin_id):\n plugin = self._get_plugin(plugin_id)\n\n config = [\n serialize_field(project, plugin, c)\n for c in plugin.get_config(\n project=project,\n user=request.user,\n )\n ]\n\n cleaned = {}\n errors = {}\n for field in config:\n key = field['name']\n value = request.DATA.get(key)\n\n if field.get('required') and not value:\n errors[key] = ERR_FIELD_REQUIRED\n\n try:\n value = plugin.validate_config_field(\n project=project,\n name=key,\n value=value,\n actor=request.user,\n )\n except (forms.ValidationError, serializers.ValidationError, PluginError) as e:\n errors[key] = e.message\n\n if not errors.get(key):\n cleaned[key] = value\n\n if not errors:\n try:\n cleaned = plugin.validate_config(\n project=project,\n config=cleaned,\n actor=request.user,\n )\n except PluginError as e:\n errors['__all__'] = e.message\n\n if errors:\n return Response(\n {\n 'errors': errors,\n }, status=400\n )\n\n for key, value in six.iteritems(cleaned):\n if value is None:\n plugin.unset_option(\n project=project,\n key=key,\n )\n else:\n plugin.set_option(\n project=project,\n key=key,\n value=value,\n )\n\n context = serialize(plugin, request.user, PluginWithConfigSerializer(project))\n\n plugin_enabled.send(plugin=plugin, project=project, user=request.user, sender=self)\n\n return Response(context)\n", "path": "src/sentry/api/endpoints/project_plugin_details.py"}]} | 1,674 | 388 |
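A hedged sketch of the error-summarisation pattern the golden diff above introduces for the plugin "test" endpoint: it converts an `HTTPError`, a urllib-style error object, or any other exception into a short user-facing test result. The helper name is mine; the branches and the 256-character truncation mirror the diff.

```python
import logging
from requests.exceptions import HTTPError

def summarize_test_failure(exc, plugin_id):
    """Turn a plugin-test exception into a short result string (sketch)."""
    if isinstance(exc, HTTPError):
        # requests attaches the server reply to the exception
        return '%s\n%s' % (exc, exc.response.text[:256])
    if hasattr(exc, 'read') and callable(exc.read):
        # urllib-style errors expose the body via read()
        return '%s\n%s' % (exc, exc.read()[:256])
    # Anything else gets logged with a traceback and reported generically
    logging.exception('Plugin(%s) raised an error during test', plugin_id)
    return 'There was an internal error with the Plugin'
```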
gh_patches_debug_15019 | rasdani/github-patches | git_diff | Mailu__Mailu-1305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Certwatcher does not work with symlinks (on Kubernetes)
I need to recheck this, but I just had this issue... Will report in 3 Months
</issue>
<code>
[start of core/nginx/certwatcher.py]
1 #!/usr/bin/python3
2 """
3 Certificate watcher which reloads nginx or reconfigures it, depending on what
4 happens to externally supplied certificates. Only executed by start.py in case
5 of TLS_FLAVOR=[mail, cert]
6 """
7
8 from os.path import exists, split as path_split
9 from os import system
10 import time
11 from watchdog.observers import Observer
12 from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \
13 FileCreatedEvent, FileModifiedEvent, FileMovedEvent
14
15 class ChangeHandler(FileSystemEventHandler):
16 "watchdog-handler listening on any event, executing the correct configuration/reload steps"
17 @staticmethod
18 def reload_nginx():
19 "merely reload nginx without re-configuring everything"
20 if exists("/var/run/nginx.pid"):
21 print("Reloading a running nginx")
22 system("nginx -s reload")
23
24 @staticmethod
25 def reexec_config():
26 "execute a reconfiguration of the system, which also reloads"
27 print("Reconfiguring system")
28 system("/config.py")
29
30 def on_any_event(self, event):
31 "event-listener checking if the affected files are the cert-files we're interested in"
32 if event.is_directory:
33 return
34
35 filename = path_split(event.src_path)[-1]
36 if isinstance(event, FileMovedEvent):
37 filename = path_split(event.dest_path)[-1]
38
39 if filename in ['cert.pem', 'key.pem']:
40 # all cases except for FileModified need re-configure
41 if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):
42 ChangeHandler.reexec_config()
43 # file modification needs only a nginx reload without config.py
44 elif isinstance(event, FileModifiedEvent):
45 ChangeHandler.reload_nginx()
46 # cert files have been moved away, re-configure
47 elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:
48 ChangeHandler.reexec_config()
49
50
51 if __name__ == '__main__':
52 observer = Observer()
53 handler = ChangeHandler()
54 observer.schedule(handler, "/certs", recursive=False)
55 observer.start()
56
57 try:
58 while True:
59 time.sleep(1)
60 except KeyboardInterrupt:
61 observer.stop()
62
63 observer.join()
64
[end of core/nginx/certwatcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/nginx/certwatcher.py b/core/nginx/certwatcher.py
--- a/core/nginx/certwatcher.py
+++ b/core/nginx/certwatcher.py
@@ -8,7 +8,7 @@
from os.path import exists, split as path_split
from os import system
import time
-from watchdog.observers import Observer
+from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \
FileCreatedEvent, FileModifiedEvent, FileMovedEvent
@@ -49,7 +49,7 @@
if __name__ == '__main__':
- observer = Observer()
+ observer = PollingObserver()
handler = ChangeHandler()
observer.schedule(handler, "/certs", recursive=False)
observer.start()
| {"golden_diff": "diff --git a/core/nginx/certwatcher.py b/core/nginx/certwatcher.py\n--- a/core/nginx/certwatcher.py\n+++ b/core/nginx/certwatcher.py\n@@ -8,7 +8,7 @@\n from os.path import exists, split as path_split\n from os import system\n import time\n-from watchdog.observers import Observer\n+from watchdog.observers.polling import PollingObserver\n from watchdog.events import FileSystemEventHandler, FileDeletedEvent, \\\n FileCreatedEvent, FileModifiedEvent, FileMovedEvent\n \n@@ -49,7 +49,7 @@\n \n \n if __name__ == '__main__':\n- observer = Observer()\n+ observer = PollingObserver()\n handler = ChangeHandler()\n observer.schedule(handler, \"/certs\", recursive=False)\n observer.start()\n", "issue": "Certwatcher does not work with symlinks (on Kubernetes)\nI need to recheck this, but I just had this issue... Will report in 3 Months\n", "before_files": [{"content": "#!/usr/bin/python3\n\"\"\"\nCertificate watcher which reloads nginx or reconfigures it, depending on what\nhappens to externally supplied certificates. Only executed by start.py in case\nof TLS_FLAVOR=[mail, cert]\n\"\"\"\n\nfrom os.path import exists, split as path_split\nfrom os import system\nimport time\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler, FileDeletedEvent, \\\n FileCreatedEvent, FileModifiedEvent, FileMovedEvent\n\nclass ChangeHandler(FileSystemEventHandler):\n \"watchdog-handler listening on any event, executing the correct configuration/reload steps\"\n @staticmethod\n def reload_nginx():\n \"merely reload nginx without re-configuring everything\"\n if exists(\"/var/run/nginx.pid\"):\n print(\"Reloading a running nginx\")\n system(\"nginx -s reload\")\n\n @staticmethod\n def reexec_config():\n \"execute a reconfiguration of the system, which also reloads\"\n print(\"Reconfiguring system\")\n system(\"/config.py\")\n\n def on_any_event(self, event):\n \"event-listener checking if the affected files are the cert-files we're interested in\"\n if event.is_directory:\n return\n\n filename = path_split(event.src_path)[-1]\n if isinstance(event, FileMovedEvent):\n filename = path_split(event.dest_path)[-1]\n\n if filename in ['cert.pem', 'key.pem']:\n # all cases except for FileModified need re-configure\n if isinstance(event, (FileCreatedEvent, FileMovedEvent, FileDeletedEvent)):\n ChangeHandler.reexec_config()\n # file modification needs only a nginx reload without config.py\n elif isinstance(event, FileModifiedEvent):\n ChangeHandler.reload_nginx()\n # cert files have been moved away, re-configure\n elif isinstance(event, FileMovedEvent) and path_split(event.src_path)[-1] in ['cert.pem', 'key.pem']:\n ChangeHandler.reexec_config()\n\n\nif __name__ == '__main__':\n observer = Observer()\n handler = ChangeHandler()\n observer.schedule(handler, \"/certs\", recursive=False)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n\n observer.join()\n", "path": "core/nginx/certwatcher.py"}]} | 1,170 | 171 |
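A brief note on why the golden diff works: Kubernetes mounts Secrets and ConfigMaps as symlinks that are atomically re-pointed on rotation, and inotify-based watchers often miss those updates. watchdog's `PollingObserver` compares directory snapshots instead, so it sees the swap. A minimal standalone sketch, assuming the `watchdog` package is installed:

```python
import time
from watchdog.observers.polling import PollingObserver
from watchdog.events import FileSystemEventHandler

class CertHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        # Polling diffs directory snapshots, so symlink swaps
        # (the Kubernetes Secret update mechanism) surface as events.
        print("change detected:", event.src_path)

observer = PollingObserver()  # polls instead of relying on inotify
observer.schedule(CertHandler(), "/certs", recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
```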
gh_patches_debug_4511 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-704 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Xiaomi Mijia Light Sensor (GZCGQ01LM) in ZHA not detecting battery level anymore (it did in previous versions lower than 2020.12.7)
Hi, the original report is from lustreking (aka Steve Small); I (yfands) can confirm Steve's findings: indeed, no battery level is reported.
Steve posted his issue [here](https://community.home-assistant.io/t/xiaomi-mijia-zha-not-detecting-battery-level/257827) but got no answer there, so I am trying it here. His original report contains two pictures: one from when everything was working, and a second taken after the update to 2020.12.7, when it stopped working; the difference is the class the device was assigned to.
The Problem
A while ago I got a Xiaomi Mijia Light Sensor (GZCGQ01LM), and it worked well for me, so I eventually ordered 2 more. Before I got them, I updated to the current version of HA (2020.12.7), as well as migrated HA to a new RPi.
When I tried to add them, ZHA found the illuminance entity, but not the battery entity, which my previous sensor had.
In ZHA I see the new sensors listed as Xiaomi, while the previous sensor’s manufacturer was listed as lumi. The model for both is lumi.sen_ill.mgl01
Under the zigbee info for the new ones it says “Power Source: Mains” which is obviously incorrect!
Here are the zigbee details of the good and bad ones, (see his original link).. The configuration below is from me (yfands), so to be short in ZHA found the illuminance entity, but not the battery entity anymore!
## System Health
version | 2020.12.2
-- | --
installation_type | Home Assistant OS
dev | false
hassio | true
docker | true
virtualenv | false
python_version | 3.8.6
os_name | Linux
os_version | 5.4.79-v8
arch | aarch64
timezone | Europe/Amsterdam
<details><summary>Home Assistant Community Store</summary>
GitHub API | ok
-- | --
Github API Calls Remaining | 4646
Installed Version | 1.9.0
Stage | running
Available Repositories | 711
Installed Repositories | 29
</details>
<details><summary>Home Assistant Cloud</summary>
logged_in | false
-- | --
can_reach_cert_server | ok
can_reach_cloud_auth | ok
can_reach_cloud | ok
</details>
<details><summary>Home Assistant Supervisor</summary>
host_os | Home Assistant OS 5.9
-- | --
update_channel | stable
supervisor_version | 2020.12.7
docker_version | 19.03.13
disk_total | 27.2 GB
disk_used | 5.5 GB
healthy | true
supported | true
board | rpi4-64
supervisor_api | ok
version_api | ok
installed_addons | File editor (5.2.0), Samba share (9.3.0), Terminal & SSH (8.10.0), Mosquitto broker (5.1), Node-RED (7.2.11)
</details>
<details><summary>Lovelace</summary>
dashboards | 1
-- | --
mode | storage
views | 10
resources | 20
</details>
</issue>
<code>
[start of zhaquirks/xiaomi/aqara/illumination.py]
1 """Quirk for lumi.sen_ill.mgl01 illumination sensor."""
2 import logging
3
4 from zigpy.profiles import zha
5 from zigpy.zcl.clusters.general import Basic, Identify
6 from zigpy.zcl.clusters.measurement import IlluminanceMeasurement
7 from zigpy.zdo.types import NodeDescriptor
8
9 from .. import LUMI, BasicCluster, XiaomiCustomDevice
10 from ... import PowerConfigurationCluster
11 from ...const import (
12 DEVICE_TYPE,
13 ENDPOINTS,
14 INPUT_CLUSTERS,
15 MODELS_INFO,
16 NODE_DESCRIPTOR,
17 OUTPUT_CLUSTERS,
18 PROFILE_ID,
19 )
20
21 _LOGGER = logging.getLogger(__name__)
22
23
24 class Illumination(XiaomiCustomDevice):
25 """Aqara LUMI lumi.sen_ill.mgl01."""
26
27 signature = {
28 # <SimpleDescriptor endpoint=1 profile=260 device_type=262
29 # device_version=1
30 # input_clusters=[0, 1024, 3, 1]
31 # output_clusters=[3]>
32 MODELS_INFO: [(LUMI, "lumi.sen_ill.mgl01")],
33 ENDPOINTS: {
34 1: {
35 PROFILE_ID: zha.PROFILE_ID,
36 DEVICE_TYPE: zha.DeviceType.LIGHT_SENSOR,
37 INPUT_CLUSTERS: [
38 Basic.cluster_id,
39 Identify.cluster_id,
40 IlluminanceMeasurement.cluster_id,
41 PowerConfigurationCluster.cluster_id,
42 ],
43 OUTPUT_CLUSTERS: [Identify.cluster_id],
44 }
45 },
46 }
47
48 replacement = {
49 NODE_DESCRIPTOR: NodeDescriptor(
50 0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00
51 ),
52 ENDPOINTS: {
53 1: {
54 PROFILE_ID: zha.PROFILE_ID,
55 DEVICE_TYPE: zha.DeviceType.LIGHT_SENSOR,
56 INPUT_CLUSTERS: [
57 BasicCluster,
58 Identify.cluster_id,
59 IlluminanceMeasurement.cluster_id,
60 PowerConfigurationCluster,
61 ],
62 OUTPUT_CLUSTERS: [Identify.cluster_id],
63 }
64 },
65 }
66
[end of zhaquirks/xiaomi/aqara/illumination.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/xiaomi/aqara/illumination.py b/zhaquirks/xiaomi/aqara/illumination.py
--- a/zhaquirks/xiaomi/aqara/illumination.py
+++ b/zhaquirks/xiaomi/aqara/illumination.py
@@ -29,7 +29,7 @@
# device_version=1
# input_clusters=[0, 1024, 3, 1]
# output_clusters=[3]>
- MODELS_INFO: [(LUMI, "lumi.sen_ill.mgl01")],
+ MODELS_INFO: [(LUMI, "lumi.sen_ill.mgl01"), ("XIAOMI", "lumi.sen_ill.mgl01")],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
| {"golden_diff": "diff --git a/zhaquirks/xiaomi/aqara/illumination.py b/zhaquirks/xiaomi/aqara/illumination.py\n--- a/zhaquirks/xiaomi/aqara/illumination.py\n+++ b/zhaquirks/xiaomi/aqara/illumination.py\n@@ -29,7 +29,7 @@\n # device_version=1\n # input_clusters=[0, 1024, 3, 1]\n # output_clusters=[3]>\n- MODELS_INFO: [(LUMI, \"lumi.sen_ill.mgl01\")],\n+ MODELS_INFO: [(LUMI, \"lumi.sen_ill.mgl01\"), (\"XIAOMI\", \"lumi.sen_ill.mgl01\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n", "issue": "Xiaomi Mijia Light Sensor (GZCGQ01LM) in ZHA not detecting battery level anymore (it did in previous versions lower than 2020.12.7)\nHai The original call is from ( lustreking aka Steve Small) , I (yfands) can confirm Steve's findings indeed no battery level.\r\nSteve posted his issue [here ](https://community.home-assistant.io/t/xiaomi-mijia-zha-not-detecting-battery-level/257827)but no answer there, so iam trying it here. In his original call there are 2 pictures one when all was well and the second picture after the update to 2020.12.7, the one that is not working any more, the difference is the class the device was assigned to.\r\n\r\nThe Problem\r\nA while ago I got a Xiaomi Mijia Light Sensor (GZCGQ01LM), and it worked well for me, so I eventually ordered 2 more. Before I got them, I updated to the current version of HA (2020.12.7), as well as migrated HA to a new RPi.\r\n\r\nWhen I tried to add them, ZHA found the illuminance entity, but not the battery entity, which my previous sensor had.\r\n\r\nIn ZHA I see the new sensors listed as Xiaomi, while the previous sensor\u2019s manufacturer was listed as lumi. The model for both is lumi.sen_ill.mgl01\r\n\r\nUnder the zigbee info for the new ones it says \u201cPower Source: Mains\u201d which is obviously incorrect!\r\nHere are the zigbee details of the good and bad ones, (see his original link).. 
The configuration below is from me (yfands), so to be short in ZHA found the illuminance entity, but not the battery entity anymore!\r\n\r\n## System Health\r\n\r\nversion | 2020.12.2\r\n-- | --\r\ninstallation_type | Home Assistant OS\r\ndev | false\r\nhassio | true\r\ndocker | true\r\nvirtualenv | false\r\npython_version | 3.8.6\r\nos_name | Linux\r\nos_version | 5.4.79-v8\r\narch | aarch64\r\ntimezone | Europe/Amsterdam\r\n\r\n<details><summary>Home Assistant Community Store</summary>\r\n\r\nGitHub API | ok\r\n-- | --\r\nGithub API Calls Remaining | 4646\r\nInstalled Version | 1.9.0\r\nStage | running\r\nAvailable Repositories | 711\r\nInstalled Repositories | 29\r\n\r\n</details>\r\n\r\n<details><summary>Home Assistant Cloud</summary>\r\n\r\nlogged_in | false\r\n-- | --\r\ncan_reach_cert_server | ok\r\ncan_reach_cloud_auth | ok\r\ncan_reach_cloud | ok\r\n\r\n</details>\r\n\r\n<details><summary>Home Assistant Supervisor</summary>\r\n\r\nhost_os | Home Assistant OS 5.9\r\n-- | --\r\nupdate_channel | stable\r\nsupervisor_version | 2020.12.7\r\ndocker_version | 19.03.13\r\ndisk_total | 27.2 GB\r\ndisk_used | 5.5 GB\r\nhealthy | true\r\nsupported | true\r\nboard | rpi4-64\r\nsupervisor_api | ok\r\nversion_api | ok\r\ninstalled_addons | File editor (5.2.0), Samba share (9.3.0), Terminal & SSH (8.10.0), Mosquitto broker (5.1), Node-RED (7.2.11)\r\n\r\n</details>\r\n\r\n<details><summary>Lovelace</summary>\r\n\r\ndashboards | 1\r\n-- | --\r\nmode | storage\r\nviews | 10\r\nresources | 20\r\n\r\n</details>\r\n\n", "before_files": [{"content": "\"\"\"Quirk for lumi.sen_ill.mgl01 illumination sensor.\"\"\"\nimport logging\n\nfrom zigpy.profiles import zha\nfrom zigpy.zcl.clusters.general import Basic, Identify\nfrom zigpy.zcl.clusters.measurement import IlluminanceMeasurement\nfrom zigpy.zdo.types import NodeDescriptor\n\nfrom .. import LUMI, BasicCluster, XiaomiCustomDevice\nfrom ... import PowerConfigurationCluster\nfrom ...const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n NODE_DESCRIPTOR,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Illumination(XiaomiCustomDevice):\n \"\"\"Aqara LUMI lumi.sen_ill.mgl01.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=262\n # device_version=1\n # input_clusters=[0, 1024, 3, 1]\n # output_clusters=[3]>\n MODELS_INFO: [(LUMI, \"lumi.sen_ill.mgl01\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.LIGHT_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n IlluminanceMeasurement.cluster_id,\n PowerConfigurationCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id],\n }\n },\n }\n\n replacement = {\n NODE_DESCRIPTOR: NodeDescriptor(\n 0x02, 0x40, 0x80, 0x115F, 0x7F, 0x0064, 0x2C00, 0x0064, 0x00\n ),\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.LIGHT_SENSOR,\n INPUT_CLUSTERS: [\n BasicCluster,\n Identify.cluster_id,\n IlluminanceMeasurement.cluster_id,\n PowerConfigurationCluster,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id],\n }\n },\n }\n", "path": "zhaquirks/xiaomi/aqara/illumination.py"}]} | 1,964 | 200 |
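The fix itself is only the widened `MODELS_INFO` list: newer sensor firmware reports the manufacturer as `XIAOMI` rather than `LUMI`, so the quirk must list both pairs to match. A simplified model of the matching behaviour (illustrative only, not zigpy's actual matcher):

```python
# Both manufacturer spellings map to the same model string.
MODELS_INFO = [("LUMI", "lumi.sen_ill.mgl01"), ("XIAOMI", "lumi.sen_ill.mgl01")]

def quirk_matches(manufacturer: str, model: str) -> bool:
    # A quirk applies when the reported (manufacturer, model) pair is listed.
    return (manufacturer, model) in MODELS_INFO

assert quirk_matches("XIAOMI", "lumi.sen_ill.mgl01")  # newer firmware now matches
assert quirk_matches("LUMI", "lumi.sen_ill.mgl01")    # older firmware still matches
```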
gh_patches_debug_661 | rasdani/github-patches | git_diff | pex-tool__pex-2055 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.122
On the docket:
+ [x] Support the latest Pip releases: 22.3.1 & 23.0 #2056
+ [x] Lock sdists with prepare-metadata-for-build-wheel. #2053
+ [x] Fix `execute_parallel` "leaking" a thread. #2052
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.121"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.121"
+__version__ = "2.1.122"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.121\"\n+__version__ = \"2.1.122\"\n", "issue": "Release 2.1.122\nOn the docket:\r\n+ [x] Support the latest Pip releases: 22.3.1 & 23.0 #2056\r\n+ [x] Lock sdists with prepare-metadata-for-build-wheel. #2053\r\n+ [x] Fix `execute_parallel` \"leaking\" a thread. #2052\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.121\"\n", "path": "pex/version.py"}]} | 671 | 99 |
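The golden diff above is a one-line release bump; for reference, the resulting body of `pex/version.py` is simply:

```python
__version__ = "2.1.122"
```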
gh_patches_debug_16138 | rasdani/github-patches | git_diff | uccser__cs-unplugged-718 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Workaround Crowdin bug where integer yaml keys are not preserved
When downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.
As a work around, we can use the string value of the integer as the key, i.e. `"0"` instead of `0`
</issue>
<code>
[start of csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py]
1 """Custom loader for loading structure of programming challenges."""
2
3 import os
4 from django.db import transaction
5 from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
6 from utils.TranslatableModelLoader import TranslatableModelLoader
7 from topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty
8
9
10 class ProgrammingChallengesStructureLoader(TranslatableModelLoader):
11 """Custom loader for loading structure of programming challenges."""
12
13 @transaction.atomic
14 def load(self):
15 """Load the content for structure of programming challenges.
16
17 Raises:
18 MissingRequiredFieldError: when no object can be found with the matching
19 attribute.
20 """
21 structure = self.load_yaml_file(self.structure_file_path)
22
23 prog_languages = structure.get("languages", None)
24 difficulty_levels = structure.get("difficulties", None)
25 if None in [prog_languages, difficulty_levels]:
26 raise MissingRequiredFieldError(
27 self.structure_file_path,
28 ["lanugages", "difficulties"],
29 "Programming Challenge Structure"
30 )
31
32 # Add "-languages" to the structure filename
33 prog_languages_translation_filename = "{}-languages.yaml".format(
34 os.path.splitext(self.structure_filename)[0]
35 )
36 prog_languages_translations = self.get_yaml_translations(
37 prog_languages_translation_filename,
38 required_slugs=prog_languages.keys(),
39 required_fields=["name"]
40 )
41
42 for (prog_language, prog_language_data) in prog_languages.items():
43
44 if prog_language_data is None:
45 raise MissingRequiredFieldError(
46 self.structure_file_path,
47 ["number"],
48 "Programming Challenge Language"
49 )
50
51 # Check for required fields
52 prog_language_number = prog_language_data.get("number", None)
53 if prog_language_number is None:
54 raise MissingRequiredFieldError(
55 self.structure_file_path,
56 ["number"],
57 "Programming Challenge Language"
58 )
59
60 # Check if icon is given
61 if "icon" in prog_language_data:
62 prog_language_icon = prog_language_data["icon"]
63 else:
64 prog_language_icon = None
65
66 new_prog_language = ProgrammingChallengeLanguage(
67 slug=prog_language,
68 number=prog_language_number,
69 icon=prog_language_icon
70 )
71
72 translations = prog_languages_translations.get(prog_language, dict())
73 self.populate_translations(new_prog_language, translations)
74 self.mark_translation_availability(new_prog_language, required_fields=["name"])
75 new_prog_language.save()
76
77 self.log("Added programming language: {}".format(new_prog_language.__str__()))
78
79 # Add "-languages" to the structure filename
80 difficulties_translation_filename = "{}-difficulties.yaml".format(
81 os.path.splitext(self.structure_filename)[0]
82 )
83 difficulties_translations = self.get_yaml_translations(
84 difficulties_translation_filename,
85 required_slugs=difficulty_levels,
86 required_fields=["name"],
87 )
88
89 for difficulty in difficulty_levels:
90
91 new_difficulty = ProgrammingChallengeDifficulty(
92 level=int(difficulty),
93 )
94
95 translations = difficulties_translations.get(difficulty, dict())
96 self.populate_translations(new_difficulty, translations)
97 self.mark_translation_availability(new_difficulty, required_fields=["name"])
98 new_difficulty.save()
99
100 self.log("Added programming difficulty level: {}".format(new_difficulty.__str__()))
101
102 self.log("")
103
[end of csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
@@ -86,13 +86,13 @@
required_fields=["name"],
)
- for difficulty in difficulty_levels:
+ for level, difficulty_slug in enumerate(difficulty_levels):
new_difficulty = ProgrammingChallengeDifficulty(
- level=int(difficulty),
+ level=level,
)
- translations = difficulties_translations.get(difficulty, dict())
+ translations = difficulties_translations.get(difficulty_slug, dict())
self.populate_translations(new_difficulty, translations)
self.mark_translation_availability(new_difficulty, required_fields=["name"])
new_difficulty.save()
| {"golden_diff": "diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n@@ -86,13 +86,13 @@\n required_fields=[\"name\"],\n )\n \n- for difficulty in difficulty_levels:\n+ for level, difficulty_slug in enumerate(difficulty_levels):\n \n new_difficulty = ProgrammingChallengeDifficulty(\n- level=int(difficulty),\n+ level=level,\n )\n \n- translations = difficulties_translations.get(difficulty, dict())\n+ translations = difficulties_translations.get(difficulty_slug, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n", "issue": "Workaround Crowdin bug where integer yaml keys are not preserved\nWhen downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.\r\n\r\nAs a work around, we can use the string value of the integer as the key, i.e. `\"0\"` instead of `0`\nWorkaround Crowdin bug where integer yaml keys are not preserved\nWhen downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.\r\n\r\nAs a work around, we can use the string value of the integer as the key, i.e. `\"0\"` instead of `0`\n", "before_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = 
prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=int(difficulty),\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}]} | 1,631 | 206 |
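The workaround in the golden diff avoids integer YAML keys altogether: `difficulties` becomes an ordered list of slugs, the integer level is derived from list position with `enumerate`, and the slug (a string Crowdin preserves) keys the translations. A minimal sketch of the pattern; the slug names and translation data are hypothetical:

```python
difficulty_levels = ["tracing", "beginner", "intermediate"]  # hypothetical slugs, in order

difficulties_translations = {"beginner": {"name": "Beginner"}}  # hypothetical data

for level, difficulty_slug in enumerate(difficulty_levels):
    # level is stable because it comes from list position, not from a YAML key
    translations = difficulties_translations.get(difficulty_slug, {})
    print(level, difficulty_slug, translations.get("name", ""))
```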
gh_patches_debug_1183 | rasdani/github-patches | git_diff | bokeh__bokeh-2235 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VBoxForm broken
Added a `float:left` to fix `sliders.py`, which broke the stock app example even worse.
</issue>
<code>
[start of examples/plotting/file/slider.py]
1
2 from bokeh.io import vform
3 from bokeh.plotting import figure, hplot, output_file, show, vplot, ColumnDataSource
4 from bokeh.models.actions import Callback
5 from bokeh.models.widgets import Slider
6
7 import numpy as np
8
9 x = np.linspace(0, 10, 500)
10 y = np.sin(x)
11
12 source = ColumnDataSource(data=dict(x=x, y=y))
13
14
15 plot = figure(y_range=(-10, 10), plot_width=400, plot_height=400)
16 plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
17
18 callback = Callback(args=dict(source=source), code="""
19 var data = source.get('data');
20 var A = amp.get('value')
21 var k = freq.get('value')
22 var phi = phase.get('value')
23 var B = offset.get('value')
24 x = data['x']
25 y = data['y']
26 for (i = 0; i < x.length; i++) {
27 y[i] = B + A*Math.sin(k*x[i]+phi);
28 }
29 source.trigger('change');
30 """)
31
32 amp_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude", callback=callback)
33 callback.args["amp"] = amp_slider
34
35 freq_slider = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency", callback=callback)
36 callback.args["freq"] = freq_slider
37
38 phase_slider = Slider(start=0, end=6.4, value=0, step=.1, title="Phase", callback=callback)
39 callback.args["phase"] = phase_slider
40
41 offset_slider = Slider(start=-5, end=5, value=0, step=.1, title="Offset", callback=callback)
42 callback.args["offset"] = offset_slider
43
44 layout = hplot(
45 vform(amp_slider, freq_slider, phase_slider, offset_slider),
46 plot
47 )
48
49 output_file("slider.html")
50
51 show(layout)
52
[end of examples/plotting/file/slider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/plotting/file/slider.py b/examples/plotting/file/slider.py
--- a/examples/plotting/file/slider.py
+++ b/examples/plotting/file/slider.py
@@ -42,8 +42,8 @@
callback.args["offset"] = offset_slider
layout = hplot(
+ plot,
vform(amp_slider, freq_slider, phase_slider, offset_slider),
- plot
)
output_file("slider.html")
| {"golden_diff": "diff --git a/examples/plotting/file/slider.py b/examples/plotting/file/slider.py\n--- a/examples/plotting/file/slider.py\n+++ b/examples/plotting/file/slider.py\n@@ -42,8 +42,8 @@\n callback.args[\"offset\"] = offset_slider\n \n layout = hplot(\n+ plot,\n vform(amp_slider, freq_slider, phase_slider, offset_slider),\n- plot\n )\n \n output_file(\"slider.html\")\n", "issue": "VBoxForm broken\nAdded a `float:left` to fix `sliders.py` which broke stock app example worse. \n\n", "before_files": [{"content": "\nfrom bokeh.io import vform\nfrom bokeh.plotting import figure, hplot, output_file, show, vplot, ColumnDataSource\nfrom bokeh.models.actions import Callback\nfrom bokeh.models.widgets import Slider\n\nimport numpy as np\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\n\nplot = figure(y_range=(-10, 10), plot_width=400, plot_height=400)\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\ncallback = Callback(args=dict(source=source), code=\"\"\"\n var data = source.get('data');\n var A = amp.get('value')\n var k = freq.get('value')\n var phi = phase.get('value')\n var B = offset.get('value')\n x = data['x']\n y = data['y']\n for (i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.trigger('change');\n\"\"\")\n\namp_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\", callback=callback)\ncallback.args[\"amp\"] = amp_slider\n\nfreq_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\", callback=callback)\ncallback.args[\"freq\"] = freq_slider\n\nphase_slider = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\", callback=callback)\ncallback.args[\"phase\"] = phase_slider\n\noffset_slider = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\", callback=callback)\ncallback.args[\"offset\"] = offset_slider\n\nlayout = hplot(\n vform(amp_slider, freq_slider, phase_slider, offset_slider),\n plot\n)\n\noutput_file(\"slider.html\")\n\nshow(layout)\n", "path": "examples/plotting/file/slider.py"}]} | 1,115 | 99 |
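The fix is purely compositional: in the legacy bokeh API this example uses, placing the plot before the widget `vform` inside `hplot` sidesteps the broken `VBoxForm` float behaviour. The corrected layout call, as in the golden diff:

```python
layout = hplot(
    plot,  # plot first
    vform(amp_slider, freq_slider, phase_slider, offset_slider),  # widget column second
)
```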
gh_patches_debug_21852 | rasdani/github-patches | git_diff | wagtail__wagtail-7315 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Site history report n+1s
While checking Scout APM for a client project with lots of different authors and overall activity, I've noticed that the site history report has a few N+1s.

</issue>
<code>
[start of wagtail/admin/views/reports/audit_logging.py]
1 import datetime
2
3 import django_filters
4
5 from django import forms
6 from django.db.models import Q, Subquery
7 from django.utils.encoding import force_str
8 from django.utils.translation import gettext_lazy as _
9
10 from wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet
11 from wagtail.core.log_actions import page_log_action_registry
12 from wagtail.core.models import Page, PageLogEntry, UserPagePermissionsProxy
13
14 from .base import ReportView
15
16
17 class SiteHistoryReportFilterSet(WagtailFilterSet):
18 action = django_filters.ChoiceFilter(choices=page_log_action_registry.get_choices)
19 hide_commenting_actions = django_filters.BooleanFilter(
20 label=_('Hide commenting actions'),
21 method='filter_hide_commenting_actions',
22 widget=forms.CheckboxInput,
23 )
24 timestamp = django_filters.DateFromToRangeFilter(label=_('Date'), widget=DateRangePickerWidget)
25 label = django_filters.CharFilter(label=_('Title'), lookup_expr='icontains')
26 user = django_filters.ModelChoiceFilter(
27 field_name='user', queryset=lambda request: PageLogEntry.objects.all().get_users()
28 )
29
30 def filter_hide_commenting_actions(self, queryset, name, value):
31 if value:
32 queryset = queryset.exclude(
33 action__startswith='wagtail.comments'
34 )
35 return queryset
36
37 class Meta:
38 model = PageLogEntry
39 fields = ['label', 'action', 'user', 'timestamp', 'hide_commenting_actions']
40
41
42 class LogEntriesView(ReportView):
43 template_name = 'wagtailadmin/reports/site_history.html'
44 title = _('Site history')
45 header_icon = 'history'
46 filterset_class = SiteHistoryReportFilterSet
47
48 export_headings = {
49 "object_id": _("ID"),
50 "title": _("Title"),
51 "object_verbose_name": _("Type"),
52 "action": _("Action type"),
53 "timestamp": _("Date/Time")
54 }
55 list_export = [
56 "object_id",
57 "label",
58 "object_verbose_name",
59 "action",
60 "timestamp"
61 ]
62
63 def __init__(self, **kwargs):
64 super().__init__(**kwargs)
65
66 self.custom_field_preprocess['action'] = {
67 self.FORMAT_CSV: self.get_action_label,
68 self.FORMAT_XLSX: self.get_action_label
69 }
70
71 def get_filename(self):
72 return "audit-log-{}".format(
73 datetime.datetime.today().strftime("%Y-%m-%d")
74 )
75
76 def get_queryset(self):
77 q = Q(
78 page__in=UserPagePermissionsProxy(self.request.user).explorable_pages().values_list('pk', flat=True)
79 )
80
81 root_page_permissions = Page.get_first_root_node().permissions_for_user(self.request.user)
82 if (
83 self.request.user.is_superuser
84 or root_page_permissions.can_add_subpage() or root_page_permissions.can_edit()
85 ):
86 # Include deleted entries
87 q = q | Q(page_id__in=Subquery(
88 PageLogEntry.objects.filter(deleted=True).values('page_id')
89 ))
90
91 return PageLogEntry.objects.filter(q)
92
93 def get_action_label(self, action):
94 from wagtail.core.log_actions import page_log_action_registry
95 return force_str(page_log_action_registry.get_action_label(action))
96
[end of wagtail/admin/views/reports/audit_logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/admin/views/reports/audit_logging.py b/wagtail/admin/views/reports/audit_logging.py
--- a/wagtail/admin/views/reports/audit_logging.py
+++ b/wagtail/admin/views/reports/audit_logging.py
@@ -74,9 +74,7 @@
)
def get_queryset(self):
- q = Q(
- page__in=UserPagePermissionsProxy(self.request.user).explorable_pages().values_list('pk', flat=True)
- )
+ q = Q(page__in=UserPagePermissionsProxy(self.request.user).explorable_pages())
root_page_permissions = Page.get_first_root_node().permissions_for_user(self.request.user)
if (
@@ -88,7 +86,11 @@
PageLogEntry.objects.filter(deleted=True).values('page_id')
))
- return PageLogEntry.objects.filter(q)
+ # Using prefech_related() on page, as select_related() generates an INNER JOIN,
+ # which filters out entries for deleted pages
+ return PageLogEntry.objects.filter(q).select_related(
+ 'user', 'user__wagtail_userprofile'
+ ).prefetch_related('page')
def get_action_label(self, action):
from wagtail.core.log_actions import page_log_action_registry
| {"golden_diff": "diff --git a/wagtail/admin/views/reports/audit_logging.py b/wagtail/admin/views/reports/audit_logging.py\n--- a/wagtail/admin/views/reports/audit_logging.py\n+++ b/wagtail/admin/views/reports/audit_logging.py\n@@ -74,9 +74,7 @@\n )\n \n def get_queryset(self):\n- q = Q(\n- page__in=UserPagePermissionsProxy(self.request.user).explorable_pages().values_list('pk', flat=True)\n- )\n+ q = Q(page__in=UserPagePermissionsProxy(self.request.user).explorable_pages())\n \n root_page_permissions = Page.get_first_root_node().permissions_for_user(self.request.user)\n if (\n@@ -88,7 +86,11 @@\n PageLogEntry.objects.filter(deleted=True).values('page_id')\n ))\n \n- return PageLogEntry.objects.filter(q)\n+ # Using prefech_related() on page, as select_related() generates an INNER JOIN,\n+ # which filters out entries for deleted pages\n+ return PageLogEntry.objects.filter(q).select_related(\n+ 'user', 'user__wagtail_userprofile'\n+ ).prefetch_related('page')\n \n def get_action_label(self, action):\n from wagtail.core.log_actions import page_log_action_registry\n", "issue": "Site history report n+1s\nWhile checking Scout APM for a client project with lots of different authors and overall activity, I've noticed that the site history report has a few N+1s.\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\n\nimport django_filters\n\nfrom django import forms\nfrom django.db.models import Q, Subquery\nfrom django.utils.encoding import force_str\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet\nfrom wagtail.core.log_actions import page_log_action_registry\nfrom wagtail.core.models import Page, PageLogEntry, UserPagePermissionsProxy\n\nfrom .base import ReportView\n\n\nclass SiteHistoryReportFilterSet(WagtailFilterSet):\n action = django_filters.ChoiceFilter(choices=page_log_action_registry.get_choices)\n hide_commenting_actions = django_filters.BooleanFilter(\n label=_('Hide commenting actions'),\n method='filter_hide_commenting_actions',\n widget=forms.CheckboxInput,\n )\n timestamp = django_filters.DateFromToRangeFilter(label=_('Date'), widget=DateRangePickerWidget)\n label = django_filters.CharFilter(label=_('Title'), lookup_expr='icontains')\n user = django_filters.ModelChoiceFilter(\n field_name='user', queryset=lambda request: PageLogEntry.objects.all().get_users()\n )\n\n def filter_hide_commenting_actions(self, queryset, name, value):\n if value:\n queryset = queryset.exclude(\n action__startswith='wagtail.comments'\n )\n return queryset\n\n class Meta:\n model = PageLogEntry\n fields = ['label', 'action', 'user', 'timestamp', 'hide_commenting_actions']\n\n\nclass LogEntriesView(ReportView):\n template_name = 'wagtailadmin/reports/site_history.html'\n title = _('Site history')\n header_icon = 'history'\n filterset_class = SiteHistoryReportFilterSet\n\n export_headings = {\n \"object_id\": _(\"ID\"),\n \"title\": _(\"Title\"),\n \"object_verbose_name\": _(\"Type\"),\n \"action\": _(\"Action type\"),\n \"timestamp\": _(\"Date/Time\")\n }\n list_export = [\n \"object_id\",\n \"label\",\n \"object_verbose_name\",\n \"action\",\n \"timestamp\"\n ]\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.custom_field_preprocess['action'] = {\n self.FORMAT_CSV: self.get_action_label,\n self.FORMAT_XLSX: self.get_action_label\n }\n\n def get_filename(self):\n return \"audit-log-{}\".format(\n datetime.datetime.today().strftime(\"%Y-%m-%d\")\n )\n\n def get_queryset(self):\n q = Q(\n 
page__in=UserPagePermissionsProxy(self.request.user).explorable_pages().values_list('pk', flat=True)\n )\n\n root_page_permissions = Page.get_first_root_node().permissions_for_user(self.request.user)\n if (\n self.request.user.is_superuser\n or root_page_permissions.can_add_subpage() or root_page_permissions.can_edit()\n ):\n # Include deleted entries\n q = q | Q(page_id__in=Subquery(\n PageLogEntry.objects.filter(deleted=True).values('page_id')\n ))\n\n return PageLogEntry.objects.filter(q)\n\n def get_action_label(self, action):\n from wagtail.core.log_actions import page_log_action_registry\n return force_str(page_log_action_registry.get_action_label(action))\n", "path": "wagtail/admin/views/reports/audit_logging.py"}]} | 1,532 | 286 |
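Why the golden diff uses two different optimisations: `select_related` emits a single JOINed query, which suits `user` and its `wagtail_userprofile`, but for `page` it would produce an INNER JOIN that silently drops log entries whose page has been deleted, so `prefetch_related` (a separate query) is used instead. A sketch of the resulting queryset:

```python
queryset = (
    PageLogEntry.objects.filter(q)
    .select_related("user", "user__wagtail_userprofile")  # one JOINed query for users
    .prefetch_related("page")  # second query; entries whose page row is gone survive
)
```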
gh_patches_debug_25226 | rasdani/github-patches | git_diff | rotki__rotki-1397 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Install roboto mono
## Abstract
We should install the mono version of our main font in order to improve legibility and ensure proper alignment of content in certain scenarios.
## Specification
https://www.npmjs.com/package/typeface-roboto-mono install, require it where we require roboto, use it in classes where a mono font is required to line things up properly in data tables / cards / etc.
</issue>
<code>
[start of rotkehlchen/chain/ethereum/transactions.py]
1 import logging
2 from collections import defaultdict
3 from typing import Dict, List, Optional
4
5 from rotkehlchen.db.dbhandler import DBHandler
6 from rotkehlchen.db.ranges import DBQueryRanges
7 from rotkehlchen.errors import RemoteError
8 from rotkehlchen.externalapis.etherscan import Etherscan
9 from rotkehlchen.logging import RotkehlchenLogsAdapter
10 from rotkehlchen.typing import ChecksumEthAddress, EthereumTransaction, Timestamp
11 from rotkehlchen.user_messages import MessagesAggregator
12 from rotkehlchen.utils.interfaces import LockableQueryObject, protect_with_lock
13
14 logger = logging.getLogger(__name__)
15 log = RotkehlchenLogsAdapter(logger)
16
17 FREE_ETH_TX_LIMIT = 500
18
19
20 class EthTransactions(LockableQueryObject):
21
22 def __init__(
23 self,
24 database: DBHandler,
25 etherscan: Etherscan,
26 msg_aggregator: MessagesAggregator,
27 ) -> None:
28 super().__init__()
29 self.database = database
30 self.etherscan = etherscan
31 self.msg_aggregator = msg_aggregator
32 self.tx_per_address: Dict[ChecksumEthAddress, int] = defaultdict(int)
33
34 def _single_address_query_transactions(
35 self,
36 address: ChecksumEthAddress,
37 start_ts: Timestamp,
38 end_ts: Timestamp,
39 with_limit: bool,
40 ) -> List[EthereumTransaction]:
41 self.tx_per_address[address] = 0
42 transactions = self.database.get_ethereum_transactions(
43 from_ts=start_ts,
44 to_ts=end_ts,
45 address=address,
46 )
47 ranges = DBQueryRanges(self.database)
48 ranges_to_query = ranges.get_location_query_ranges(
49 location_string=f'ethtxs_{address}',
50 start_ts=start_ts,
51 end_ts=end_ts,
52 )
53 new_transactions = []
54 for query_start_ts, query_end_ts in ranges_to_query:
55 for internal in (False, True):
56 try:
57 new_transactions.extend(self.etherscan.get_transactions(
58 account=address,
59 internal=internal,
60 from_ts=query_start_ts,
61 to_ts=query_end_ts,
62 ))
63 except RemoteError as e:
64 self.msg_aggregator.add_error(
65 f'Got error "{str(e)}" while querying ethereum transactions '
66 f'from Etherscan. Transactions not added to the DB '
67 f'from_ts: {query_start_ts} '
68 f'to_ts: {query_end_ts} '
69 f'internal: {internal}',
70 )
71
72 # add new transactions to the DB
73 if new_transactions != []:
74 self.database.add_ethereum_transactions(new_transactions, from_etherscan=True)
75 # and also set the last queried timestamps for the address
76 ranges.update_used_query_range(
77 location_string=f'ethtxs_{address}',
78 start_ts=start_ts,
79 end_ts=end_ts,
80 ranges_to_query=ranges_to_query,
81 )
82 # finally append them to the already returned DB transactions
83 transactions.extend(new_transactions)
84
85 if with_limit:
86 transactions_queried_so_far = sum(x for _, x in self.tx_per_address.items())
87 remaining_num_tx = FREE_ETH_TX_LIMIT - transactions_queried_so_far
88 returning_tx_length = min(remaining_num_tx, len(transactions))
89 # Note down how many we got for this address
90 self.tx_per_address[address] = returning_tx_length
91 return transactions[:returning_tx_length]
92
93 return transactions
94
95 @protect_with_lock()
96 def query(
97 self,
98 address: Optional[ChecksumEthAddress],
99 from_ts: Timestamp,
100 to_ts: Timestamp,
101 with_limit: bool = False,
102 recent_first: bool = False,
103 ) -> List[EthereumTransaction]:
104 """Queries for all transactions (normal AND internal) of all ethereum accounts.
105 Returns a list of all transactions of all accounts sorted by time.
106
107 If `with_limit` is true then the api limit is applied
108
109 if `recent_first` is true then the transactions are returned with the most
110 recent first on the list
111
112 May raise:
113 - RemoteError if etherscan is used and there is a problem with reaching it or
114 with parsing the response.
115 """
116 transactions: List[EthereumTransaction] = []
117
118 if address is not None:
119 accounts = [address]
120 else:
121 accounts = self.database.get_blockchain_accounts().eth
122
123 for address in accounts:
124 transactions.extend(self._single_address_query_transactions(
125 address=address,
126 start_ts=from_ts,
127 end_ts=to_ts,
128 with_limit=with_limit,
129 ))
130
131 transactions.sort(key=lambda tx: tx.timestamp, reverse=recent_first)
132 return transactions
133
[end of rotkehlchen/chain/ethereum/transactions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rotkehlchen/chain/ethereum/transactions.py b/rotkehlchen/chain/ethereum/transactions.py
--- a/rotkehlchen/chain/ethereum/transactions.py
+++ b/rotkehlchen/chain/ethereum/transactions.py
@@ -72,6 +72,15 @@
# add new transactions to the DB
if new_transactions != []:
self.database.add_ethereum_transactions(new_transactions, from_etherscan=True)
+ # And since at least for now the increasingly negative nonce for the internal
+ # transactions happens only in the DB writing, requery the entire batch from
+ # the DB to get the updated transactions
+ transactions = self.database.get_ethereum_transactions(
+ from_ts=start_ts,
+ to_ts=end_ts,
+ address=address,
+ )
+
# and also set the last queried timestamps for the address
ranges.update_used_query_range(
location_string=f'ethtxs_{address}',
@@ -79,8 +88,6 @@
end_ts=end_ts,
ranges_to_query=ranges_to_query,
)
- # finally append them to the already returned DB transactions
- transactions.extend(new_transactions)
if with_limit:
transactions_queried_so_far = sum(x for _, x in self.tx_per_address.items())
| {"golden_diff": "diff --git a/rotkehlchen/chain/ethereum/transactions.py b/rotkehlchen/chain/ethereum/transactions.py\n--- a/rotkehlchen/chain/ethereum/transactions.py\n+++ b/rotkehlchen/chain/ethereum/transactions.py\n@@ -72,6 +72,15 @@\n # add new transactions to the DB\n if new_transactions != []:\n self.database.add_ethereum_transactions(new_transactions, from_etherscan=True)\n+ # And since at least for now the increasingly negative nonce for the internal\n+ # transactions happens only in the DB writing, requery the entire batch from\n+ # the DB to get the updated transactions\n+ transactions = self.database.get_ethereum_transactions(\n+ from_ts=start_ts,\n+ to_ts=end_ts,\n+ address=address,\n+ )\n+\n # and also set the last queried timestamps for the address\n ranges.update_used_query_range(\n location_string=f'ethtxs_{address}',\n@@ -79,8 +88,6 @@\n end_ts=end_ts,\n ranges_to_query=ranges_to_query,\n )\n- # finally append them to the already returned DB transactions\n- transactions.extend(new_transactions)\n \n if with_limit:\n transactions_queried_so_far = sum(x for _, x in self.tx_per_address.items())\n", "issue": "Install roboto mono\n## Abstract\r\n\r\nWe should install the mono version of our main font in order to increase legibility and proper alignment of content in certain scenarios.\r\n\r\n## Specification\r\n\r\nhttps://www.npmjs.com/package/typeface-roboto-mono install, require it where we require roboto, use it in classes where a mono font is required to line things up properly in data tables / cards / etc.\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom typing import Dict, List, Optional\n\nfrom rotkehlchen.db.dbhandler import DBHandler\nfrom rotkehlchen.db.ranges import DBQueryRanges\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.externalapis.etherscan import Etherscan\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\nfrom rotkehlchen.typing import ChecksumEthAddress, EthereumTransaction, Timestamp\nfrom rotkehlchen.user_messages import MessagesAggregator\nfrom rotkehlchen.utils.interfaces import LockableQueryObject, protect_with_lock\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\nFREE_ETH_TX_LIMIT = 500\n\n\nclass EthTransactions(LockableQueryObject):\n\n def __init__(\n self,\n database: DBHandler,\n etherscan: Etherscan,\n msg_aggregator: MessagesAggregator,\n ) -> None:\n super().__init__()\n self.database = database\n self.etherscan = etherscan\n self.msg_aggregator = msg_aggregator\n self.tx_per_address: Dict[ChecksumEthAddress, int] = defaultdict(int)\n\n def _single_address_query_transactions(\n self,\n address: ChecksumEthAddress,\n start_ts: Timestamp,\n end_ts: Timestamp,\n with_limit: bool,\n ) -> List[EthereumTransaction]:\n self.tx_per_address[address] = 0\n transactions = self.database.get_ethereum_transactions(\n from_ts=start_ts,\n to_ts=end_ts,\n address=address,\n )\n ranges = DBQueryRanges(self.database)\n ranges_to_query = ranges.get_location_query_ranges(\n location_string=f'ethtxs_{address}',\n start_ts=start_ts,\n end_ts=end_ts,\n )\n new_transactions = []\n for query_start_ts, query_end_ts in ranges_to_query:\n for internal in (False, True):\n try:\n new_transactions.extend(self.etherscan.get_transactions(\n account=address,\n internal=internal,\n from_ts=query_start_ts,\n to_ts=query_end_ts,\n ))\n except RemoteError as e:\n self.msg_aggregator.add_error(\n f'Got error \"{str(e)}\" while querying ethereum transactions '\n f'from Etherscan. 
Transactions not added to the DB '\n f'from_ts: {query_start_ts} '\n f'to_ts: {query_end_ts} '\n f'internal: {internal}',\n )\n\n # add new transactions to the DB\n if new_transactions != []:\n self.database.add_ethereum_transactions(new_transactions, from_etherscan=True)\n # and also set the last queried timestamps for the address\n ranges.update_used_query_range(\n location_string=f'ethtxs_{address}',\n start_ts=start_ts,\n end_ts=end_ts,\n ranges_to_query=ranges_to_query,\n )\n # finally append them to the already returned DB transactions\n transactions.extend(new_transactions)\n\n if with_limit:\n transactions_queried_so_far = sum(x for _, x in self.tx_per_address.items())\n remaining_num_tx = FREE_ETH_TX_LIMIT - transactions_queried_so_far\n returning_tx_length = min(remaining_num_tx, len(transactions))\n # Note down how many we got for this address\n self.tx_per_address[address] = returning_tx_length\n return transactions[:returning_tx_length]\n\n return transactions\n\n @protect_with_lock()\n def query(\n self,\n address: Optional[ChecksumEthAddress],\n from_ts: Timestamp,\n to_ts: Timestamp,\n with_limit: bool = False,\n recent_first: bool = False,\n ) -> List[EthereumTransaction]:\n \"\"\"Queries for all transactions (normal AND internal) of all ethereum accounts.\n Returns a list of all transactions of all accounts sorted by time.\n\n If `with_limit` is true then the api limit is applied\n\n if `recent_first` is true then the transactions are returned with the most\n recent first on the list\n\n May raise:\n - RemoteError if etherscan is used and there is a problem with reaching it or\n with parsing the response.\n \"\"\"\n transactions: List[EthereumTransaction] = []\n\n if address is not None:\n accounts = [address]\n else:\n accounts = self.database.get_blockchain_accounts().eth\n\n for address in accounts:\n transactions.extend(self._single_address_query_transactions(\n address=address,\n start_ts=from_ts,\n end_ts=to_ts,\n with_limit=with_limit,\n ))\n\n transactions.sort(key=lambda tx: tx.timestamp, reverse=recent_first)\n return transactions\n", "path": "rotkehlchen/chain/ethereum/transactions.py"}]} | 1,926 | 292 |
gh_patches_debug_17132 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-299 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't attempt to start on Windows (until supported)
As of right now, we error out when attempting to start on a Microsoft Windows system.
We should detect that the OS is not supported, and not start at all (and log that fact).
</issue>
<code>
[start of src/scout_apm/core/__init__.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 from os import getpid
6
7 from scout_apm.core.config import ScoutConfig
8 from scout_apm.core.context import AgentContext
9 from scout_apm.core.core_agent_manager import CoreAgentManager
10 from scout_apm.core.instrument_manager import InstrumentManager
11 from scout_apm.core.metadata import AppMetadata
12
13 try:
14 from scout_apm.core import objtrace
15 except ImportError:
16 objtrace = None
17
18 logger = logging.getLogger(__name__)
19
20
21 def install(*args, **kwargs):
22 if "config" in kwargs:
23 ScoutConfig().set(**kwargs["config"])
24 context = AgentContext.build(config=ScoutConfig())
25
26 if not context.config.value("monitor"):
27 logger.info(
28 "APM Not Launching on PID: %s - Configuration 'monitor' is not true",
29 getpid(),
30 )
31 return False
32
33 InstrumentManager().install_all()
34
35 if objtrace is not None:
36 objtrace.enable()
37
38 logger.debug("APM Launching on PID: %s", getpid())
39 launched = CoreAgentManager().launch()
40
41 AppMetadata.report()
42 if launched:
43 AgentContext.socket().stop()
44
45 return True
46
[end of src/scout_apm/core/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/core/__init__.py b/src/scout_apm/core/__init__.py
--- a/src/scout_apm/core/__init__.py
+++ b/src/scout_apm/core/__init__.py
@@ -2,6 +2,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
+import sys
from os import getpid
from scout_apm.core.config import ScoutConfig
@@ -23,6 +24,10 @@
ScoutConfig().set(**kwargs["config"])
context = AgentContext.build(config=ScoutConfig())
+ if sys.platform == "win32":
+ logger.info("APM Not Launching on PID: %s - Windows is not supported", getpid())
+ return False
+
if not context.config.value("monitor"):
logger.info(
"APM Not Launching on PID: %s - Configuration 'monitor' is not true",
| {"golden_diff": "diff --git a/src/scout_apm/core/__init__.py b/src/scout_apm/core/__init__.py\n--- a/src/scout_apm/core/__init__.py\n+++ b/src/scout_apm/core/__init__.py\n@@ -2,6 +2,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import logging\n+import sys\n from os import getpid\n \n from scout_apm.core.config import ScoutConfig\n@@ -23,6 +24,10 @@\n ScoutConfig().set(**kwargs[\"config\"])\n context = AgentContext.build(config=ScoutConfig())\n \n+ if sys.platform == \"win32\":\n+ logger.info(\"APM Not Launching on PID: %s - Windows is not supported\", getpid())\n+ return False\n+\n if not context.config.value(\"monitor\"):\n logger.info(\n \"APM Not Launching on PID: %s - Configuration 'monitor' is not true\",\n", "issue": "Don't attempt to start on Windows (until supported)\nAs of right now, we error out when attempting to start on a Microsoft Windows system.\r\n\r\nWe should detect that the OS is not supported, and not start at all (and log that fact).\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom os import getpid\n\nfrom scout_apm.core.config import ScoutConfig\nfrom scout_apm.core.context import AgentContext\nfrom scout_apm.core.core_agent_manager import CoreAgentManager\nfrom scout_apm.core.instrument_manager import InstrumentManager\nfrom scout_apm.core.metadata import AppMetadata\n\ntry:\n from scout_apm.core import objtrace\nexcept ImportError:\n objtrace = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef install(*args, **kwargs):\n if \"config\" in kwargs:\n ScoutConfig().set(**kwargs[\"config\"])\n context = AgentContext.build(config=ScoutConfig())\n\n if not context.config.value(\"monitor\"):\n logger.info(\n \"APM Not Launching on PID: %s - Configuration 'monitor' is not true\",\n getpid(),\n )\n return False\n\n InstrumentManager().install_all()\n\n if objtrace is not None:\n objtrace.enable()\n\n logger.debug(\"APM Launching on PID: %s\", getpid())\n launched = CoreAgentManager().launch()\n\n AppMetadata.report()\n if launched:\n AgentContext.socket().stop()\n\n return True\n", "path": "src/scout_apm/core/__init__.py"}]} | 951 | 211 |
gh_patches_debug_18254 | rasdani/github-patches | git_diff | streamlit__streamlit-2470 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Watchdog error on install (Mac)
I've tried to install it in a MAC using PIP and PIP3 and I get the same error:
Command "/usr/local/opt/python/bin/python3.7 -u -c "import setuptools, tokenize;__file__='/private/var/folders/0s/rkvsfhzn2930_3902mpks9lm0000gn/T/pip-install-3cscl6kq/watchdog/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" install --record /private/var/folders/0s/rkvsfhzn2930_3902mpks9lm0000gn/T/pip-record-qr49cvd0/install-record.txt --single-version-externally-managed --compile" failed with error code 1 in /private/var/folders/0s/rkvsfhzn2930_3902mpks9lm0000gn/T/pip-install-3cscl6kq/watchdog/
I'm using:
macOS Mojave 10.14.5
</issue>
<code>
[start of lib/setup.py]
1 import os
2 import platform
3 import setuptools
4 import subprocess
5 import sys
6
7 from setuptools.command.install import install
8
9 try:
10 from pipenv.project import Project
11 from pipenv.utils import convert_deps_to_pip
12 except:
13 exit_msg = (
14 "pipenv is required to package Streamlit. Please install pipenv and try again"
15 )
16 sys.exit(exit_msg)
17
18 VERSION = "0.72.0" # PEP-440
19
20 NAME = "streamlit"
21
22 DESCRIPTION = "The fastest way to build data apps in Python"
23
24 LONG_DESCRIPTION = (
25 "Streamlit's open-source app framework is the easiest way "
26 "for data scientists and machine learning engineers to "
27 "create beautiful, performant apps in only a few hours! "
28 "All in pure Python. All for free."
29 )
30
31 pipfile = Project(chdir=False).parsed_pipfile
32
33 packages = pipfile["packages"].copy()
34 requirements = convert_deps_to_pip(packages, r=False)
35
36 # Check whether xcode tools are available before making watchdog a
37 # dependency (only if the current system is a Mac).
38 if platform.system() == "Darwin":
39 has_xcode = subprocess.call(["xcode-select", "--version"], shell=False) == 0
40 has_gcc = subprocess.call(["gcc", "--version"], shell=False) == 0
41
42 if not (has_xcode and has_gcc):
43 try:
44 requirements.remove("watchdog")
45 except ValueError:
46 pass
47
48
49 class VerifyVersionCommand(install):
50 """Custom command to verify that the git tag matches our version"""
51
52 description = "verify that the git tag matches our version"
53
54 def run(self):
55 tag = os.getenv("CIRCLE_TAG")
56
57 if tag != VERSION:
58 info = "Git tag: {0} does not match the version of this app: {1}".format(
59 tag, VERSION
60 )
61 sys.exit(info)
62
63
64 setuptools.setup(
65 name=NAME,
66 version=VERSION,
67 description=DESCRIPTION,
68 long_description=LONG_DESCRIPTION,
69 url="https://streamlit.io",
70 author="Streamlit Inc",
71 author_email="[email protected]",
72 python_requires=">=3.6",
73 license="Apache 2",
74 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
75 # Requirements
76 install_requires=requirements,
77 zip_safe=False, # install source files not egg
78 include_package_data=True, # copy html and friends
79 entry_points={"console_scripts": ["streamlit = streamlit.cli:main"]},
80 # For Windows so that streamlit * commands work ie.
81 # - streamlit version
82 # - streamlit hello
83 scripts=["bin/streamlit.cmd"],
84 cmdclass={
85 "verify": VerifyVersionCommand,
86 },
87 )
88
[end of lib/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/setup.py b/lib/setup.py
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -1,7 +1,5 @@
import os
-import platform
import setuptools
-import subprocess
import sys
from setuptools.command.install import install
@@ -33,18 +31,6 @@
packages = pipfile["packages"].copy()
requirements = convert_deps_to_pip(packages, r=False)
-# Check whether xcode tools are available before making watchdog a
-# dependency (only if the current system is a Mac).
-if platform.system() == "Darwin":
- has_xcode = subprocess.call(["xcode-select", "--version"], shell=False) == 0
- has_gcc = subprocess.call(["gcc", "--version"], shell=False) == 0
-
- if not (has_xcode and has_gcc):
- try:
- requirements.remove("watchdog")
- except ValueError:
- pass
-
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
| {"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -1,7 +1,5 @@\n import os\n-import platform\n import setuptools\n-import subprocess\n import sys\n \n from setuptools.command.install import install\n@@ -33,18 +31,6 @@\n packages = pipfile[\"packages\"].copy()\n requirements = convert_deps_to_pip(packages, r=False)\n \n-# Check whether xcode tools are available before making watchdog a\n-# dependency (only if the current system is a Mac).\n-if platform.system() == \"Darwin\":\n- has_xcode = subprocess.call([\"xcode-select\", \"--version\"], shell=False) == 0\n- has_gcc = subprocess.call([\"gcc\", \"--version\"], shell=False) == 0\n-\n- if not (has_xcode and has_gcc):\n- try:\n- requirements.remove(\"watchdog\")\n- except ValueError:\n- pass\n-\n \n class VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n", "issue": "Watchdog error on install (Mac)\nI've tried to install it in a MAC using PIP and PIP3 and I get the same error:\r\n\r\nCommand \"/usr/local/opt/python/bin/python3.7 -u -c \"import setuptools, tokenize;__file__='/private/var/folders/0s/rkvsfhzn2930_3902mpks9lm0000gn/T/pip-install-3cscl6kq/watchdog/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\\r\\n', '\\n');f.close();exec(compile(code, __file__, 'exec'))\" install --record /private/var/folders/0s/rkvsfhzn2930_3902mpks9lm0000gn/T/pip-record-qr49cvd0/install-record.txt --single-version-externally-managed --compile\" failed with error code 1 in /private/var/folders/0s/rkvsfhzn2930_3902mpks9lm0000gn/T/pip-install-3cscl6kq/watchdog/\r\n\r\nI'm using: \r\nmacOS Mojave 10.14.5\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport setuptools\nimport subprocess\nimport sys\n\nfrom setuptools.command.install import install\n\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\nexcept:\n exit_msg = (\n \"pipenv is required to package Streamlit. Please install pipenv and try again\"\n )\n sys.exit(exit_msg)\n\nVERSION = \"0.72.0\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. 
All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n# Check whether xcode tools are available before making watchdog a\n# dependency (only if the current system is a Mac).\nif platform.system() == \"Darwin\":\n has_xcode = subprocess.call([\"xcode-select\", \"--version\"], shell=False) == 0\n has_gcc = subprocess.call([\"gcc\", \"--version\"], shell=False) == 0\n\n if not (has_xcode and has_gcc):\n try:\n requirements.remove(\"watchdog\")\n except ValueError:\n pass\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]} | 1,577 | 231 |
gh_patches_debug_27712 | rasdani/github-patches | git_diff | pyca__cryptography-10277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cryptography_vectors is missing from `test` extra
There is missing `cryptography_vectors` in the `test` extra in `pyproject.toml` so when tests are run from sdist with the `test` extra installed then testing fails and `cryptography_vectors` needs to be installed manually to make tests passing.
</issue>
<code>
[start of release.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 import pathlib
6 import re
7 import subprocess
8
9 import click
10 import tomllib
11 from packaging.version import Version
12
13
14 def run(*args: str) -> None:
15 print(f"[running] {list(args)}")
16 subprocess.check_call(list(args))
17
18
19 @click.group()
20 def cli():
21 pass
22
23
24 @cli.command()
25 def release() -> None:
26 base_dir = pathlib.Path(__file__).parent
27 with (base_dir / "pyproject.toml").open("rb") as f:
28 pyproject = tomllib.load(f)
29 version = pyproject["project"]["version"]
30
31 if Version(version).is_prerelease:
32 raise RuntimeError(
33 f"Can't release, pyproject.toml version is pre-release: {version}"
34 )
35
36 # Tag and push the tag (this will trigger the wheel builder in Actions)
37 run("git", "tag", "-s", version, "-m", f"{version} release")
38 run("git", "push", "--tags", "[email protected]:pyca/cryptography.git")
39
40
41 def replace_version(
42 p: pathlib.Path, variable_name: str, new_version: str
43 ) -> None:
44 content = p.read_text()
45
46 pattern = rf"^{variable_name}\s*=\s*.*$"
47 match = re.search(pattern, content, re.MULTILINE)
48 assert match is not None
49
50 start, end = match.span()
51 new_content = (
52 content[:start] + f'{variable_name} = "{new_version}"' + content[end:]
53 )
54
55 p.write_text(new_content)
56
57
58 @cli.command()
59 @click.argument("new_version")
60 def bump_version(new_version: str) -> None:
61 base_dir = pathlib.Path(__file__).parent
62
63 replace_version(base_dir / "pyproject.toml", "version", new_version)
64 replace_version(
65 base_dir / "src/cryptography/__about__.py", "__version__", new_version
66 )
67 replace_version(
68 base_dir / "vectors/pyproject.toml",
69 "version",
70 new_version,
71 )
72 replace_version(
73 base_dir / "vectors/cryptography_vectors/__about__.py",
74 "__version__",
75 new_version,
76 )
77
78
79 if __name__ == "__main__":
80 cli()
81
[end of release.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/release.py b/release.py
--- a/release.py
+++ b/release.py
@@ -38,23 +38,24 @@
run("git", "push", "--tags", "[email protected]:pyca/cryptography.git")
-def replace_version(
- p: pathlib.Path, variable_name: str, new_version: str
-) -> None:
+def replace_pattern(p: pathlib.Path, pattern: str, replacement: str) -> None:
content = p.read_text()
-
- pattern = rf"^{variable_name}\s*=\s*.*$"
match = re.search(pattern, content, re.MULTILINE)
assert match is not None
start, end = match.span()
- new_content = (
- content[:start] + f'{variable_name} = "{new_version}"' + content[end:]
- )
-
+ new_content = content[:start] + replacement + content[end:]
p.write_text(new_content)
+def replace_version(
+ p: pathlib.Path, variable_name: str, new_version: str
+) -> None:
+ replace_pattern(
+ p, rf"^{variable_name}\s*=\s*.*$", f'{variable_name} = "{new_version}"'
+ )
+
+
@cli.command()
@click.argument("new_version")
def bump_version(new_version: str) -> None:
@@ -75,6 +76,19 @@
new_version,
)
+ if Version(new_version).is_prerelease:
+ replace_pattern(
+ base_dir / "pyproject.toml",
+ r'"cryptography_vectors(==.*?)?"',
+ '"cryptography_vectors"',
+ )
+ else:
+ replace_pattern(
+ base_dir / "pyproject.toml",
+ r'"cryptography_vectors(==.*?)?"',
+ f'"cryptography_vectors=={new_version}"',
+ )
+
if __name__ == "__main__":
cli()
| {"golden_diff": "diff --git a/release.py b/release.py\n--- a/release.py\n+++ b/release.py\n@@ -38,23 +38,24 @@\n run(\"git\", \"push\", \"--tags\", \"[email protected]:pyca/cryptography.git\")\n \n \n-def replace_version(\n- p: pathlib.Path, variable_name: str, new_version: str\n-) -> None:\n+def replace_pattern(p: pathlib.Path, pattern: str, replacement: str) -> None:\n content = p.read_text()\n-\n- pattern = rf\"^{variable_name}\\s*=\\s*.*$\"\n match = re.search(pattern, content, re.MULTILINE)\n assert match is not None\n \n start, end = match.span()\n- new_content = (\n- content[:start] + f'{variable_name} = \"{new_version}\"' + content[end:]\n- )\n-\n+ new_content = content[:start] + replacement + content[end:]\n p.write_text(new_content)\n \n \n+def replace_version(\n+ p: pathlib.Path, variable_name: str, new_version: str\n+) -> None:\n+ replace_pattern(\n+ p, rf\"^{variable_name}\\s*=\\s*.*$\", f'{variable_name} = \"{new_version}\"'\n+ )\n+\n+\n @cli.command()\n @click.argument(\"new_version\")\n def bump_version(new_version: str) -> None:\n@@ -75,6 +76,19 @@\n new_version,\n )\n \n+ if Version(new_version).is_prerelease:\n+ replace_pattern(\n+ base_dir / \"pyproject.toml\",\n+ r'\"cryptography_vectors(==.*?)?\"',\n+ '\"cryptography_vectors\"',\n+ )\n+ else:\n+ replace_pattern(\n+ base_dir / \"pyproject.toml\",\n+ r'\"cryptography_vectors(==.*?)?\"',\n+ f'\"cryptography_vectors=={new_version}\"',\n+ )\n+\n \n if __name__ == \"__main__\":\n cli()\n", "issue": "cryptography_vectors is missing from `test` extra\nThere is missing `cryptography_vectors` in the `test` extra in `pyproject.toml` so when tests are run from sdist with the `test` extra installed then testing fails and `cryptography_vectors` needs to be installed manually to make tests passing.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nimport pathlib\nimport re\nimport subprocess\n\nimport click\nimport tomllib\nfrom packaging.version import Version\n\n\ndef run(*args: str) -> None:\n print(f\"[running] {list(args)}\")\n subprocess.check_call(list(args))\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected]()\ndef release() -> None:\n base_dir = pathlib.Path(__file__).parent\n with (base_dir / \"pyproject.toml\").open(\"rb\") as f:\n pyproject = tomllib.load(f)\n version = pyproject[\"project\"][\"version\"]\n\n if Version(version).is_prerelease:\n raise RuntimeError(\n f\"Can't release, pyproject.toml version is pre-release: {version}\"\n )\n\n # Tag and push the tag (this will trigger the wheel builder in Actions)\n run(\"git\", \"tag\", \"-s\", version, \"-m\", f\"{version} release\")\n run(\"git\", \"push\", \"--tags\", \"[email protected]:pyca/cryptography.git\")\n\n\ndef replace_version(\n p: pathlib.Path, variable_name: str, new_version: str\n) -> None:\n content = p.read_text()\n\n pattern = rf\"^{variable_name}\\s*=\\s*.*$\"\n match = re.search(pattern, content, re.MULTILINE)\n assert match is not None\n\n start, end = match.span()\n new_content = (\n content[:start] + f'{variable_name} = \"{new_version}\"' + content[end:]\n )\n\n p.write_text(new_content)\n\n\[email protected]()\[email protected](\"new_version\")\ndef bump_version(new_version: str) -> None:\n base_dir = pathlib.Path(__file__).parent\n\n replace_version(base_dir / \"pyproject.toml\", \"version\", new_version)\n replace_version(\n base_dir / \"src/cryptography/__about__.py\", \"__version__\", new_version\n )\n replace_version(\n base_dir / \"vectors/pyproject.toml\",\n \"version\",\n new_version,\n )\n replace_version(\n base_dir / \"vectors/cryptography_vectors/__about__.py\",\n \"__version__\",\n new_version,\n )\n\n\nif __name__ == \"__main__\":\n cli()\n", "path": "release.py"}]} | 1,283 | 430 |
gh_patches_debug_58135 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-1579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Untranslated "Plans"

Maybe we just remove it since there is already a header
</issue>
<code>
[start of meinberlin/apps/plans/forms.py]
1 from django import forms
2 from django.conf import settings
3 from django.db.models import Q
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.dashboard.components.forms import ProjectDashboardForm
7 from adhocracy4.maps import widgets as maps_widgets
8 from adhocracy4.projects import models as project_models
9 from meinberlin.apps.contrib import widgets as contrib_widgets
10
11 from . import models
12
13
14 def get_theme_options():
15 return models.Plan.objects\
16 .filter(~Q(theme=''))\
17 .order_by('theme')\
18 .values_list('theme', flat=True)\
19 .distinct()
20
21
22 class PlanForm(forms.ModelForm):
23
24 class Meta:
25 model = models.Plan
26 fields = [
27 'title',
28 'description_image',
29 'contact',
30 'point',
31 'point_label',
32 'district',
33 'cost',
34 'description',
35 'theme',
36 'status',
37 'participation']
38 widgets = {
39 'point': maps_widgets.MapChoosePointWidget(
40 polygon=settings.BERLIN_POLYGON),
41 'theme': contrib_widgets.TextWithDatalistWidget(attrs={
42 'options': get_theme_options
43 })
44 }
45 error_messages = {
46 'point': {
47 'required': _('Please locate the plan on the map.')
48 }
49 }
50
51 def __init__(self, *args, **kwargs):
52 super().__init__(*args, **kwargs)
53 self.fields['district'].empty_label = _('City wide')
54
55
56 class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):
57
58 widget = forms.RadioSelect
59
60 def clean(self, value):
61 if value is None:
62 return super().clean([])
63 return super().clean([value])
64
65
66 class ProjectPlansDashboardForm(ProjectDashboardForm):
67 plans = CustomMultipleChoiceField(queryset=None)
68
69 class Meta:
70 model = project_models.Project
71 fields = ['plans']
72 required_for_project_publish = ['plans']
73
74 def save(self, commit=False):
75 plans = self.cleaned_data['plans']
76 self.instance.plans.set(plans)
77
78 def __init__(self, *args, **kwargs):
79 super().__init__(*args, **kwargs)
80 self.initial['plans'] = self.instance.plans.all()
81 self.fields['plans'
82 ].queryset = self.instance.organisation.plan_set.all()
83
[end of meinberlin/apps/plans/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py
--- a/meinberlin/apps/plans/forms.py
+++ b/meinberlin/apps/plans/forms.py
@@ -64,7 +64,8 @@
class ProjectPlansDashboardForm(ProjectDashboardForm):
- plans = CustomMultipleChoiceField(queryset=None)
+ plans = CustomMultipleChoiceField(queryset=None,
+ label=_('Plans'))
class Meta:
model = project_models.Project
| {"golden_diff": "diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py\n--- a/meinberlin/apps/plans/forms.py\n+++ b/meinberlin/apps/plans/forms.py\n@@ -64,7 +64,8 @@\n \n \n class ProjectPlansDashboardForm(ProjectDashboardForm):\n- plans = CustomMultipleChoiceField(queryset=None)\n+ plans = CustomMultipleChoiceField(queryset=None,\n+ label=_('Plans'))\n \n class Meta:\n model = project_models.Project\n", "issue": "Untranslated \"Plans\"\n\r\n\r\n\r\nMaybe we just remove it since there is already a header\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.contrib import widgets as contrib_widgets\n\nfrom . import models\n\n\ndef get_theme_options():\n return models.Plan.objects\\\n .filter(~Q(theme=''))\\\n .order_by('theme')\\\n .values_list('theme', flat=True)\\\n .distinct()\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'theme',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON),\n 'theme': contrib_widgets.TextWithDatalistWidget(attrs={\n 'options': get_theme_options\n })\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.RadioSelect\n\n def clean(self, value):\n if value is None:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None)\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required_for_project_publish = ['plans']\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'\n ].queryset = self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}]} | 1,298 | 113 |
gh_patches_debug_18447 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-442 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Observation 'Bitcode' without going through a temporary file.
## 🚀 Feature
Have an observation which just returns the bytes of the bitcode, rather than the file. Going through a temporary file is a (tiny) bit annoying.
```
env.observation["Bitcode"] -> bytes
env.bitcode == env.observation["Bitcode"]
```
## Motivation
I'm passing bitcode to LLVM tools. They will accept bitcode on stdin and emit it to stdout. No need for a temporary file.
## Pitch
Don't use a file when a `bytes` will do?
## Alternatives
Keep using a file.
</issue>
<code>
[start of compiler_gym/spaces/sequence.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 from typing import Optional, Tuple
6
7 from gym.spaces import Space
8
9 from compiler_gym.spaces.scalar import Scalar
10
11
12 class Sequence(Space):
13 """A sequence of values. Each element of the sequence is of `dtype`. The
14 length of the sequence is bounded by `size_range`.
15
16 Example:
17
18 ::
19
20 >>> space = Sequence(size_range=(0, None), dtype=str)
21 >>> space.contains("Hello, world!")
22 True
23
24 ::
25
26 >>> space = Sequence(size_range=(256, 256), dtype=bytes)
27 >>> space.contains("Hello, world!")
28 False
29
30 :ivar size_range: A tuple indicating the `(lower, upper)` bounds for
31 sequence lengths. An upper bound of `None` means no upper bound. All
32 sequences must have a lower bound of length >= 0.
33 :ivar dtype: The data type for each element in a sequence.
34 :ivar opaque_data_format: An optional string describing an opaque data
35 format, e.g. a data structure that is serialized to a string/binary
36 array for transmission to the client. It is up to the client and service
37 to agree on how to decode observations using this value. For example,
38 an opaque_data_format of `string_json` could be used to indicate that
39 the observation is a string-serialized JSON value.
40 """
41
42 def __init__(
43 self,
44 size_range: Tuple[int, Optional[int]] = (0, None),
45 dtype=bytes,
46 opaque_data_format: Optional[str] = None,
47 scalar_range: Optional[Scalar] = None,
48 ):
49 """Constructor.
50
51 :param size_range: A tuple indicating the `(lower, upper)` bounds for
52 sequence lengths. An upper bound of `None` means no upper bound. All
53 sequences must have a lower bound of length >= 0.
54 :param dtype: The data type for each element in a sequence.
55 :param opaque_data_format: An optional string describing an opaque data
56 format, e.g. a data structure that is serialized to a string/binary
57 array for transmission to the client. It is up to the client and
58 service to agree on how to decode observations using this value. For
59 example, an opaque_data_format of `string_json` could be used to
60 indicate that the observation is a string-serialized JSON value.
61 :param scalar_range: If specified, this denotes the legal range of
62 each element in the sequence. This is enforced by
63 :meth:`contains() <compiler_gym.spaces.Sequence.contains>` checks.
64 """
65 self.size_range = size_range
66 self.dtype = dtype
67 self.opaque_data_format = opaque_data_format
68 self.scalar_range = scalar_range
69
70 def __repr__(self) -> str:
71 upper_bound = "inf" if self.size_range[1] is None else self.size_range[1]
72 d = f" -> {self.opaque_data_format}" if self.opaque_data_format else ""
73 return (
74 f"{self.dtype.__name__}_list<>[{int(self.size_range[0])},{upper_bound}]){d}"
75 )
76
77 def contains(self, x):
78 lower_bound = self.size_range[0]
79 upper_bound = float("inf") if self.size_range[1] is None else self.size_range[1]
80 if not (lower_bound <= len(x) <= upper_bound):
81 return False
82 for element in x:
83 if not isinstance(element, self.dtype):
84 return False
85
86 # Run the bounds check on every scalar element, if there is a scalar
87 # range specified.
88 if self.scalar_range:
89 return all(self.scalar_range.contains(s) for s in x)
90
91 return True
92
93 def sample(self):
94 """
95 .. warning::
96 The `Sequence` space cannot be sampled from.
97
98 :raises NotImplementedError: Not supported.
99 """
100 raise NotImplementedError
101
102 def __eq__(self, other):
103 if not isinstance(other, Sequence):
104 return False
105 return (
106 self.size_range == other.size_range
107 and self.dtype == other.dtype
108 and self.opaque_data_format == other.opaque_data_format
109 )
110
[end of compiler_gym/spaces/sequence.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/compiler_gym/spaces/sequence.py b/compiler_gym/spaces/sequence.py
--- a/compiler_gym/spaces/sequence.py
+++ b/compiler_gym/spaces/sequence.py
@@ -79,9 +79,18 @@
upper_bound = float("inf") if self.size_range[1] is None else self.size_range[1]
if not (lower_bound <= len(x) <= upper_bound):
return False
- for element in x:
- if not isinstance(element, self.dtype):
+
+ # TODO(cummins): The dtype API is inconsistent. When dtype=str or
+ # dtype=bytes, we expect this to be the type of the entire sequence. But
+ # for dtype=int, we expect this to be the type of each element. We
+ # should distinguish these differences better.
+ if self.dtype in {str, bytes}:
+ if not isinstance(x, self.dtype):
return False
+ else:
+ for element in x:
+ if not isinstance(element, self.dtype):
+ return False
# Run the bounds check on every scalar element, if there is a scalar
# range specified.
| {"golden_diff": "diff --git a/compiler_gym/spaces/sequence.py b/compiler_gym/spaces/sequence.py\n--- a/compiler_gym/spaces/sequence.py\n+++ b/compiler_gym/spaces/sequence.py\n@@ -79,9 +79,18 @@\n upper_bound = float(\"inf\") if self.size_range[1] is None else self.size_range[1]\n if not (lower_bound <= len(x) <= upper_bound):\n return False\n- for element in x:\n- if not isinstance(element, self.dtype):\n+\n+ # TODO(cummins): The dtype API is inconsistent. When dtype=str or\n+ # dtype=bytes, we expect this to be the type of the entire sequence. But\n+ # for dtype=int, we expect this to be the type of each element. We\n+ # should distinguish these differences better.\n+ if self.dtype in {str, bytes}:\n+ if not isinstance(x, self.dtype):\n return False\n+ else:\n+ for element in x:\n+ if not isinstance(element, self.dtype):\n+ return False\n \n # Run the bounds check on every scalar element, if there is a scalar\n # range specified.\n", "issue": "Observation 'Bitcode' without going through a temporary file.\n## \ud83d\ude80 Feature\r\n\r\nHave an observation which just returns the bytes of the bitcode, rather than the file. Going through a temporary file is a (tiny) bit annoying.\r\n\r\n```\r\nenv.observation[\"Bitcode\"] -> bytes\r\nenv.bitcode == env.observation[\"Bitcode\"]\r\n```\r\n## Motivation\r\n\r\nI'm passing bitcode to LLVM tools. They will accept bitcode on stdin and emit it to stdout. No need for a temporary file.\r\n\r\n## Pitch\r\n\r\nDon't use a file when a `bytes` will do? \r\n\r\n## Alternatives\r\n\r\nKeep using a file.\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Optional, Tuple\n\nfrom gym.spaces import Space\n\nfrom compiler_gym.spaces.scalar import Scalar\n\n\nclass Sequence(Space):\n \"\"\"A sequence of values. Each element of the sequence is of `dtype`. The\n length of the sequence is bounded by `size_range`.\n\n Example:\n\n ::\n\n >>> space = Sequence(size_range=(0, None), dtype=str)\n >>> space.contains(\"Hello, world!\")\n True\n\n ::\n\n >>> space = Sequence(size_range=(256, 256), dtype=bytes)\n >>> space.contains(\"Hello, world!\")\n False\n\n :ivar size_range: A tuple indicating the `(lower, upper)` bounds for\n sequence lengths. An upper bound of `None` means no upper bound. All\n sequences must have a lower bound of length >= 0.\n :ivar dtype: The data type for each element in a sequence.\n :ivar opaque_data_format: An optional string describing an opaque data\n format, e.g. a data structure that is serialized to a string/binary\n array for transmission to the client. It is up to the client and service\n to agree on how to decode observations using this value. For example,\n an opaque_data_format of `string_json` could be used to indicate that\n the observation is a string-serialized JSON value.\n \"\"\"\n\n def __init__(\n self,\n size_range: Tuple[int, Optional[int]] = (0, None),\n dtype=bytes,\n opaque_data_format: Optional[str] = None,\n scalar_range: Optional[Scalar] = None,\n ):\n \"\"\"Constructor.\n\n :param size_range: A tuple indicating the `(lower, upper)` bounds for\n sequence lengths. An upper bound of `None` means no upper bound. All\n sequences must have a lower bound of length >= 0.\n :param dtype: The data type for each element in a sequence.\n :param opaque_data_format: An optional string describing an opaque data\n format, e.g. 
a data structure that is serialized to a string/binary\n array for transmission to the client. It is up to the client and\n service to agree on how to decode observations using this value. For\n example, an opaque_data_format of `string_json` could be used to\n indicate that the observation is a string-serialized JSON value.\n :param scalar_range: If specified, this denotes the legal range of\n each element in the sequence. This is enforced by\n :meth:`contains() <compiler_gym.spaces.Sequence.contains>` checks.\n \"\"\"\n self.size_range = size_range\n self.dtype = dtype\n self.opaque_data_format = opaque_data_format\n self.scalar_range = scalar_range\n\n def __repr__(self) -> str:\n upper_bound = \"inf\" if self.size_range[1] is None else self.size_range[1]\n d = f\" -> {self.opaque_data_format}\" if self.opaque_data_format else \"\"\n return (\n f\"{self.dtype.__name__}_list<>[{int(self.size_range[0])},{upper_bound}]){d}\"\n )\n\n def contains(self, x):\n lower_bound = self.size_range[0]\n upper_bound = float(\"inf\") if self.size_range[1] is None else self.size_range[1]\n if not (lower_bound <= len(x) <= upper_bound):\n return False\n for element in x:\n if not isinstance(element, self.dtype):\n return False\n\n # Run the bounds check on every scalar element, if there is a scalar\n # range specified.\n if self.scalar_range:\n return all(self.scalar_range.contains(s) for s in x)\n\n return True\n\n def sample(self):\n \"\"\"\n .. warning::\n The `Sequence` space cannot be sampled from.\n\n :raises NotImplementedError: Not supported.\n \"\"\"\n raise NotImplementedError\n\n def __eq__(self, other):\n if not isinstance(other, Sequence):\n return False\n return (\n self.size_range == other.size_range\n and self.dtype == other.dtype\n and self.opaque_data_format == other.opaque_data_format\n )\n", "path": "compiler_gym/spaces/sequence.py"}]} | 1,842 | 261 |
gh_patches_debug_67277 | rasdani/github-patches | git_diff | litestar-org__litestar-2244 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of litestar/cli/main.py]
1 from __future__ import annotations
2
3 import sys
4 from pathlib import Path
5 from typing import TYPE_CHECKING
6
7 from ._utils import RICH_CLICK_INSTALLED, LitestarEnv, LitestarExtensionGroup
8 from .commands import core, schema, sessions
9
10 if TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover
11 import click
12 from click import Context, group, option, pass_context
13 from click import Path as ClickPath
14 else:
15 import rich_click as click
16 from rich_click import Context, group, option, pass_context
17 from rich_click import Path as ClickPath
18 from rich_click.cli import patch as rich_click_patch
19
20 rich_click_patch()
21 click.rich_click.USE_RICH_MARKUP = True
22 click.rich_click.USE_MARKDOWN = False
23 click.rich_click.SHOW_ARGUMENTS = True
24 click.rich_click.GROUP_ARGUMENTS_OPTIONS = True
25 click.rich_click.SHOW_ARGUMENTS = True
26 click.rich_click.GROUP_ARGUMENTS_OPTIONS = True
27 click.rich_click.STYLE_ERRORS_SUGGESTION = "magenta italic"
28 click.rich_click.ERRORS_SUGGESTION = ""
29 click.rich_click.ERRORS_EPILOGUE = ""
30 click.rich_click.MAX_WIDTH = 100
31 click.rich_click.SHOW_METAVARS_COLUMN = True
32 click.rich_click.APPEND_METAVARS_HELP = True
33
34
35 __all__ = ("litestar_group",)
36
37
38 @group(cls=LitestarExtensionGroup, context_settings={"help_option_names": ["-h", "--help"]})
39 @option("--app", "app_path", help="Module path to a Litestar application")
40 @option(
41 "--app-dir",
42 help="Look for APP in the specified directory, by adding this to the PYTHONPATH. Defaults to the current working directory.",
43 default=None,
44 type=ClickPath(dir_okay=True, file_okay=False, path_type=Path),
45 show_default=False,
46 )
47 @pass_context
48 def litestar_group(ctx: Context, app_path: str | None, app_dir: Path | None = None) -> None:
49 """Litestar CLI."""
50 sys.path.append(str(app_dir))
51
52 if ctx.obj is None: # env has not been loaded yet, so we can lazy load it
53 ctx.obj = lambda: LitestarEnv.from_env(app_path)
54
55
56 # add sub commands here
57
58 litestar_group.add_command(core.info_command)
59 litestar_group.add_command(core.run_command)
60 litestar_group.add_command(core.routes_command)
61 litestar_group.add_command(core.version_command)
62 litestar_group.add_command(sessions.sessions_group)
63 litestar_group.add_command(schema.schema_group)
64
[end of litestar/cli/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/cli/main.py b/litestar/cli/main.py
--- a/litestar/cli/main.py
+++ b/litestar/cli/main.py
@@ -27,7 +27,7 @@
click.rich_click.STYLE_ERRORS_SUGGESTION = "magenta italic"
click.rich_click.ERRORS_SUGGESTION = ""
click.rich_click.ERRORS_EPILOGUE = ""
- click.rich_click.MAX_WIDTH = 100
+ click.rich_click.MAX_WIDTH = 80
click.rich_click.SHOW_METAVARS_COLUMN = True
click.rich_click.APPEND_METAVARS_HELP = True
| {"golden_diff": "diff --git a/litestar/cli/main.py b/litestar/cli/main.py\n--- a/litestar/cli/main.py\n+++ b/litestar/cli/main.py\n@@ -27,7 +27,7 @@\n click.rich_click.STYLE_ERRORS_SUGGESTION = \"magenta italic\"\n click.rich_click.ERRORS_SUGGESTION = \"\"\n click.rich_click.ERRORS_EPILOGUE = \"\"\n- click.rich_click.MAX_WIDTH = 100\n+ click.rich_click.MAX_WIDTH = 80\n click.rich_click.SHOW_METAVARS_COLUMN = True\n click.rich_click.APPEND_METAVARS_HELP = True\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom ._utils import RICH_CLICK_INSTALLED, LitestarEnv, LitestarExtensionGroup\nfrom .commands import core, schema, sessions\n\nif TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover\n import click\n from click import Context, group, option, pass_context\n from click import Path as ClickPath\nelse:\n import rich_click as click\n from rich_click import Context, group, option, pass_context\n from rich_click import Path as ClickPath\n from rich_click.cli import patch as rich_click_patch\n\n rich_click_patch()\n click.rich_click.USE_RICH_MARKUP = True\n click.rich_click.USE_MARKDOWN = False\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.STYLE_ERRORS_SUGGESTION = \"magenta italic\"\n click.rich_click.ERRORS_SUGGESTION = \"\"\n click.rich_click.ERRORS_EPILOGUE = \"\"\n click.rich_click.MAX_WIDTH = 100\n click.rich_click.SHOW_METAVARS_COLUMN = True\n click.rich_click.APPEND_METAVARS_HELP = True\n\n\n__all__ = (\"litestar_group\",)\n\n\n@group(cls=LitestarExtensionGroup, context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\n@option(\"--app\", \"app_path\", help=\"Module path to a Litestar application\")\n@option(\n \"--app-dir\",\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH. Defaults to the current working directory.\",\n default=None,\n type=ClickPath(dir_okay=True, file_okay=False, path_type=Path),\n show_default=False,\n)\n@pass_context\ndef litestar_group(ctx: Context, app_path: str | None, app_dir: Path | None = None) -> None:\n \"\"\"Litestar CLI.\"\"\"\n sys.path.append(str(app_dir))\n\n if ctx.obj is None: # env has not been loaded yet, so we can lazy load it\n ctx.obj = lambda: LitestarEnv.from_env(app_path)\n\n\n# add sub commands here\n\nlitestar_group.add_command(core.info_command)\nlitestar_group.add_command(core.run_command)\nlitestar_group.add_command(core.routes_command)\nlitestar_group.add_command(core.version_command)\nlitestar_group.add_command(sessions.sessions_group)\nlitestar_group.add_command(schema.schema_group)\n", "path": "litestar/cli/main.py"}]} | 1,401 | 148 |
gh_patches_debug_40235 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Completion: match terms in any order
[This regex](https://github.com/The-Compiler/qutebrowser/blob/master/qutebrowser/completion/models/sortfilter.py#L75), which is used to filter items for completion, constrains the match to the same order the terms are supplied by the user. I.e., "browser qute" does not match "qutebrowser". According to [this post](http://stackoverflow.com/a/18944925/4180822), this issue can be solved by constructing better regular expression using positive lookaheads. I think it's worthwhile to examine the performance impact of the solution, or find a different one.
</issue>
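
The lookahead technique referenced in the issue can be sketched standalone. This helper is illustrative only — the name `matches_any_order` and the use of `re.search` are not from qutebrowser, and the merged diff further down takes a different route, building an SQL `WHERE` clause with one `LIKE` term per word:

```python
import re

def matches_any_order(pattern, text):
    # One zero-width positive lookahead per term: every term must occur
    # somewhere in `text`, but the order no longer matters.
    terms = (re.escape(t) for t in pattern.split())
    regex = ''.join('(?=.*{})'.format(t) for t in terms)
    return re.search(regex, text, re.IGNORECASE | re.DOTALL) is not None

assert matches_any_order("browser qute", "qutebrowser")
assert not matches_any_order("firefox qute", "qutebrowser")
```
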
<code>
[start of qutebrowser/completion/models/histcategory.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """A completion category that queries the SQL History store."""
21
22 import re
23
24 from PyQt5.QtSql import QSqlQueryModel
25
26 from qutebrowser.misc import sql
27 from qutebrowser.utils import debug
28 from qutebrowser.config import config
29
30
31 class HistoryCategory(QSqlQueryModel):
32
33 """A completion category that queries the SQL History store."""
34
35 def __init__(self, *, delete_func=None, parent=None):
36 """Create a new History completion category."""
37 super().__init__(parent=parent)
38 self.name = "History"
39
40 # replace ' in timestamp-format to avoid breaking the query
41 timestamp_format = config.val.completion.timestamp_format
42 timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
43 .format(timestamp_format.replace("'", "`")))
44
45 self._query = sql.Query(' '.join([
46 "SELECT url, title, {}".format(timefmt),
47 "FROM CompletionHistory",
48 # the incoming pattern will have literal % and _ escaped with '\'
49 # we need to tell sql to treat '\' as an escape character
50 "WHERE ((url || title) LIKE :pat escape '\\')",
51 self._atime_expr(),
52 "ORDER BY last_atime DESC",
53 ]), forward_only=False)
54
55 # advertise that this model filters by URL and title
56 self.columns_to_filter = [0, 1]
57 self.delete_func = delete_func
58
59 def _atime_expr(self):
60 """If max_items is set, return an expression to limit the query."""
61 max_items = config.val.completion.web_history_max_items
62 # HistoryCategory should not be added to the completion in that case.
63 assert max_items != 0
64
65 if max_items < 0:
66 return ''
67
68 min_atime = sql.Query(' '.join([
69 'SELECT min(last_atime) FROM',
70 '(SELECT last_atime FROM CompletionHistory',
71 'ORDER BY last_atime DESC LIMIT :limit)',
72 ])).run(limit=max_items).value()
73
74 if not min_atime:
75 # if there are no history items, min_atime may be '' (issue #2849)
76 return ''
77
78 return "AND last_atime >= {}".format(min_atime)
79
80 def set_pattern(self, pattern):
81 """Set the pattern used to filter results.
82
83 Args:
84 pattern: string pattern to filter by.
85 """
86 # escape to treat a user input % or _ as a literal, not a wildcard
87 pattern = pattern.replace('%', '\\%')
88 pattern = pattern.replace('_', '\\_')
89 # treat spaces as wildcards to match any of the typed words
90 pattern = re.sub(r' +', '%', pattern)
91 pattern = '%{}%'.format(pattern)
92 with debug.log_time('sql', 'Running completion query'):
93 self._query.run(pat=pattern)
94 self.setQuery(self._query)
95
96 def removeRows(self, row, _count, _parent=None):
97 """Override QAbstractItemModel::removeRows to re-run sql query."""
98 # re-run query to reload updated table
99 with debug.log_time('sql', 'Re-running completion query post-delete'):
100 self._query.run()
101 self.setQuery(self._query)
102 while self.rowCount() < row:
103 self.fetchMore()
104 return True
105
[end of qutebrowser/completion/models/histcategory.py]
</code>
</patch> | diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py
--- a/qutebrowser/completion/models/histcategory.py
+++ b/qutebrowser/completion/models/histcategory.py
@@ -19,8 +19,6 @@
"""A completion category that queries the SQL History store."""
-import re
-
from PyQt5.QtSql import QSqlQueryModel
from qutebrowser.misc import sql
@@ -36,21 +34,7 @@
"""Create a new History completion category."""
super().__init__(parent=parent)
self.name = "History"
-
- # replace ' in timestamp-format to avoid breaking the query
- timestamp_format = config.val.completion.timestamp_format
- timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
- .format(timestamp_format.replace("'", "`")))
-
- self._query = sql.Query(' '.join([
- "SELECT url, title, {}".format(timefmt),
- "FROM CompletionHistory",
- # the incoming pattern will have literal % and _ escaped with '\'
- # we need to tell sql to treat '\' as an escape character
- "WHERE ((url || title) LIKE :pat escape '\\')",
- self._atime_expr(),
- "ORDER BY last_atime DESC",
- ]), forward_only=False)
+ self._query = None
# advertise that this model filters by URL and title
self.columns_to_filter = [0, 1]
@@ -86,11 +70,36 @@
# escape to treat a user input % or _ as a literal, not a wildcard
pattern = pattern.replace('%', '\\%')
pattern = pattern.replace('_', '\\_')
- # treat spaces as wildcards to match any of the typed words
- pattern = re.sub(r' +', '%', pattern)
- pattern = '%{}%'.format(pattern)
+ words = ['%{}%'.format(w) for w in pattern.split(' ')]
+
+ # build a where clause to match all of the words in any order
+ # given the search term "a b", the WHERE clause would be:
+ # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')
+ where_clause = ' AND '.join(
+ "(url || title) LIKE :{} escape '\\'".format(i)
+ for i in range(len(words)))
+
+ # replace ' in timestamp-format to avoid breaking the query
+ timestamp_format = config.val.completion.timestamp_format
+ timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
+ .format(timestamp_format.replace("'", "`")))
+
+ if not self._query or len(words) != len(self._query.boundValues()):
+ # if the number of words changed, we need to generate a new query
+ # otherwise, we can reuse the prepared query for performance
+ self._query = sql.Query(' '.join([
+ "SELECT url, title, {}".format(timefmt),
+ "FROM CompletionHistory",
+ # the incoming pattern will have literal % and _ escaped
+ # we need to tell sql to treat '\' as an escape character
+ 'WHERE ({})'.format(where_clause),
+ self._atime_expr(),
+ "ORDER BY last_atime DESC",
+ ]), forward_only=False)
+
with debug.log_time('sql', 'Running completion query'):
- self._query.run(pat=pattern)
+ self._query.run(**{
+ str(i): w for i, w in enumerate(words)})
self.setQuery(self._query)
def removeRows(self, row, _count, _parent=None):
| {"golden_diff": "diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\n--- a/qutebrowser/completion/models/histcategory.py\n+++ b/qutebrowser/completion/models/histcategory.py\n@@ -19,8 +19,6 @@\n \n \"\"\"A completion category that queries the SQL History store.\"\"\"\n \n-import re\n-\n from PyQt5.QtSql import QSqlQueryModel\n \n from qutebrowser.misc import sql\n@@ -36,21 +34,7 @@\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n-\n- # replace ' in timestamp-format to avoid breaking the query\n- timestamp_format = config.val.completion.timestamp_format\n- timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n- .format(timestamp_format.replace(\"'\", \"`\")))\n-\n- self._query = sql.Query(' '.join([\n- \"SELECT url, title, {}\".format(timefmt),\n- \"FROM CompletionHistory\",\n- # the incoming pattern will have literal % and _ escaped with '\\'\n- # we need to tell sql to treat '\\' as an escape character\n- \"WHERE ((url || title) LIKE :pat escape '\\\\')\",\n- self._atime_expr(),\n- \"ORDER BY last_atime DESC\",\n- ]), forward_only=False)\n+ self._query = None\n \n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n@@ -86,11 +70,36 @@\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n- # treat spaces as wildcards to match any of the typed words\n- pattern = re.sub(r' +', '%', pattern)\n- pattern = '%{}%'.format(pattern)\n+ words = ['%{}%'.format(w) for w in pattern.split(' ')]\n+\n+ # build a where clause to match all of the words in any order\n+ # given the search term \"a b\", the WHERE clause would be:\n+ # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')\n+ where_clause = ' AND '.join(\n+ \"(url || title) LIKE :{} escape '\\\\'\".format(i)\n+ for i in range(len(words)))\n+\n+ # replace ' in timestamp-format to avoid breaking the query\n+ timestamp_format = config.val.completion.timestamp_format\n+ timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n+ .format(timestamp_format.replace(\"'\", \"`\")))\n+\n+ if not self._query or len(words) != len(self._query.boundValues()):\n+ # if the number of words changed, we need to generate a new query\n+ # otherwise, we can reuse the prepared query for performance\n+ self._query = sql.Query(' '.join([\n+ \"SELECT url, title, {}\".format(timefmt),\n+ \"FROM CompletionHistory\",\n+ # the incoming pattern will have literal % and _ escaped\n+ # we need to tell sql to treat '\\' as an escape character\n+ 'WHERE ({})'.format(where_clause),\n+ self._atime_expr(),\n+ \"ORDER BY last_atime DESC\",\n+ ]), forward_only=False)\n+\n with debug.log_time('sql', 'Running completion query'):\n- self._query.run(pat=pattern)\n+ self._query.run(**{\n+ str(i): w for i, w in enumerate(words)})\n self.setQuery(self._query)\n \n def removeRows(self, row, _count, _parent=None):\n", "issue": "Completion: match terms in any order\n[This regex](https://github.com/The-Compiler/qutebrowser/blob/master/qutebrowser/completion/models/sortfilter.py#L75), which is used to filter items for completion, constrains the match to the same order the terms are supplied by the user. I.e., \"browser qute\" does not match \"qutebrowser\". 
According to [this post](http://stackoverflow.com/a/18944925/4180822), this issue can be solved by constructing better regular expression using positive lookaheads. I think it's worthwhile to examine the performance impact of the solution, or find a different one.\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nimport re\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n\n # replace ' in timestamp-format to avoid breaking the query\n timestamp_format = config.val.completion.timestamp_format\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(timestamp_format.replace(\"'\", \"`\")))\n\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped with '\\'\n # we need to tell sql to treat '\\' as an escape character\n \"WHERE ((url || title) LIKE :pat escape '\\\\')\",\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.val.completion.web_history_max_items\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n if not min_atime:\n # if there are no history items, min_atime may be '' (issue #2849)\n return ''\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n # treat spaces as wildcards to match any of the typed words\n pattern = re.sub(r' +', '%', pattern)\n pattern = '%{}%'.format(pattern)\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(pat=pattern)\n 
self.setQuery(self._query)\n\n def removeRows(self, row, _count, _parent=None):\n \"\"\"Override QAbstractItemModel::removeRows to re-run sql query.\"\"\"\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n while self.rowCount() < row:\n self.fetchMore()\n return True\n", "path": "qutebrowser/completion/models/histcategory.py"}]} | 1,803 | 822 |
gh_patches_debug_5403 | rasdani/github-patches | git_diff | ivy-llc__ivy-18003 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fft2
</issue>
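
Since the issue body is just the feature name, here is a minimal sketch of the requested frontend, following the wrapper pattern already used in the file below. It assumes `ivy.fft2` exposes `s`/`dim`/`norm` parameters mirroring `scipy.fft.fft2` (the merged diff after the code confirms this shape):

```python
import ivy
from ivy.functional.frontends.scipy.func_wrapper import to_ivy_arrays_and_back

# fft2 -- assumes ivy.fft2(x, s=..., dim=..., norm=...) is available
@to_ivy_arrays_and_back
def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False):
    return ivy.fft2(x, s=s, dim=axes, norm=norm)
```
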
<code>
[start of ivy/functional/frontends/scipy/fft/fft.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.scipy.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6
7
8 # fft
9 @to_ivy_arrays_and_back
10 def fft(x, n=None, axis=-1, norm="backward", overwrite_x=False):
11 return ivy.fft(x, axis, norm=norm, n=n)
12
13
14 # ifft
15 @to_ivy_arrays_and_back
16 def ifft(x, n=None, axis=-1, norm="backward", overwrite_x=False):
17 return ivy.ifft(x, axis, norm=norm, n=n)
18
19
20 # dct
21 @to_ivy_arrays_and_back
22 def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):
23 return ivy.dct(x, type=type, n=n, axis=axis, norm=norm)
24
25
26 # idct
27 @to_ivy_arrays_and_back
28 def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):
29 inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
30 return ivy.dct(x, type=inverse_type, n=n, axis=axis, norm=norm)
31
[end of ivy/functional/frontends/scipy/fft/fft.py]
</code>
</patch> | diff --git a/ivy/functional/frontends/scipy/fft/fft.py b/ivy/functional/frontends/scipy/fft/fft.py
--- a/ivy/functional/frontends/scipy/fft/fft.py
+++ b/ivy/functional/frontends/scipy/fft/fft.py
@@ -28,3 +28,8 @@
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):
inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
return ivy.dct(x, type=inverse_type, n=n, axis=axis, norm=norm)
+
+
+@to_ivy_arrays_and_back
+def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False):
+ return ivy.fft2(x, s=s, dim=axes, norm=norm)
| {"golden_diff": "diff --git a/ivy/functional/frontends/scipy/fft/fft.py b/ivy/functional/frontends/scipy/fft/fft.py\n--- a/ivy/functional/frontends/scipy/fft/fft.py\n+++ b/ivy/functional/frontends/scipy/fft/fft.py\n@@ -28,3 +28,8 @@\n def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):\n inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]\n return ivy.dct(x, type=inverse_type, n=n, axis=axis, norm=norm)\n+\n+\n+@to_ivy_arrays_and_back\n+def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False):\n+ return ivy.fft2(x, s=s, dim=axes, norm=norm)\n", "issue": "fft2\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.scipy.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n# fft\n@to_ivy_arrays_and_back\ndef fft(x, n=None, axis=-1, norm=\"backward\", overwrite_x=False):\n return ivy.fft(x, axis, norm=norm, n=n)\n\n\n# ifft\n@to_ivy_arrays_and_back\ndef ifft(x, n=None, axis=-1, norm=\"backward\", overwrite_x=False):\n return ivy.ifft(x, axis, norm=norm, n=n)\n\n\n# dct\n@to_ivy_arrays_and_back\ndef dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):\n return ivy.dct(x, type=type, n=n, axis=axis, norm=norm)\n\n\n# idct\n@to_ivy_arrays_and_back\ndef idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):\n inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]\n return ivy.dct(x, type=inverse_type, n=n, axis=axis, norm=norm)\n", "path": "ivy/functional/frontends/scipy/fft/fft.py"}]} | 892 | 212 |
gh_patches_debug_6100 | rasdani/github-patches | git_diff | conan-io__conan-center-index-2011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Some system packages fail to install (xorg/system, opengl/system)
<!--
Please don't forget to update the issue title.
Include all applicable information to help us reproduce your problem.
To help us debug your issue please explain:
--> When configuring a CMake project template that used Conan, I encountered an error while configuring CMake:
`opengl/system: ERROR: while executing system_requirements(): 'SysConfigOpenGLConan' object has no attribute 'warn'`. After attempting a manual installation with `conan install opengl/system@` I still have the same problem and output. I have no idea what could be causing this, and after a few hours of googling I failed to find anything that would explain it. I also get the exact same output when installing `xorg/system`.
### Environment Details (include every applicable attribute)
* Operating System+version: Pop!_OS 20.04 LTS x86_64
* Compiler+version: gcc (Ubuntu 9.3.0-10ubuntu2) 9.3.0
* Conan version: Conan version 1.26.0
* Python version: Python 2.7.18rc1 AND Python 3.8.2
### Steps to reproduce (Include if Applicable)
`conan install opengl/system@`
`conan install xorg/system@`
### Logs (Executed commands with output) (Include/Attach if Applicable)
```
Configuration:
[settings]
arch=x86_64
arch_build=x86_64
build_type=Release
os=Linux
os_build=Linux
[options]
[build_requires]
[env]
Installing package: opengl/system
Requirements
opengl/system from 'conan-center' - Cache
Packages
opengl/system:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache
Installing (downloading, building) binaries...
opengl/system: ERROR: while executing system_requirements(): 'SysConfigOpenGLConan' object has no attribute 'warn'
ERROR: Error in system requirements
```
</issue>
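
The traceback names the root cause directly: `ConanFile` has no `warn` method; logging goes through the `self.output` object. A stripped-down recipe showing the working call — the `DemoConan` class is made up for illustration, but `self.output.warn` follows the Conan 1.x API used in the merged diff below:

```python
from conans import ConanFile

class DemoConan(ConanFile):
    name = "demo"
    version = "system"
    settings = ("os",)

    def system_requirements(self):
        # self.warn(...) raises AttributeError -- the logger lives on self.output
        self.output.warn("Don't know how to install OpenGL for your distro.")
```
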
<code>
[start of recipes/opengl/all/conanfile.py]
1 from conans import ConanFile, tools
2 from conans.errors import ConanException
3 import os
4
5
6 class SysConfigOpenGLConan(ConanFile):
7 name = "opengl"
8 version = "system"
9 description = "cross-platform virtual conan package for the OpenGL support"
10 topics = ("conan", "opengl", "gl")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://www.opengl.org/"
13 license = "MIT"
14 settings = ("os",)
15
16 def package_id(self):
17 self.info.header_only()
18
19 def _fill_cppinfo_from_pkgconfig(self, name):
20 pkg_config = tools.PkgConfig(name)
21 if not pkg_config.provides:
22 raise ConanException("OpenGL development files aren't available, give up")
23 libs = [lib[2:] for lib in pkg_config.libs_only_l]
24 lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
25 ldflags = [flag for flag in pkg_config.libs_only_other]
26 include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
27 cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
28 defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
29
30 self.cpp_info.system_libs.extend(libs)
31 self.cpp_info.libdirs.extend(lib_dirs)
32 self.cpp_info.sharedlinkflags.extend(ldflags)
33 self.cpp_info.exelinkflags.extend(ldflags)
34 self.cpp_info.defines.extend(defines)
35 self.cpp_info.includedirs.extend(include_dirs)
36 self.cpp_info.cflags.extend(cflags)
37 self.cpp_info.cxxflags.extend(cflags)
38
39 def system_requirements(self):
40 if tools.os_info.is_linux and self.settings.os == "Linux":
41 package_tool = tools.SystemPackageTool(conanfile=self, default_mode='verify')
42 if tools.os_info.with_yum:
43 if tools.os_info.linux_distro == "fedora" and tools.os_info.os_version >= "32":
44 packages = ["libglvnd-devel"]
45 else:
46 packages = ["mesa-libGL-devel"]
47 elif tools.os_info.with_apt:
48 ubuntu_20_or_later = tools.os_info.linux_distro == "ubuntu" and tools.os_info.os_version >= "20"
49 debian_11_or_later = tools.os_info.linux_distro == "debian" and tools.os_info.os_version >= "11"
50 if ubuntu_20_or_later or debian_11_or_later:
51 packages = ["libgl-dev"]
52 else:
53 packages = ["libgl1-mesa-dev"]
54 elif tools.os_info.with_pacman:
55 packages = ["libglvnd"]
56 elif tools.os_info.with_zypper:
57 packages = ["Mesa-libGL-devel"]
58 else:
59 self.warn("don't know how to install OpenGL for your distro")
60 package_tool.install(update=True, packages=packages)
61
62 def package_info(self):
63 if self.settings.os == "Macos":
64 self.cpp_info.defines.append("GL_SILENCE_DEPRECATION=1")
65 self.cpp_info.frameworks.append("OpenGL")
66 elif self.settings.os == "Windows":
67 self.cpp_info.system_libs = ["OpenGL32"]
68 elif self.settings.os == "Linux":
69 self._fill_cppinfo_from_pkgconfig('gl')
70
[end of recipes/opengl/all/conanfile.py]
</code>
</patch> | diff --git a/recipes/opengl/all/conanfile.py b/recipes/opengl/all/conanfile.py
--- a/recipes/opengl/all/conanfile.py
+++ b/recipes/opengl/all/conanfile.py
@@ -56,7 +56,7 @@
elif tools.os_info.with_zypper:
packages = ["Mesa-libGL-devel"]
else:
- self.warn("don't know how to install OpenGL for your distro")
+ self.output.warn("Don't know how to install OpenGL for your distro.")
package_tool.install(update=True, packages=packages)
def package_info(self):
| {"golden_diff": "diff --git a/recipes/opengl/all/conanfile.py b/recipes/opengl/all/conanfile.py\n--- a/recipes/opengl/all/conanfile.py\n+++ b/recipes/opengl/all/conanfile.py\n@@ -56,7 +56,7 @@\n elif tools.os_info.with_zypper:\n packages = [\"Mesa-libGL-devel\"]\n else:\n- self.warn(\"don't know how to install OpenGL for your distro\")\n+ self.output.warn(\"Don't know how to install OpenGL for your distro.\")\n package_tool.install(update=True, packages=packages)\n \n def package_info(self):\n", "issue": "[bug] Some system packages fail to install (xorg/system, opengl/system)\n<!--\r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n\r\n To help us debug your issue please explain:\r\n--> When configuring a cmake project template that used conan i encountered an error while configuring cmake : \r\n`opengl/system: ERROR: while executing system_requirements(): 'SysConfigOpenGLConan' object has no attribute 'warn'`. After attemping a manual installation with `conan install opengl/system@` i still have the same problem and output. I have no idea what could be causing this and after a few hours of googling i failed to find anything that would explain this. I also get the exact same output when installing `xorg/system`\r\n\r\n### Environment Details (include every applicable attribute)\r\n * Operating System+version: Pop!_OS 20.04 LTS x86_64\r\n * Compiler+version: gcc (Ubuntu 9.3.0-10ubuntu2) 9.3.0\r\n * Conan version: Conan version 1.26.0\r\n * Python version: Python 2.7.18rc1 AND Python 3.8.2\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n`conan install opengl/system@`\r\n`conan install xorg/system@`\r\n\r\n### Logs (Executed commands with output) (Include/Attach if Applicable)\r\n\r\n```\r\nConfiguration:\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Release\r\nos=Linux\r\nos_build=Linux\r\n[options]\r\n[build_requires]\r\n[env]\r\n\r\nInstalling package: opengl/system\r\nRequirements\r\n opengl/system from 'conan-center' - Cache\r\nPackages\r\n opengl/system:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache\r\n\r\nInstalling (downloading, building) binaries...\r\nopengl/system: ERROR: while executing system_requirements(): 'SysConfigOpenGLConan' object has no attribute 'warn'\r\nERROR: Error in system requirements\r\n```\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanException\nimport os\n\n\nclass SysConfigOpenGLConan(ConanFile):\n name = \"opengl\"\n version = \"system\"\n description = \"cross-platform virtual conan package for the OpenGL support\"\n topics = (\"conan\", \"opengl\", \"gl\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.opengl.org/\"\n license = \"MIT\"\n settings = (\"os\",)\n\n def package_id(self):\n self.info.header_only()\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"OpenGL development files aren't available, give up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n 
self.cpp_info.system_libs.extend(libs)\n self.cpp_info.libdirs.extend(lib_dirs)\n self.cpp_info.sharedlinkflags.extend(ldflags)\n self.cpp_info.exelinkflags.extend(ldflags)\n self.cpp_info.defines.extend(defines)\n self.cpp_info.includedirs.extend(include_dirs)\n self.cpp_info.cflags.extend(cflags)\n self.cpp_info.cxxflags.extend(cflags)\n\n def system_requirements(self):\n if tools.os_info.is_linux and self.settings.os == \"Linux\":\n package_tool = tools.SystemPackageTool(conanfile=self, default_mode='verify')\n if tools.os_info.with_yum:\n if tools.os_info.linux_distro == \"fedora\" and tools.os_info.os_version >= \"32\":\n packages = [\"libglvnd-devel\"]\n else:\n packages = [\"mesa-libGL-devel\"]\n elif tools.os_info.with_apt:\n ubuntu_20_or_later = tools.os_info.linux_distro == \"ubuntu\" and tools.os_info.os_version >= \"20\"\n debian_11_or_later = tools.os_info.linux_distro == \"debian\" and tools.os_info.os_version >= \"11\"\n if ubuntu_20_or_later or debian_11_or_later:\n packages = [\"libgl-dev\"]\n else:\n packages = [\"libgl1-mesa-dev\"]\n elif tools.os_info.with_pacman:\n packages = [\"libglvnd\"]\n elif tools.os_info.with_zypper:\n packages = [\"Mesa-libGL-devel\"]\n else:\n self.warn(\"don't know how to install OpenGL for your distro\")\n package_tool.install(update=True, packages=packages)\n\n def package_info(self):\n if self.settings.os == \"Macos\":\n self.cpp_info.defines.append(\"GL_SILENCE_DEPRECATION=1\")\n self.cpp_info.frameworks.append(\"OpenGL\")\n elif self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"OpenGL32\"]\n elif self.settings.os == \"Linux\":\n self._fill_cppinfo_from_pkgconfig('gl')\n", "path": "recipes/opengl/all/conanfile.py"}]} | 1,886 | 137 |
gh_patches_debug_37236 | rasdani/github-patches | git_diff | ansible__awx-11599 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
doc error with inventory plugin for latest awx.awx inventory plugin
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I am not entitled to status updates or other assurances.
### Summary
The documentation for the inventory plugin, awx.awx.controller, was malformed.
rendered webpage for docs is here: https://docs.ansible.com/ansible/latest/collections/awx/awx/controller_inventory.html#ansible-collections-awx-awx-controller-inventory
### AWX version
N/A
### Installation method
openshift
### Modifications
no
### Ansible version
n/a
### Operating system
n/a
### Web browser
Chrome
### Steps to reproduce
go to https://docs.ansible.com/ansible/latest/collections/awx/awx/controller_inventory.html#ansible-collections-awx-awx-controller-inventory
there are docs errors on the webpage
>The documentation for the inventory plugin, awx.awx.controller, was malformed.
### Expected results
no errors
### Actual results

### Additional information
fast track this to infamous Chris Meyers
</issue>
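
The malformed part is the `deprecated` key on the env entries: Ansible's doc parser expects a mapping there, while the fragment below supplies a list of one-key dicts. An excerpt-style sketch of the corrected shape — the key names (`collection_name`, `version`, `why`, `alternatives`) follow the merged diff shown after the code:

```python
class ModuleDocFragment(object):
    DOCUMENTATION = r'''
options:
  host:
    description: The network address of your Automation Platform Controller host.
    env:
      - name: CONTROLLER_HOST
      - name: TOWER_HOST
        deprecated:
          collection_name: 'awx.awx'   # a mapping, not a list of one-key dicts
          version: '4.0.0'
          why: Collection name change
          alternatives: 'CONTROLLER_HOST'
'''
```
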
<code>
[start of awx_collection/plugins/doc_fragments/auth_plugin.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright: (c) 2020, Ansible by Red Hat, Inc
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import absolute_import, division, print_function
7
8 __metaclass__ = type
9
10
11 class ModuleDocFragment(object):
12
13 # Automation Platform Controller documentation fragment
14 DOCUMENTATION = r'''
15 options:
16 host:
17 description: The network address of your Automation Platform Controller host.
18 env:
19 - name: CONTROLLER_HOST
20 - name: TOWER_HOST
21 deprecated:
22 - removed_from_collection: 'awx.awx.controller'
23 - removed_in: '4.0.0'
24 - why: Collection name change
25 - alternative: 'CONTROLLER_HOST'
26 username:
27 description: The user that you plan to use to access inventories on the controller.
28 env:
29 - name: CONTROLLER_USERNAME
30 - name: TOWER_USERNAME
31 deprecated:
32 - removed_from_collection: 'awx.awx.controller'
33 - removed_in: '4.0.0'
34 - why: Collection name change
35 - alternative: 'CONTROLLER_USERNAME'
36 password:
37 description: The password for your controller user.
38 env:
39 - name: CONTROLLER_PASSWORD
40 - name: TOWER_PASSWORD
41 deprecated:
42 - removed_from_collection: 'awx.awx.controller'
43 - removed_in: '4.0.0'
44 - why: Collection name change
45 - alternative: 'CONTROLLER_PASSWORD'
46 oauth_token:
47 description:
48 - The OAuth token to use.
49 env:
50 - name: CONTROLLER_OAUTH_TOKEN
51 - name: TOWER_OAUTH_TOKEN
52 deprecated:
53 - removed_from_collection: 'awx.awx.controller'
54 - removed_in: '4.0.0'
55 - why: Collection name change
56 - alternative: 'CONTROLLER_OAUTH_TOKEN'
57 verify_ssl:
58 description:
59 - Specify whether Ansible should verify the SSL certificate of the controller host.
60 - Defaults to True, but this is handled by the shared module_utils code
61 type: bool
62 env:
63 - name: CONTROLLER_VERIFY_SSL
64 - name: TOWER_VERIFY_SSL
65 deprecated:
66 - removed_from_collection: 'awx.awx.controller'
67 - removed_in: '4.0.0'
68 - why: Collection name change
69 - alternative: 'CONTROLLER_VERIFY_SSL'
70 aliases: [ validate_certs ]
71
72 notes:
73 - If no I(config_file) is provided we will attempt to use the tower-cli library
74 defaults to find your host information.
75 - I(config_file) should be in the following format
76 host=hostname
77 username=username
78 password=password
79 '''
80
[end of awx_collection/plugins/doc_fragments/auth_plugin.py]
</code>
</patch> | diff --git a/awx_collection/plugins/doc_fragments/auth_plugin.py b/awx_collection/plugins/doc_fragments/auth_plugin.py
--- a/awx_collection/plugins/doc_fragments/auth_plugin.py
+++ b/awx_collection/plugins/doc_fragments/auth_plugin.py
@@ -18,42 +18,42 @@
env:
- name: CONTROLLER_HOST
- name: TOWER_HOST
- deprecated:
- - removed_from_collection: 'awx.awx.controller'
- - removed_in: '4.0.0'
- - why: Collection name change
- - alternative: 'CONTROLLER_HOST'
+ deprecated:
+ collection_name: 'awx.awx'
+ version: '4.0.0'
+ why: Collection name change
+ alternatives: 'CONTROLLER_HOST'
username:
description: The user that you plan to use to access inventories on the controller.
env:
- name: CONTROLLER_USERNAME
- name: TOWER_USERNAME
- deprecated:
- - removed_from_collection: 'awx.awx.controller'
- - removed_in: '4.0.0'
- - why: Collection name change
- - alternative: 'CONTROLLER_USERNAME'
+ deprecated:
+ collection_name: 'awx.awx'
+ version: '4.0.0'
+ why: Collection name change
+ alternatives: 'CONTROLLER_USERNAME'
password:
description: The password for your controller user.
env:
- name: CONTROLLER_PASSWORD
- name: TOWER_PASSWORD
- deprecated:
- - removed_from_collection: 'awx.awx.controller'
- - removed_in: '4.0.0'
- - why: Collection name change
- - alternative: 'CONTROLLER_PASSWORD'
+ deprecated:
+ collection_name: 'awx.awx'
+ version: '4.0.0'
+ why: Collection name change
+ alternatives: 'CONTROLLER_PASSWORD'
oauth_token:
description:
- The OAuth token to use.
env:
- name: CONTROLLER_OAUTH_TOKEN
- name: TOWER_OAUTH_TOKEN
- deprecated:
- - removed_from_collection: 'awx.awx.controller'
- - removed_in: '4.0.0'
- - why: Collection name change
- - alternative: 'CONTROLLER_OAUTH_TOKEN'
+ deprecated:
+ collection_name: 'awx.awx'
+ version: '4.0.0'
+ why: Collection name change
+ alternatives: 'CONTROLLER_OAUTH_TOKEN'
verify_ssl:
description:
- Specify whether Ansible should verify the SSL certificate of the controller host.
@@ -62,11 +62,11 @@
env:
- name: CONTROLLER_VERIFY_SSL
- name: TOWER_VERIFY_SSL
- deprecated:
- - removed_from_collection: 'awx.awx.controller'
- - removed_in: '4.0.0'
- - why: Collection name change
- - alternative: 'CONTROLLER_VERIFY_SSL'
+ deprecated:
+ collection_name: 'awx.awx'
+ version: '4.0.0'
+ why: Collection name change
+ alternatives: 'CONTROLLER_VERIFY_SSL'
aliases: [ validate_certs ]
notes:
| {"golden_diff": "diff --git a/awx_collection/plugins/doc_fragments/auth_plugin.py b/awx_collection/plugins/doc_fragments/auth_plugin.py\n--- a/awx_collection/plugins/doc_fragments/auth_plugin.py\n+++ b/awx_collection/plugins/doc_fragments/auth_plugin.py\n@@ -18,42 +18,42 @@\n env:\n - name: CONTROLLER_HOST\n - name: TOWER_HOST\n- deprecated:\n- - removed_from_collection: 'awx.awx.controller'\n- - removed_in: '4.0.0'\n- - why: Collection name change\n- - alternative: 'CONTROLLER_HOST'\n+ deprecated:\n+ collection_name: 'awx.awx'\n+ version: '4.0.0'\n+ why: Collection name change\n+ alternatives: 'CONTROLLER_HOST'\n username:\n description: The user that you plan to use to access inventories on the controller.\n env:\n - name: CONTROLLER_USERNAME\n - name: TOWER_USERNAME\n- deprecated:\n- - removed_from_collection: 'awx.awx.controller'\n- - removed_in: '4.0.0'\n- - why: Collection name change\n- - alternative: 'CONTROLLER_USERNAME'\n+ deprecated:\n+ collection_name: 'awx.awx'\n+ version: '4.0.0'\n+ why: Collection name change\n+ alternatives: 'CONTROLLER_USERNAME'\n password:\n description: The password for your controller user.\n env:\n - name: CONTROLLER_PASSWORD\n - name: TOWER_PASSWORD\n- deprecated:\n- - removed_from_collection: 'awx.awx.controller'\n- - removed_in: '4.0.0'\n- - why: Collection name change\n- - alternative: 'CONTROLLER_PASSWORD'\n+ deprecated:\n+ collection_name: 'awx.awx'\n+ version: '4.0.0'\n+ why: Collection name change\n+ alternatives: 'CONTROLLER_PASSWORD'\n oauth_token:\n description:\n - The OAuth token to use.\n env:\n - name: CONTROLLER_OAUTH_TOKEN\n - name: TOWER_OAUTH_TOKEN\n- deprecated:\n- - removed_from_collection: 'awx.awx.controller'\n- - removed_in: '4.0.0'\n- - why: Collection name change\n- - alternative: 'CONTROLLER_OAUTH_TOKEN'\n+ deprecated:\n+ collection_name: 'awx.awx'\n+ version: '4.0.0'\n+ why: Collection name change\n+ alternatives: 'CONTROLLER_OAUTH_TOKEN'\n verify_ssl:\n description:\n - Specify whether Ansible should verify the SSL certificate of the controller host.\n@@ -62,11 +62,11 @@\n env:\n - name: CONTROLLER_VERIFY_SSL\n - name: TOWER_VERIFY_SSL\n- deprecated:\n- - removed_from_collection: 'awx.awx.controller'\n- - removed_in: '4.0.0'\n- - why: Collection name change\n- - alternative: 'CONTROLLER_VERIFY_SSL'\n+ deprecated:\n+ collection_name: 'awx.awx'\n+ version: '4.0.0'\n+ why: Collection name change\n+ alternatives: 'CONTROLLER_VERIFY_SSL'\n aliases: [ validate_certs ]\n \n notes:\n", "issue": "doc error with inventory plugin for latest awx.awx inventory plugin \n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I am not entitled to status updates or other assurances.\n\n### Summary\n\nThe documentation for the inventory plugin, awx.awx.controller, was malformed.\r\n\r\nrendered webpage for docs is here: https://docs.ansible.com/ansible/latest/collections/awx/awx/controller_inventory.html#ansible-collections-awx-awx-controller-inventory\n\n### AWX version\n\nN/A\n\n### Installation method\n\nopenshift\n\n### Modifications\n\nno\n\n### Ansible version\n\nn/a\n\n### Operating system\n\nn/a\n\n### Web browser\n\nChrome\n\n### Steps to reproduce\n\ngo to 
https://docs.ansible.com/ansible/latest/collections/awx/awx/controller_inventory.html#ansible-collections-awx-awx-controller-inventory\r\n\r\nthere is docs errors on the webpage \r\n\r\n>The documentation for the inventory plugin, awx.awx.controller, was malformed.\r\n\r\n\n\n### Expected results\n\nno errors\n\n### Actual results\n\n\r\n\n\n### Additional information\n\nfast track this to infamous Chris Meyers\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2020, Ansible by Red Hat, Inc\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nclass ModuleDocFragment(object):\n\n # Automation Platform Controller documentation fragment\n DOCUMENTATION = r'''\noptions:\n host:\n description: The network address of your Automation Platform Controller host.\n env:\n - name: CONTROLLER_HOST\n - name: TOWER_HOST\n deprecated:\n - removed_from_collection: 'awx.awx.controller'\n - removed_in: '4.0.0'\n - why: Collection name change\n - alternative: 'CONTROLLER_HOST'\n username:\n description: The user that you plan to use to access inventories on the controller.\n env:\n - name: CONTROLLER_USERNAME\n - name: TOWER_USERNAME\n deprecated:\n - removed_from_collection: 'awx.awx.controller'\n - removed_in: '4.0.0'\n - why: Collection name change\n - alternative: 'CONTROLLER_USERNAME'\n password:\n description: The password for your controller user.\n env:\n - name: CONTROLLER_PASSWORD\n - name: TOWER_PASSWORD\n deprecated:\n - removed_from_collection: 'awx.awx.controller'\n - removed_in: '4.0.0'\n - why: Collection name change\n - alternative: 'CONTROLLER_PASSWORD'\n oauth_token:\n description:\n - The OAuth token to use.\n env:\n - name: CONTROLLER_OAUTH_TOKEN\n - name: TOWER_OAUTH_TOKEN\n deprecated:\n - removed_from_collection: 'awx.awx.controller'\n - removed_in: '4.0.0'\n - why: Collection name change\n - alternative: 'CONTROLLER_OAUTH_TOKEN'\n verify_ssl:\n description:\n - Specify whether Ansible should verify the SSL certificate of the controller host.\n - Defaults to True, but this is handled by the shared module_utils code\n type: bool\n env:\n - name: CONTROLLER_VERIFY_SSL\n - name: TOWER_VERIFY_SSL\n deprecated:\n - removed_from_collection: 'awx.awx.controller'\n - removed_in: '4.0.0'\n - why: Collection name change\n - alternative: 'CONTROLLER_VERIFY_SSL'\n aliases: [ validate_certs ]\n\nnotes:\n- If no I(config_file) is provided we will attempt to use the tower-cli library\n defaults to find your host information.\n- I(config_file) should be in the following format\n host=hostname\n username=username\n password=password\n'''\n", "path": "awx_collection/plugins/doc_fragments/auth_plugin.py"}]} | 1,666 | 740 |
gh_patches_debug_1537 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2051 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
@concurrent annotation doesn't work in the OOP script method
I.E:
```
class SomeClass:
@concurrent [doesn't work, 2 args]
def request(self, flow):
pass
```
</issue>
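
The mechanism the eventual fix relies on can be shown standalone: when a decorator runs inside a class body, the decorated function's `__qualname__` contains a dot, which lets the decorator hand back a `staticmethod` wrapper. The snippet below is only a sketch of that detection trick — the threading and reply bookkeeping from the real decorator are stripped out:

```python
def concurrent(fn):
    def _concurrent(flow):
        fn(flow)  # the real decorator runs this inside a ScriptThread

    # A dotted __qualname__ (e.g. "SomeAddon.request") signals the function
    # was defined inside a class body rather than at module level.
    if "." in fn.__qualname__:
        return staticmethod(_concurrent)
    return _concurrent

class SomeAddon:
    @concurrent
    def request(flow):  # written without `self`; staticmethod keeps one argument
        print("handling", flow)

SomeAddon().request("flow-object")  # prints: handling flow-object
```
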
<code>
[start of mitmproxy/script/concurrent.py]
1 """
2 This module provides a @concurrent decorator primitive to
3 offload computations from mitmproxy's main master thread.
4 """
5
6 from mitmproxy import eventsequence
7 from mitmproxy.types import basethread
8
9
10 class ScriptThread(basethread.BaseThread):
11 name = "ScriptThread"
12
13
14 def concurrent(fn):
15 if fn.__name__ not in eventsequence.Events - {"start", "configure", "tick"}:
16 raise NotImplementedError(
17 "Concurrent decorator not supported for '%s' method." % fn.__name__
18 )
19
20 def _concurrent(obj):
21 def run():
22 fn(obj)
23 if obj.reply.state == "taken":
24 if not obj.reply.has_message:
25 obj.reply.ack()
26 obj.reply.commit()
27 obj.reply.take()
28 ScriptThread(
29 "script.concurrent (%s)" % fn.__name__,
30 target=run
31 ).start()
32 return _concurrent
33
[end of mitmproxy/script/concurrent.py]
</code>
</patch> | diff --git a/mitmproxy/script/concurrent.py b/mitmproxy/script/concurrent.py
--- a/mitmproxy/script/concurrent.py
+++ b/mitmproxy/script/concurrent.py
@@ -29,4 +29,8 @@
"script.concurrent (%s)" % fn.__name__,
target=run
).start()
- return _concurrent
+ # Support @concurrent for class-based addons
+ if "." in fn.__qualname__:
+ return staticmethod(_concurrent)
+ else:
+ return _concurrent
| {"golden_diff": "diff --git a/mitmproxy/script/concurrent.py b/mitmproxy/script/concurrent.py\n--- a/mitmproxy/script/concurrent.py\n+++ b/mitmproxy/script/concurrent.py\n@@ -29,4 +29,8 @@\n \"script.concurrent (%s)\" % fn.__name__,\n target=run\n ).start()\n- return _concurrent\n+ # Support @concurrent for class-based addons\n+ if \".\" in fn.__qualname__:\n+ return staticmethod(_concurrent)\n+ else:\n+ return _concurrent\n", "issue": "@concurrent annotation doesn't work in the OOP script method\nI.E:\r\n\r\n```\r\nclass SomeClass:\r\n @concurrent [doesn't work, 2 args]\r\n def request(self, flow):\r\n pass\r\n\r\n```\n@concurrent annotation doesn't work in the OOP script method\nI.E:\r\n\r\n```\r\nclass SomeClass:\r\n @concurrent [doesn't work, 2 args]\r\n def request(self, flow):\r\n pass\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\nThis module provides a @concurrent decorator primitive to\noffload computations from mitmproxy's main master thread.\n\"\"\"\n\nfrom mitmproxy import eventsequence\nfrom mitmproxy.types import basethread\n\n\nclass ScriptThread(basethread.BaseThread):\n name = \"ScriptThread\"\n\n\ndef concurrent(fn):\n if fn.__name__ not in eventsequence.Events - {\"start\", \"configure\", \"tick\"}:\n raise NotImplementedError(\n \"Concurrent decorator not supported for '%s' method.\" % fn.__name__\n )\n\n def _concurrent(obj):\n def run():\n fn(obj)\n if obj.reply.state == \"taken\":\n if not obj.reply.has_message:\n obj.reply.ack()\n obj.reply.commit()\n obj.reply.take()\n ScriptThread(\n \"script.concurrent (%s)\" % fn.__name__,\n target=run\n ).start()\n return _concurrent\n", "path": "mitmproxy/script/concurrent.py"}]} | 888 | 124 |
gh_patches_debug_10057 | rasdani/github-patches | git_diff | OCA__social-1068 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[15.0] mail_tracking: portal tests fail
<!-- Provide a general summary of the issue in the Title above.
Before opening the issue, please check that there isn't another issue addressing the same problem already created -->
## Module
mail_tracking
## Describe the bug
mail_tracking causes TestPortalWizard.test_portal_wizard_public_user and TestPortalWizard.test_portal_wizard_partner to fail
## To Reproduce
**Affected versions**: 15.0
Steps to reproduce the behavior:
1. init db with portal and mail_tracking
2. run portal tests
</issue>
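
The failure pattern behind this kind of regression can be reproduced without Odoo: an override that forwards arguments positionally breaks as soon as the parent method's signature is reshuffled or made keyword-only — which is what the fix below guards against by forwarding everything by keyword. `Base` here is a stand-in, not Odoo's `ir.mail_server`:

```python
class Base:
    # imagine an upstream refactor made these keyword-only
    def build_email(self, *, email_from, email_to, subject, body, **kw):
        return {"from": email_from, "to": email_to, "subject": subject}

class Tracking(Base):
    def build_email(self, email_from, email_to, subject, body, **kw):
        # positional forwarding -- super().build_email(email_from, ...) --
        # would raise TypeError here; keyword forwarding keeps working
        return super().build_email(
            email_from=email_from,
            email_to=email_to,
            subject=subject,
            body=body,
            **kw,
        )

print(Tracking().build_email("a@example.com", "b@example.com", "hi", "<p>hi</p>"))
```
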
<code>
[start of mail_tracking/models/ir_mail_server.py]
1 # Copyright 2016 Antonio Espinosa - <[email protected]>
2 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
3
4 import re
5 import threading
6
7 from odoo import api, models, tools
8
9
10 class IrMailServer(models.Model):
11 _inherit = "ir.mail_server"
12
13 def _tracking_headers_add(self, tracking_email_id, headers):
14 """Allow other addons to add its own tracking SMTP headers"""
15 headers = headers or {}
16 headers["X-Odoo-Database"] = getattr(threading.current_thread(), "dbname", None)
17 headers["X-Odoo-MailTracking-ID"] = tracking_email_id
18 return headers
19
20 def _tracking_email_id_body_get(self, body):
21 body = body or ""
22 # https://regex101.com/r/lW4cB1/2
23 match = re.search(r'<img[^>]*data-odoo-tracking-email=["\']([0-9]*)["\']', body)
24 return str(match.group(1)) if match and match.group(1) else False
25
26 def build_email(
27 self,
28 email_from,
29 email_to,
30 subject,
31 body,
32 email_cc=None,
33 email_bcc=None,
34 reply_to=False,
35 attachments=None,
36 message_id=None,
37 references=None,
38 object_id=False,
39 subtype="plain",
40 headers=None,
41 body_alternative=None,
42 subtype_alternative="plain",
43 ):
44 tracking_email_id = self._tracking_email_id_body_get(body)
45 if tracking_email_id:
46 headers = self._tracking_headers_add(tracking_email_id, headers)
47 msg = super(IrMailServer, self).build_email(
48 email_from,
49 email_to,
50 subject,
51 body,
52 email_cc=email_cc,
53 email_bcc=email_bcc,
54 reply_to=reply_to,
55 attachments=attachments,
56 message_id=message_id,
57 references=references,
58 object_id=object_id,
59 subtype=subtype,
60 headers=headers,
61 body_alternative=body_alternative,
62 subtype_alternative=subtype_alternative,
63 )
64 return msg
65
66 def _tracking_email_get(self, message):
67 try:
68 tracking_email_id = int(
69 message.get(
70 "X-Odoo-MailTracking-ID",
71 # Deprecated tracking header, kept as fallback
72 message["X-Odoo-Tracking-ID"],
73 )
74 )
75 except (TypeError, ValueError, KeyError):
76 tracking_email_id = False
77 return self.env["mail.tracking.email"].browse(tracking_email_id)
78
79 def _smtp_server_get(self, mail_server_id, smtp_server):
80 smtp_server_used = False
81 mail_server = None
82 if mail_server_id:
83 mail_server = self.browse(mail_server_id)
84 elif not smtp_server:
85 mail_server_ids = self.search([], order="sequence", limit=1)
86 mail_server = mail_server_ids[0] if mail_server_ids else None
87 if mail_server:
88 smtp_server_used = mail_server.smtp_host
89 else:
90 smtp_server_used = smtp_server or tools.config.get("smtp_server")
91 return smtp_server_used
92
93 @api.model
94 def send_email(
95 self,
96 message,
97 mail_server_id=None,
98 smtp_server=None,
99 smtp_port=None,
100 smtp_user=None,
101 smtp_password=None,
102 smtp_encryption=None,
103 smtp_debug=False,
104 smtp_session=None,
105 ):
106 message_id = False
107 tracking_email = self._tracking_email_get(message)
108 smtp_server_used = self.sudo()._smtp_server_get(mail_server_id, smtp_server)
109 try:
110 message_id = super(IrMailServer, self).send_email(
111 message,
112 mail_server_id=mail_server_id,
113 smtp_server=smtp_server,
114 smtp_port=smtp_port,
115 smtp_user=smtp_user,
116 smtp_password=smtp_password,
117 smtp_encryption=smtp_encryption,
118 smtp_debug=smtp_debug,
119 smtp_session=smtp_session,
120 )
121 except Exception as e:
122 if tracking_email:
123 tracking_email.smtp_error(self, smtp_server_used, e)
124 if message_id and tracking_email:
125 vals = tracking_email._tracking_sent_prepare(
126 self, smtp_server_used, message, message_id
127 )
128 if vals:
129 self.env["mail.tracking.event"].sudo().create(vals)
130 return message_id
131
[end of mail_tracking/models/ir_mail_server.py]
</code>
</patch> | diff --git a/mail_tracking/models/ir_mail_server.py b/mail_tracking/models/ir_mail_server.py
--- a/mail_tracking/models/ir_mail_server.py
+++ b/mail_tracking/models/ir_mail_server.py
@@ -45,10 +45,10 @@
if tracking_email_id:
headers = self._tracking_headers_add(tracking_email_id, headers)
msg = super(IrMailServer, self).build_email(
- email_from,
- email_to,
- subject,
- body,
+ email_from=email_from,
+ email_to=email_to,
+ subject=subject,
+ body=body,
email_cc=email_cc,
email_bcc=email_bcc,
reply_to=reply_to,
| {"golden_diff": "diff --git a/mail_tracking/models/ir_mail_server.py b/mail_tracking/models/ir_mail_server.py\n--- a/mail_tracking/models/ir_mail_server.py\n+++ b/mail_tracking/models/ir_mail_server.py\n@@ -45,10 +45,10 @@\n if tracking_email_id:\n headers = self._tracking_headers_add(tracking_email_id, headers)\n msg = super(IrMailServer, self).build_email(\n- email_from,\n- email_to,\n- subject,\n- body,\n+ email_from=email_from,\n+ email_to=email_to,\n+ subject=subject,\n+ body=body,\n email_cc=email_cc,\n email_bcc=email_bcc,\n reply_to=reply_to,\n", "issue": "[15.0] mail_tracking: portal tests fail\n<!-- Provide a general summary of the issue in the Title above.\r\nBefore opening the issue, please check that there isn't another issue addressing the same problem already created -->\r\n\r\n## Module\r\nmail_tracking\r\n\r\n## Describe the bug\r\nmail_tracking causes TestPortalWizard.test_portal_wizard_public_user and TestPortalWizard.test_portal_wizard_partner to fail\r\n\r\n## To Reproduce\r\n**Affected versions**: 15.0\r\n\r\nSteps to reproduce the behavior:\r\n1. init db with portal and mail_tracking\r\n2. run portal tests\r\n\n", "before_files": [{"content": "# Copyright 2016 Antonio Espinosa - <[email protected]>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nimport re\nimport threading\n\nfrom odoo import api, models, tools\n\n\nclass IrMailServer(models.Model):\n _inherit = \"ir.mail_server\"\n\n def _tracking_headers_add(self, tracking_email_id, headers):\n \"\"\"Allow other addons to add its own tracking SMTP headers\"\"\"\n headers = headers or {}\n headers[\"X-Odoo-Database\"] = getattr(threading.current_thread(), \"dbname\", None)\n headers[\"X-Odoo-MailTracking-ID\"] = tracking_email_id\n return headers\n\n def _tracking_email_id_body_get(self, body):\n body = body or \"\"\n # https://regex101.com/r/lW4cB1/2\n match = re.search(r'<img[^>]*data-odoo-tracking-email=[\"\\']([0-9]*)[\"\\']', body)\n return str(match.group(1)) if match and match.group(1) else False\n\n def build_email(\n self,\n email_from,\n email_to,\n subject,\n body,\n email_cc=None,\n email_bcc=None,\n reply_to=False,\n attachments=None,\n message_id=None,\n references=None,\n object_id=False,\n subtype=\"plain\",\n headers=None,\n body_alternative=None,\n subtype_alternative=\"plain\",\n ):\n tracking_email_id = self._tracking_email_id_body_get(body)\n if tracking_email_id:\n headers = self._tracking_headers_add(tracking_email_id, headers)\n msg = super(IrMailServer, self).build_email(\n email_from,\n email_to,\n subject,\n body,\n email_cc=email_cc,\n email_bcc=email_bcc,\n reply_to=reply_to,\n attachments=attachments,\n message_id=message_id,\n references=references,\n object_id=object_id,\n subtype=subtype,\n headers=headers,\n body_alternative=body_alternative,\n subtype_alternative=subtype_alternative,\n )\n return msg\n\n def _tracking_email_get(self, message):\n try:\n tracking_email_id = int(\n message.get(\n \"X-Odoo-MailTracking-ID\",\n # Deprecated tracking header, kept as fallback\n message[\"X-Odoo-Tracking-ID\"],\n )\n )\n except (TypeError, ValueError, KeyError):\n tracking_email_id = False\n return self.env[\"mail.tracking.email\"].browse(tracking_email_id)\n\n def _smtp_server_get(self, mail_server_id, smtp_server):\n smtp_server_used = False\n mail_server = None\n if mail_server_id:\n mail_server = self.browse(mail_server_id)\n elif not smtp_server:\n mail_server_ids = self.search([], order=\"sequence\", limit=1)\n mail_server = mail_server_ids[0] if 
mail_server_ids else None\n if mail_server:\n smtp_server_used = mail_server.smtp_host\n else:\n smtp_server_used = smtp_server or tools.config.get(\"smtp_server\")\n return smtp_server_used\n\n @api.model\n def send_email(\n self,\n message,\n mail_server_id=None,\n smtp_server=None,\n smtp_port=None,\n smtp_user=None,\n smtp_password=None,\n smtp_encryption=None,\n smtp_debug=False,\n smtp_session=None,\n ):\n message_id = False\n tracking_email = self._tracking_email_get(message)\n smtp_server_used = self.sudo()._smtp_server_get(mail_server_id, smtp_server)\n try:\n message_id = super(IrMailServer, self).send_email(\n message,\n mail_server_id=mail_server_id,\n smtp_server=smtp_server,\n smtp_port=smtp_port,\n smtp_user=smtp_user,\n smtp_password=smtp_password,\n smtp_encryption=smtp_encryption,\n smtp_debug=smtp_debug,\n smtp_session=smtp_session,\n )\n except Exception as e:\n if tracking_email:\n tracking_email.smtp_error(self, smtp_server_used, e)\n if message_id and tracking_email:\n vals = tracking_email._tracking_sent_prepare(\n self, smtp_server_used, message, message_id\n )\n if vals:\n self.env[\"mail.tracking.event\"].sudo().create(vals)\n return message_id\n", "path": "mail_tracking/models/ir_mail_server.py"}]} | 1,887 | 159 |
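
The mail_tracking diff above changes the `super().build_email(...)` call from positional to keyword arguments. A minimal illustration of why that matters; `Base` and `Child` are hypothetical names, not Odoo classes:

```python
class Base:
    def build_email(self, email_from, email_to, subject, body, email_cc=None):
        return {"from": email_from, "to": email_to, "subject": subject}


class Child(Base):
    def build_email(self, email_from, email_to, subject, body, email_cc=None):
        # Keyword arguments keep this call correct even if the parent
        # later inserts or reorders a parameter in its signature.
        return super().build_email(
            email_from=email_from,
            email_to=email_to,
            subject=subject,
            body=body,
            email_cc=email_cc,
        )
```
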
gh_patches_debug_35032 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1880 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider quiznos is broken
During the global build at 2021-05-26-14-42-23, spider **quiznos** failed with **129 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/quiznos.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/quiznos.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/quiznos.geojson))
</issue>
<code>
[start of locations/spiders/quiznos.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5 import traceback
6
7 from locations.items import GeojsonPointItem
8
9 URL = "http://restaurants.quiznos.com"
10
11 class QuiznosSpider(scrapy.Spider):
12 name = "quiznos"
13 item_attributes = { 'brand': "Quizno's", 'brand_wikidata': "Q1936229" }
14 allowed_domains = [URL]
15 start_urls = (
16 'http://restaurants.quiznos.com/data/stores.json?callback=storeList',
17 )
18
19 def store_hours(self, store_hours):
20 if store_hours == '' or store_hours.lower().find('close') > -1: return ''
21
22 day_groups = []
23 this_day_group = None
24 hour_intervals = []
25
26 interval = store_hours.split(' - ')
27 start_time = interval[0].split(' ')
28 end_time = interval[1].split(' ')
29 start_hour = start_time[0].split(':')
30 end_hour = end_time[0].split(':')
31
32 hour_intervals.append('{}:{}-{}:{}'.format(
33 start_hour[0],
34 start_hour[1],
35 int(end_hour[0]) + 12 if end_time[1] == 'PM' else end_hour[0],
36 end_hour[1],
37 ))
38
39 hours = ','.join(hour_intervals)
40
41 if not this_day_group:
42 this_day_group = {
43 'from_day': 'Su',
44 'to_day': 'Sa',
45 'hours': hours
46 }
47
48 day_groups.append(this_day_group)
49
50 opening_hours = ""
51 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
52 opening_hours = '24/7'
53 else:
54 for day_group in day_groups:
55 if day_group['from_day'] == day_group['to_day']:
56 opening_hours += '{from_day} {hours}; '.format(**day_group)
57 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
58 opening_hours += '{hours}; '.format(**day_group)
59 else:
60 opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
61 opening_hours = opening_hours[:-2]
62
63 return opening_hours
64
65 def parse(self, response):
66 data = response.body_as_unicode()
67 stores = json.loads(re.search(r'storeList\((.*)\)', data).group(1))
68
69 for store in stores:
70
71 yield GeojsonPointItem(
72 lat=store.get('latitude'),
73 lon=store.get('longitude'),
74 ref=str(store.get('storeid')),
75 phone=store.get('phone'),
76 name=store.get('restaurantname'),
77 opening_hours=self.store_hours(store.get('businesshours')),
78 addr_full=store.get('address1'),
79 city=store.get('city'),
80 state=store.get('statecode'),
81 postcode=store.get('zipcode'),
82 website=URL + store.get('url'),
83 )
84
[end of locations/spiders/quiznos.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | 
diff --git a/locations/spiders/quiznos.py b/locations/spiders/quiznos.py
--- a/locations/spiders/quiznos.py
+++ b/locations/spiders/quiznos.py
@@ -2,22 +2,24 @@
import scrapy
import json
import re
-import traceback
from locations.items import GeojsonPointItem
-URL = "http://restaurants.quiznos.com"
class QuiznosSpider(scrapy.Spider):
name = "quiznos"
item_attributes = { 'brand': "Quizno's", 'brand_wikidata': "Q1936229" }
- allowed_domains = [URL]
+ allowed_domains = ["https://restaurants.quiznos.com"]
start_urls = (
- 'http://restaurants.quiznos.com/data/stores.json?callback=storeList',
+ 'https://restaurants.quiznos.com/data/stores.json',
)
def store_hours(self, store_hours):
- if store_hours == '' or store_hours.lower().find('close') > -1: return ''
+ if store_hours == '' or store_hours == ' - ' or store_hours.lower().find('close') > -1:
+ return ''
+
+ if store_hours == 'Open 24 Hours - Open 24 Hours':
+ return '24/7'
day_groups = []
this_day_group = None
@@ -67,17 +69,18 @@
stores = json.loads(re.search(r'storeList\((.*)\)', data).group(1))
for store in stores:
+ properties = {
+ "lat": store.get('latitude'),
+ "lon": store.get('longitude'),
+ "ref": str(store.get('storeid')),
+ "phone": store.get('phone'),
+ "name": store.get('restaurantname'),
+ "opening_hours": self.store_hours(store.get('businesshours')),
+ "addr_full": store.get('address1'),
+ "city": store.get('city'),
+ "state": store.get('statecode'),
+ "postcode": store.get('zipcode'),
+ "website": response.urljoin(store.get('url')),
+ }
- yield GeojsonPointItem(
- lat=store.get('latitude'),
- lon=store.get('longitude'),
- ref=str(store.get('storeid')),
- phone=store.get('phone'),
- name=store.get('restaurantname'),
- opening_hours=self.store_hours(store.get('businesshours')),
- addr_full=store.get('address1'),
- city=store.get('city'),
- state=store.get('statecode'),
- postcode=store.get('zipcode'),
- website=URL + store.get('url'),
- )
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/quiznos.py b/locations/spiders/quiznos.py\n--- a/locations/spiders/quiznos.py\n+++ b/locations/spiders/quiznos.py\n@@ -2,22 +2,24 @@\n import scrapy\n import json\n import re\n-import traceback\n \n from locations.items import GeojsonPointItem\n \n-URL = \"http://restaurants.quiznos.com\"\n \n class QuiznosSpider(scrapy.Spider):\n name = \"quiznos\"\n item_attributes = { 'brand': \"Quizno's\", 'brand_wikidata': \"Q1936229\" }\n- allowed_domains = [URL]\n+ allowed_domains = [\"https://restaurants.quiznos.com\"]\n start_urls = (\n- 'http://restaurants.quiznos.com/data/stores.json?callback=storeList',\n+ 'https://restaurants.quiznos.com/data/stores.json',\n )\n \n def store_hours(self, store_hours):\n- if store_hours == '' or store_hours.lower().find('close') > -1: return ''\n+ if store_hours == '' or store_hours == ' - ' or store_hours.lower().find('close') > -1:\n+ return ''\n+\n+ if store_hours == 'Open 24 Hours - Open 24 Hours':\n+ return '24/7'\n \n day_groups = []\n this_day_group = None\n@@ -67,17 +69,18 @@\n stores = json.loads(re.search(r'storeList\\((.*)\\)', data).group(1))\n \n for store in stores:\n+ properties = {\n+ \"lat\": store.get('latitude'),\n+ \"lon\": store.get('longitude'),\n+ \"ref\": str(store.get('storeid')),\n+ \"phone\": store.get('phone'),\n+ \"name\": store.get('restaurantname'),\n+ \"opening_hours\": self.store_hours(store.get('businesshours')),\n+ \"addr_full\": store.get('address1'),\n+ \"city\": store.get('city'),\n+ \"state\": store.get('statecode'),\n+ \"postcode\": store.get('zipcode'),\n+ \"website\": response.urljoin(store.get('url')),\n+ }\n \n- yield GeojsonPointItem(\n- lat=store.get('latitude'),\n- lon=store.get('longitude'),\n- ref=str(store.get('storeid')),\n- phone=store.get('phone'),\n- name=store.get('restaurantname'),\n- opening_hours=self.store_hours(store.get('businesshours')),\n- addr_full=store.get('address1'),\n- city=store.get('city'),\n- state=store.get('statecode'),\n- postcode=store.get('zipcode'),\n- website=URL + store.get('url'),\n- )\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider quiznos is broken\nDuring the global build at 2021-05-26-14-42-23, spider **quiznos** failed with **129 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/quiznos.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/quiznos.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/quiznos.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\nimport traceback\n\nfrom locations.items import GeojsonPointItem\n\nURL = \"http://restaurants.quiznos.com\"\n\nclass QuiznosSpider(scrapy.Spider):\n name = \"quiznos\"\n item_attributes = { 'brand': \"Quizno's\", 'brand_wikidata': \"Q1936229\" }\n allowed_domains = [URL]\n start_urls = (\n 'http://restaurants.quiznos.com/data/stores.json?callback=storeList',\n )\n\n def store_hours(self, store_hours):\n if store_hours == '' or store_hours.lower().find('close') > -1: return ''\n\n day_groups = []\n this_day_group = None\n hour_intervals = []\n\n interval = store_hours.split(' - ')\n start_time = interval[0].split(' ')\n end_time = interval[1].split(' ')\n start_hour = start_time[0].split(':')\n end_hour = end_time[0].split(':')\n\n hour_intervals.append('{}:{}-{}:{}'.format(\n start_hour[0],\n start_hour[1],\n int(end_hour[0]) + 12 if end_time[1] 
== 'PM' else end_hour[0],\n end_hour[1],\n ))\n\n hours = ','.join(hour_intervals)\n\n if not this_day_group:\n this_day_group = {\n 'from_day': 'Su',\n 'to_day': 'Sa',\n 'hours': hours\n }\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def parse(self, response):\n data = response.body_as_unicode()\n stores = json.loads(re.search(r'storeList\\((.*)\\)', data).group(1))\n\n for store in stores:\n\n yield GeojsonPointItem(\n lat=store.get('latitude'),\n lon=store.get('longitude'),\n ref=str(store.get('storeid')),\n phone=store.get('phone'),\n name=store.get('restaurantname'),\n opening_hours=self.store_hours(store.get('businesshours')),\n addr_full=store.get('address1'),\n city=store.get('city'),\n state=store.get('statecode'),\n postcode=store.get('zipcode'),\n website=URL + store.get('url'),\n )\n", "path": "locations/spiders/quiznos.py"}]} | 1,557 | 610 |
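
The quiznos fix adds guard clauses for the sentinel values `' - '` and `'Open 24 Hours - Open 24 Hours'` before any parsing happens. A condensed sketch of just those guards, with `normalize_hours` as an illustrative standalone name:

```python
def normalize_hours(store_hours):
    # Empty, placeholder, or "closed" strings produce no opening_hours.
    if not store_hours or store_hours == " - " or "close" in store_hours.lower():
        return ""
    if store_hours == "Open 24 Hours - Open 24 Hours":
        return "24/7"
    return store_hours  # real parsing of "H:MM AM - H:MM PM" would follow
```
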
gh_patches_debug_37893 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5532 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of examples/language/grok-1/inference_tp.py]
1 import time
2
3 import torch
4 from grok1_policy import Grok1ForCausalLMPolicy
5 from transformers import AutoModelForCausalLM, LlamaTokenizerFast
6 from utils import get_defualt_parser, inference, print_output
7
8 import colossalai
9 from colossalai.booster import Booster
10 from colossalai.booster.plugin import HybridParallelPlugin
11 from colossalai.cluster import DistCoordinator
12 from colossalai.lazy import LazyInitContext
13 from colossalai.utils import get_current_device
14
15 if __name__ == "__main__":
16 parser = get_defualt_parser()
17 args = parser.parse_args()
18 start = time.time()
19 colossalai.launch_from_torch({})
20 coordinator = DistCoordinator()
21 plugin = HybridParallelPlugin(
22 tp_size=coordinator.world_size,
23 pp_size=1,
24 precision="bf16",
25 parallel_output=False,
26 custom_policy=Grok1ForCausalLMPolicy(),
27 )
28 booster = Booster(plugin=plugin)
29 torch.set_default_dtype(torch.bfloat16)
30 with LazyInitContext(default_device=get_current_device()):
31 model = AutoModelForCausalLM.from_pretrained(
32 args.pretrained, trust_remote_code=True, torch_dtype=torch.bfloat16
33 )
34 model, *_ = booster.boost(model)
35 model.eval()
36 init_time = time.time() - start
37
38 # A transformers-compatible version of the grok-1 tokenizer by Xenova
39 # https://huggingface.co/Xenova/grok-1-tokenizer
40 tokenizer = LlamaTokenizerFast.from_pretrained("Xenova/grok-1-tokenizer")
41
42 for text in args.text:
43 output = inference(
44 model.unwrap(),
45 tokenizer,
46 text,
47 max_new_tokens=args.max_new_tokens,
48 do_sample=args.do_sample,
49 temperature=args.temperature,
50 top_k=args.top_k,
51 top_p=args.top_p,
52 )
53 if coordinator.is_master():
54 print_output(text, tokenizer.decode(output))
55
56 overall_time = time.time() - start
57 gen_latency = overall_time - init_time
58 avg_gen_latency = gen_latency / len(args.text)
59 coordinator.print_on_master(
60 f"Initializing time: {init_time:.2f} seconds.\n"
61 f"Overall time: {overall_time:.2f} seconds. \n"
62 f"Generation latency: {gen_latency:.2f} seconds. \n"
63 f"Average generation latency: {avg_gen_latency:.2f} seconds. \n"
64 )
65
[end of examples/language/grok-1/inference_tp.py]
[start of examples/language/grok-1/inference.py]
1 import time
2
3 import torch
4 from transformers import AutoModelForCausalLM, LlamaTokenizerFast
5 from utils import get_defualt_parser, inference, print_output
6
7 if __name__ == "__main__":
8 parser = get_defualt_parser()
9 args = parser.parse_args()
10 start = time.time()
11 torch.set_default_dtype(torch.bfloat16)
12 model = AutoModelForCausalLM.from_pretrained(
13 args.pretrained,
14 trust_remote_code=True,
15 device_map="auto",
16 torch_dtype=torch.bfloat16,
17 )
18 model.eval()
19 init_time = time.time() - start
20
21 # A transformers-compatible version of the grok-1 tokenizer by Xenova
22 # https://huggingface.co/Xenova/grok-1-tokenizer
23 tokenizer = LlamaTokenizerFast.from_pretrained("Xenova/grok-1-tokenizer")
24
25 for text in args.text:
26 output = inference(
27 model,
28 tokenizer,
29 text,
30 max_new_tokens=args.max_new_tokens,
31 do_sample=args.do_sample,
32 temperature=args.temperature,
33 top_k=args.top_k,
34 top_p=args.top_p,
35 )
36 print_output(text, tokenizer.decode(output))
37
38 overall_time = time.time() - start
39 gen_latency = overall_time - init_time
40 avg_gen_latency = gen_latency / len(args.text)
41 print(
42 f"Initializing time: {init_time:.2f} seconds.\n"
43 f"Overall time: {overall_time:.2f} seconds. \n"
44 f"Generation latency: {gen_latency:.2f} seconds. \n"
45 f"Average generation latency: {avg_gen_latency:.2f} seconds. \n"
46 )
47
[end of examples/language/grok-1/inference.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | 
diff --git a/examples/language/grok-1/inference.py b/examples/language/grok-1/inference.py
--- a/examples/language/grok-1/inference.py
+++ b/examples/language/grok-1/inference.py
@@ -1,7 +1,7 @@
import time
import torch
-from transformers import AutoModelForCausalLM, LlamaTokenizerFast
+from transformers import AutoModelForCausalLM, AutoTokenizer
from utils import get_defualt_parser, inference, print_output
if __name__ == "__main__":
@@ -9,6 +9,9 @@
args = parser.parse_args()
start = time.time()
torch.set_default_dtype(torch.bfloat16)
+
+ tokenizer = AutoTokenizer.from_pretrained(args.pretrained, trust_remote_code=True)
+
model = AutoModelForCausalLM.from_pretrained(
args.pretrained,
trust_remote_code=True,
@@ -18,10 +21,6 @@
model.eval()
init_time = time.time() - start
- # A transformers-compatible version of the grok-1 tokenizer by Xenova
- # https://huggingface.co/Xenova/grok-1-tokenizer
- tokenizer = LlamaTokenizerFast.from_pretrained("Xenova/grok-1-tokenizer")
-
for text in args.text:
output = inference(
model,
diff --git a/examples/language/grok-1/inference_tp.py b/examples/language/grok-1/inference_tp.py
--- a/examples/language/grok-1/inference_tp.py
+++ b/examples/language/grok-1/inference_tp.py
@@ -2,7 +2,7 @@
import torch
from grok1_policy import Grok1ForCausalLMPolicy
-from transformers import AutoModelForCausalLM, LlamaTokenizerFast
+from transformers import AutoModelForCausalLM, AutoTokenizer
from utils import get_defualt_parser, inference, print_output
import colossalai
@@ -27,6 +27,9 @@
)
booster = Booster(plugin=plugin)
torch.set_default_dtype(torch.bfloat16)
+
+ tokenizer = AutoTokenizer.from_pretrained(args.pretrained, trust_remote_code=True)
+
with LazyInitContext(default_device=get_current_device()):
model = AutoModelForCausalLM.from_pretrained(
args.pretrained, trust_remote_code=True, torch_dtype=torch.bfloat16
@@ -35,10 +38,6 @@
model.eval()
init_time = time.time() - start
- # A transformers-compatible version of the grok-1 tokenizer by Xenova
- # https://huggingface.co/Xenova/grok-1-tokenizer
- tokenizer = LlamaTokenizerFast.from_pretrained("Xenova/grok-1-tokenizer")
-
for text in args.text:
output = inference(
model.unwrap(),
| {"golden_diff": "diff --git a/examples/language/grok-1/inference.py b/examples/language/grok-1/inference.py\n--- a/examples/language/grok-1/inference.py\n+++ b/examples/language/grok-1/inference.py\n@@ -1,7 +1,7 @@\n import time\n \n import torch\n-from transformers import AutoModelForCausalLM, LlamaTokenizerFast\n+from transformers import AutoModelForCausalLM, AutoTokenizer\n from utils import get_defualt_parser, inference, print_output\n \n if __name__ == \"__main__\":\n@@ -9,6 +9,9 @@\n args = parser.parse_args()\n start = time.time()\n torch.set_default_dtype(torch.bfloat16)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.pretrained, trust_remote_code=True)\n+\n model = AutoModelForCausalLM.from_pretrained(\n args.pretrained,\n trust_remote_code=True,\n@@ -18,10 +21,6 @@\n model.eval()\n init_time = time.time() - start\n \n- # A transformers-compatible version of the grok-1 tokenizer by Xenova\n- # https://huggingface.co/Xenova/grok-1-tokenizer\n- tokenizer = LlamaTokenizerFast.from_pretrained(\"Xenova/grok-1-tokenizer\")\n-\n for text in args.text:\n output = inference(\n model,\ndiff --git a/examples/language/grok-1/inference_tp.py b/examples/language/grok-1/inference_tp.py\n--- a/examples/language/grok-1/inference_tp.py\n+++ b/examples/language/grok-1/inference_tp.py\n@@ -2,7 +2,7 @@\n \n import torch\n from grok1_policy import Grok1ForCausalLMPolicy\n-from transformers import AutoModelForCausalLM, LlamaTokenizerFast\n+from transformers import AutoModelForCausalLM, AutoTokenizer\n from utils import get_defualt_parser, inference, print_output\n \n import colossalai\n@@ -27,6 +27,9 @@\n )\n booster = Booster(plugin=plugin)\n torch.set_default_dtype(torch.bfloat16)\n+\n+ tokenizer = AutoTokenizer.from_pretrained(args.pretrained, trust_remote_code=True)\n+\n with LazyInitContext(default_device=get_current_device()):\n model = AutoModelForCausalLM.from_pretrained(\n args.pretrained, trust_remote_code=True, torch_dtype=torch.bfloat16\n@@ -35,10 +38,6 @@\n model.eval()\n init_time = time.time() - start\n \n- # A transformers-compatible version of the grok-1 tokenizer by Xenova\n- # https://huggingface.co/Xenova/grok-1-tokenizer\n- tokenizer = LlamaTokenizerFast.from_pretrained(\"Xenova/grok-1-tokenizer\")\n-\n for text in args.text:\n output = inference(\n model.unwrap(),\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import time\n\nimport torch\nfrom grok1_policy import Grok1ForCausalLMPolicy\nfrom transformers import AutoModelForCausalLM, LlamaTokenizerFast\nfrom utils import get_defualt_parser, inference, print_output\n\nimport colossalai\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import HybridParallelPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.lazy import LazyInitContext\nfrom colossalai.utils import get_current_device\n\nif __name__ == \"__main__\":\n parser = get_defualt_parser()\n args = parser.parse_args()\n start = time.time()\n colossalai.launch_from_torch({})\n coordinator = DistCoordinator()\n plugin = HybridParallelPlugin(\n tp_size=coordinator.world_size,\n pp_size=1,\n precision=\"bf16\",\n parallel_output=False,\n custom_policy=Grok1ForCausalLMPolicy(),\n )\n booster = Booster(plugin=plugin)\n torch.set_default_dtype(torch.bfloat16)\n with LazyInitContext(default_device=get_current_device()):\n model = AutoModelForCausalLM.from_pretrained(\n args.pretrained, trust_remote_code=True, torch_dtype=torch.bfloat16\n )\n 
model, *_ = booster.boost(model)\n model.eval()\n init_time = time.time() - start\n\n # A transformers-compatible version of the grok-1 tokenizer by Xenova\n # https://huggingface.co/Xenova/grok-1-tokenizer\n tokenizer = LlamaTokenizerFast.from_pretrained(\"Xenova/grok-1-tokenizer\")\n\n for text in args.text:\n output = inference(\n model.unwrap(),\n tokenizer,\n text,\n max_new_tokens=args.max_new_tokens,\n do_sample=args.do_sample,\n temperature=args.temperature,\n top_k=args.top_k,\n top_p=args.top_p,\n )\n if coordinator.is_master():\n print_output(text, tokenizer.decode(output))\n\n overall_time = time.time() - start\n gen_latency = overall_time - init_time\n avg_gen_latency = gen_latency / len(args.text)\n coordinator.print_on_master(\n f\"Initializing time: {init_time:.2f} seconds.\\n\"\n f\"Overall time: {overall_time:.2f} seconds. \\n\"\n f\"Generation latency: {gen_latency:.2f} seconds. \\n\"\n f\"Average generation latency: {avg_gen_latency:.2f} seconds. \\n\"\n )\n", "path": "examples/language/grok-1/inference_tp.py"}, {"content": "import time\n\nimport torch\nfrom transformers import AutoModelForCausalLM, LlamaTokenizerFast\nfrom utils import get_defualt_parser, inference, print_output\n\nif __name__ == \"__main__\":\n parser = get_defualt_parser()\n args = parser.parse_args()\n start = time.time()\n torch.set_default_dtype(torch.bfloat16)\n model = AutoModelForCausalLM.from_pretrained(\n args.pretrained,\n trust_remote_code=True,\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,\n )\n model.eval()\n init_time = time.time() - start\n\n # A transformers-compatible version of the grok-1 tokenizer by Xenova\n # https://huggingface.co/Xenova/grok-1-tokenizer\n tokenizer = LlamaTokenizerFast.from_pretrained(\"Xenova/grok-1-tokenizer\")\n\n for text in args.text:\n output = inference(\n model,\n tokenizer,\n text,\n max_new_tokens=args.max_new_tokens,\n do_sample=args.do_sample,\n temperature=args.temperature,\n top_k=args.top_k,\n top_p=args.top_p,\n )\n print_output(text, tokenizer.decode(output))\n\n overall_time = time.time() - start\n gen_latency = overall_time - init_time\n avg_gen_latency = gen_latency / len(args.text)\n print(\n f\"Initializing time: {init_time:.2f} seconds.\\n\"\n f\"Overall time: {overall_time:.2f} seconds. \\n\"\n f\"Generation latency: {gen_latency:.2f} seconds. \\n\"\n f\"Average generation latency: {avg_gen_latency:.2f} seconds. \\n\"\n )\n", "path": "examples/language/grok-1/inference.py"}]} | 1,698 | 623 |
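
Both grok-1 diffs above replace the hard-coded `LlamaTokenizerFast.from_pretrained('Xenova/grok-1-tokenizer')` with the checkpoint's own tokenizer. A sketch of that pattern, assuming the `transformers` package is installed:

```python
from transformers import AutoTokenizer


def load_tokenizer(pretrained_path):
    # AutoTokenizer resolves whatever tokenizer class the checkpoint
    # declares; trust_remote_code is needed when the repository ships
    # custom modeling or tokenizer code.
    return AutoTokenizer.from_pretrained(pretrained_path, trust_remote_code=True)
```
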
gh_patches_debug_10285 | rasdani/github-patches | git_diff | certbot__certbot-6002 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Certbot crashes if EFF e-mail submission endpoint is unavailable
Certbot will crash if the user's e-mail information can't be submitted to the EFF endpoint:
https://community.letsencrypt.org/t/issue-when-trying-to-create-certificate/59307/1
https://github.com/certbot/certbot/blob/master/certbot/eff.py#L74
It crashes with a somewhat relevant error if the endpoint is unavailable, but a confusing error if the endpoint returns something other than JSON. It should probably not crash at all in either case. If the endpoint specifically says that the user's e-mail address is malformed, that's probably worth stopping to report to the user, but if the endpoint simply doesn't reply usefully, it's probably preferable to continue since I wouldn't think we really want to make users wait in this situation.
(Adding the "bug" tag at least for the case where the endpoint returns a non-JSON reply because the error shown to the user in this case is unhelpful.)
</issue>
<code>
[start of certbot/eff.py]
1 """Subscribes users to the EFF newsletter."""
2 import logging
3
4 import requests
5 import zope.component
6
7 from certbot import constants
8 from certbot import interfaces
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def handle_subscription(config):
15 """High level function to take care of EFF newsletter subscriptions.
16
17 The user may be asked if they want to sign up for the newsletter if
18 they have not already specified.
19
20 :param .IConfig config: Client configuration.
21
22 """
23 if config.email is None:
24 if config.eff_email:
25 _report_failure("you didn't provide an e-mail address")
26 return
27 if config.eff_email is None:
28 config.eff_email = _want_subscription()
29 if config.eff_email:
30 subscribe(config.email)
31
32
33 def _want_subscription():
34 """Does the user want to be subscribed to the EFF newsletter?
35
36 :returns: True if we should subscribe the user, otherwise, False
37 :rtype: bool
38
39 """
40 prompt = (
41 'Would you be willing to share your email address with the '
42 "Electronic Frontier Foundation, a founding partner of the Let's "
43 'Encrypt project and the non-profit organization that develops '
44 "Certbot? We'd like to send you email about EFF and our work to "
45 'encrypt the web, protect its users and defend digital rights.')
46 display = zope.component.getUtility(interfaces.IDisplay)
47 return display.yesno(prompt, default=False)
48
49
50 def subscribe(email):
51 """Subscribe the user to the EFF mailing list.
52
53 :param str email: the e-mail address to subscribe
54
55 """
56 url = constants.EFF_SUBSCRIBE_URI
57 data = {'data_type': 'json',
58 'email': email,
59 'form_id': 'eff_supporters_library_subscribe_form'}
60 logger.debug('Sending POST request to %s:\n%s', url, data)
61 _check_response(requests.post(url, data=data))
62
63
64 def _check_response(response):
65 """Check for errors in the server's response.
66
67 If an error occurred, it will be reported to the user.
68
69 :param requests.Response response: the server's response to the
70 subscription request
71
72 """
73 logger.debug('Received response:\n%s', response.content)
74 if response.ok:
75 if not response.json()['status']:
76 _report_failure('your e-mail address appears to be invalid')
77 else:
78 _report_failure()
79
80
81 def _report_failure(reason=None):
82 """Notify the user of failing to sign them up for the newsletter.
83
84 :param reason: a phrase describing what the problem was
85 beginning with a lowercase letter and no closing punctuation
86 :type reason: `str` or `None`
87
88 """
89 msg = ['We were unable to subscribe you the EFF mailing list']
90 if reason is not None:
91 msg.append(' because ')
92 msg.append(reason)
93 msg.append('. You can try again later by visiting https://act.eff.org.')
94 reporter = zope.component.getUtility(interfaces.IReporter)
95 reporter.add_message(''.join(msg), reporter.LOW_PRIORITY)
96
[end of certbot/eff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | 
diff --git a/certbot/eff.py b/certbot/eff.py
--- a/certbot/eff.py
+++ b/certbot/eff.py
@@ -71,11 +71,14 @@
"""
logger.debug('Received response:\n%s', response.content)
- if response.ok:
- if not response.json()['status']:
+ try:
+ response.raise_for_status()
+ if response.json()['status'] == False:
_report_failure('your e-mail address appears to be invalid')
- else:
+ except requests.exceptions.HTTPError:
_report_failure()
+ except (ValueError, KeyError):
+ _report_failure('there was a problem with the server response')
def _report_failure(reason=None):
| {"golden_diff": "diff --git a/certbot/eff.py b/certbot/eff.py\n--- a/certbot/eff.py\n+++ b/certbot/eff.py\n@@ -71,11 +71,14 @@\n \n \"\"\"\n logger.debug('Received response:\\n%s', response.content)\n- if response.ok:\n- if not response.json()['status']:\n+ try:\n+ response.raise_for_status()\n+ if response.json()['status'] == False:\n _report_failure('your e-mail address appears to be invalid')\n- else:\n+ except requests.exceptions.HTTPError:\n _report_failure()\n+ except (ValueError, KeyError):\n+ _report_failure('there was a problem with the server response')\n \n \n def _report_failure(reason=None):\n", "issue": "Certbot crashes if EFF e-mail submission endpoint is unavailable\nCertbot will crash if the user's e-mail information can't be submitted to the EFF endpoint:\r\n\r\nhttps://community.letsencrypt.org/t/issue-when-trying-to-create-certificate/59307/1\r\n\r\nhttps://github.com/certbot/certbot/blob/master/certbot/eff.py#L74\r\n\r\nIt crashes with a somewhat relevant error if the endpoint is unavailable, but a confusing error if the endpoint returns something other than JSON. It should probably not crash at all in either case. If the endpoint specifically says that the user's e-mail address is malformed, that's probably worth stopping to report to the user, but if the endpoint simply doesn't reply usefully, it's probably preferable to continue since I wouldn't think we really want to make users wait in this situation.\r\n\r\n(Adding the \"bug\" tag at least for the case where the endpoint returns a non-JSON reply because the error shown to the user in this case is unhelpful.)\n", "before_files": [{"content": "\"\"\"Subscribes users to the EFF newsletter.\"\"\"\nimport logging\n\nimport requests\nimport zope.component\n\nfrom certbot import constants\nfrom certbot import interfaces\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef handle_subscription(config):\n \"\"\"High level function to take care of EFF newsletter subscriptions.\n\n The user may be asked if they want to sign up for the newsletter if\n they have not already specified.\n\n :param .IConfig config: Client configuration.\n\n \"\"\"\n if config.email is None:\n if config.eff_email:\n _report_failure(\"you didn't provide an e-mail address\")\n return\n if config.eff_email is None:\n config.eff_email = _want_subscription()\n if config.eff_email:\n subscribe(config.email)\n\n\ndef _want_subscription():\n \"\"\"Does the user want to be subscribed to the EFF newsletter?\n\n :returns: True if we should subscribe the user, otherwise, False\n :rtype: bool\n\n \"\"\"\n prompt = (\n 'Would you be willing to share your email address with the '\n \"Electronic Frontier Foundation, a founding partner of the Let's \"\n 'Encrypt project and the non-profit organization that develops '\n \"Certbot? 
We'd like to send you email about EFF and our work to \"\n 'encrypt the web, protect its users and defend digital rights.')\n display = zope.component.getUtility(interfaces.IDisplay)\n return display.yesno(prompt, default=False)\n\n\ndef subscribe(email):\n \"\"\"Subscribe the user to the EFF mailing list.\n\n :param str email: the e-mail address to subscribe\n\n \"\"\"\n url = constants.EFF_SUBSCRIBE_URI\n data = {'data_type': 'json',\n 'email': email,\n 'form_id': 'eff_supporters_library_subscribe_form'}\n logger.debug('Sending POST request to %s:\\n%s', url, data)\n _check_response(requests.post(url, data=data))\n\n\ndef _check_response(response):\n \"\"\"Check for errors in the server's response.\n\n If an error occurred, it will be reported to the user.\n\n :param requests.Response response: the server's response to the\n subscription request\n\n \"\"\"\n logger.debug('Received response:\\n%s', response.content)\n if response.ok:\n if not response.json()['status']:\n _report_failure('your e-mail address appears to be invalid')\n else:\n _report_failure()\n\n\ndef _report_failure(reason=None):\n \"\"\"Notify the user of failing to sign them up for the newsletter.\n\n :param reason: a phrase describing what the problem was\n beginning with a lowercase letter and no closing punctuation\n :type reason: `str` or `None`\n\n \"\"\"\n msg = ['We were unable to subscribe you the EFF mailing list']\n if reason is not None:\n msg.append(' because ')\n msg.append(reason)\n msg.append('. You can try again later by visiting https://act.eff.org.')\n reporter = zope.component.getUtility(interfaces.IReporter)\n reporter.add_message(''.join(msg), reporter.LOW_PRIORITY)\n", "path": "certbot/eff.py"}]} | 1,596 | 167 |
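
The certbot diff replaces `if response.ok` with explicit exception handling so a non-JSON or error reply is reported to the user instead of crashing. A standalone sketch of the same pattern; the failure messages are illustrative:

```python
import requests


def check_response(response):
    try:
        response.raise_for_status()
        if response.json().get("status") is False:
            return "your e-mail address appears to be invalid"
    except requests.exceptions.HTTPError:
        return "the subscription endpoint returned an error"
    except ValueError:  # response body was not valid JSON
        return "there was a problem with the server response"
    return None  # success
```
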
gh_patches_debug_34333 | rasdani/github-patches | git_diff | internetarchive__openlibrary-8896 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove fixed subject line from contact form
The contact form has a drop down for choosing a topic. When patrons submit this form, the submissions all go to the same inbox. The selected topics are usually incorrect, but also make it more difficult to manage the inbox due to most messages looking the same.
### Describe the problem that you'd like solved
Let the patron populate the subject line OR auto-populate the subject line from the message.
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
</issue>
<code>
[start of openlibrary/plugins/openlibrary/support.py]
1 import hashlib
2
3 import web
4 import logging
5
6 from infogami import config
7 from infogami.utils import delegate
8 from infogami.utils.view import render_template
9
10 from openlibrary import accounts
11 from openlibrary.core import stats
12 from openlibrary.core.cache import get_memcache
13 from openlibrary.plugins.upstream.addbook import get_recaptcha
14 from openlibrary.utils.dateutil import MINUTE_SECS
15
16 logger = logging.getLogger("openlibrary")
17
18
19 class contact(delegate.page):
20 def GET(self):
21 i = web.input(path=None)
22 user = accounts.get_current_user()
23 email = user and user.email
24
25 hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()
26 has_emailed_recently = get_memcache().get('contact-POST-%s' % hashed_ip)
27 recaptcha = has_emailed_recently and get_recaptcha()
28 return render_template("support", email=email, url=i.path, recaptcha=recaptcha)
29
30 def POST(self):
31 form = web.input()
32 patron_name = form.get("name", "")
33 email = form.get("email", "")
34 topic = form.get("topic", "")
35 description = form.get("question", "")
36 url = form.get("url", "")
37 user = accounts.get_current_user()
38 useragent = web.ctx.env.get("HTTP_USER_AGENT", "")
39 if not all([email, topic, description]):
40 return ""
41
42 hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()
43 has_emailed_recently = get_memcache().get('contact-POST-%s' % hashed_ip)
44 if has_emailed_recently:
45 recap = get_recaptcha()
46 if recap and not recap.validate():
47 return render_template(
48 "message.html",
49 'Recaptcha solution was incorrect',
50 (
51 'Please <a href="javascript:history.back()">go back</a> and try '
52 'again.'
53 ),
54 )
55
56 default_assignees = config.get("support_default_assignees", {})
57 if (topic_key := str(topic.replace(" ", "_").lower())) in default_assignees:
58 assignee = default_assignees.get(topic_key)
59 else:
60 assignee = default_assignees.get("default", "[email protected]")
61 stats.increment("ol.support.all")
62 subject = "Support case *%s*" % topic
63
64 url = web.ctx.home + url
65 displayname = user and user.get_name() or ""
66 username = user and user.get_username() or ""
67
68 message = SUPPORT_EMAIL_TEMPLATE % locals()
69 sendmail(email, assignee, subject, message)
70
71 get_memcache().set(
72 'contact-POST-%s' % hashed_ip, "true", expires=15 * MINUTE_SECS
73 )
74 return render_template("email/case_created", assignee)
75
76
77 def sendmail(from_address, to_address, subject, message):
78 if config.get('dummy_sendmail'):
79 msg = (
80 f'To: {to_address}\n'
81 f'From:{from_address}\n'
82 f'Subject:{subject}\n'
83 f'\n{web.safestr(message)}'
84 )
85
86 logger.info("sending email:\n%s", msg)
87 else:
88 web.sendmail(from_address, to_address, subject, message)
89
90
91 SUPPORT_EMAIL_TEMPLATE = """
92
93 Description:\n
94 %(description)s
95
96 A new support case has been filed by %(displayname)s <%(email)s>.
97
98 Topic: %(topic)s
99 URL: %(url)s
100 User-Agent: %(useragent)s
101 OL-username: %(username)s
102 Patron-name: %(patron_name)s
103 """
104
105
106 def setup():
107 pass
108
[end of openlibrary/plugins/openlibrary/support.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | 
diff --git a/openlibrary/plugins/openlibrary/support.py b/openlibrary/plugins/openlibrary/support.py
--- a/openlibrary/plugins/openlibrary/support.py
+++ b/openlibrary/plugins/openlibrary/support.py
@@ -32,11 +32,12 @@
patron_name = form.get("name", "")
email = form.get("email", "")
topic = form.get("topic", "")
+ subject_line = form.get('subject', '')
description = form.get("question", "")
url = form.get("url", "")
user = accounts.get_current_user()
useragent = web.ctx.env.get("HTTP_USER_AGENT", "")
- if not all([email, topic, description]):
+ if not all([email, description]):
return ""
hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()
@@ -59,7 +60,7 @@
else:
assignee = default_assignees.get("default", "[email protected]")
stats.increment("ol.support.all")
- subject = "Support case *%s*" % topic
+ subject = "Support case *%s*" % self.prepare_subject_line(subject_line)
url = web.ctx.home + url
displayname = user and user.get_name() or ""
@@ -73,6 +74,14 @@
)
return render_template("email/case_created", assignee)
+ def prepare_subject_line(self, subject, max_length=60):
+ if not subject:
+ return '[no subject]'
+ if len(subject) <= max_length:
+ return subject
+
+ return subject[:max_length]
+
def sendmail(from_address, to_address, subject, message):
if config.get('dummy_sendmail'):
@@ -95,7 +104,7 @@
A new support case has been filed by %(displayname)s <%(email)s>.
-Topic: %(topic)s
+Subject: %(subject_line)s
URL: %(url)s
User-Agent: %(useragent)s
OL-username: %(username)s
| {"golden_diff": "diff --git a/openlibrary/plugins/openlibrary/support.py b/openlibrary/plugins/openlibrary/support.py\n--- a/openlibrary/plugins/openlibrary/support.py\n+++ b/openlibrary/plugins/openlibrary/support.py\n@@ -32,11 +32,12 @@\n patron_name = form.get(\"name\", \"\")\n email = form.get(\"email\", \"\")\n topic = form.get(\"topic\", \"\")\n+ subject_line = form.get('subject', '')\n description = form.get(\"question\", \"\")\n url = form.get(\"url\", \"\")\n user = accounts.get_current_user()\n useragent = web.ctx.env.get(\"HTTP_USER_AGENT\", \"\")\n- if not all([email, topic, description]):\n+ if not all([email, description]):\n return \"\"\n \n hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()\n@@ -59,7 +60,7 @@\n else:\n assignee = default_assignees.get(\"default\", \"[email protected]\")\n stats.increment(\"ol.support.all\")\n- subject = \"Support case *%s*\" % topic\n+ subject = \"Support case *%s*\" % self.prepare_subject_line(subject_line)\n \n url = web.ctx.home + url\n displayname = user and user.get_name() or \"\"\n@@ -73,6 +74,14 @@\n )\n return render_template(\"email/case_created\", assignee)\n \n+ def prepare_subject_line(self, subject, max_length=60):\n+ if not subject:\n+ return '[no subject]'\n+ if len(subject) <= max_length:\n+ return subject\n+\n+ return subject[:max_length]\n+\n \n def sendmail(from_address, to_address, subject, message):\n if config.get('dummy_sendmail'):\n@@ -95,7 +104,7 @@\n \n A new support case has been filed by %(displayname)s <%(email)s>.\n \n-Topic: %(topic)s\n+Subject: %(subject_line)s\n URL: %(url)s\n User-Agent: %(useragent)s\n OL-username: %(username)s\n", "issue": "Remove fixed subject line from contact form\nThe contact form has a drop down for choosing a topic. When patrons submit this form, the submissions all go to the same inbox. The selected topics are usually incorrect, but also make it more difficult to manage the inbox due to most messages looking the same.\r\n\r\n### Describe the problem that you'd like solved\r\nLet the patron populate the subject line OR auto-populate the subject line from the message.\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\r\n\r\n\n", "before_files": [{"content": "import hashlib\n\nimport web\nimport logging\n\nfrom infogami import config\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template\n\nfrom openlibrary import accounts\nfrom openlibrary.core import stats\nfrom openlibrary.core.cache import get_memcache\nfrom openlibrary.plugins.upstream.addbook import get_recaptcha\nfrom openlibrary.utils.dateutil import MINUTE_SECS\n\nlogger = logging.getLogger(\"openlibrary\")\n\n\nclass contact(delegate.page):\n def GET(self):\n i = web.input(path=None)\n user = accounts.get_current_user()\n email = user and user.email\n\n hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()\n has_emailed_recently = get_memcache().get('contact-POST-%s' % hashed_ip)\n recaptcha = has_emailed_recently and get_recaptcha()\n return render_template(\"support\", email=email, url=i.path, recaptcha=recaptcha)\n\n def POST(self):\n form = web.input()\n patron_name = form.get(\"name\", \"\")\n email = form.get(\"email\", \"\")\n topic = form.get(\"topic\", \"\")\n description = form.get(\"question\", \"\")\n url = form.get(\"url\", \"\")\n user = accounts.get_current_user()\n useragent = web.ctx.env.get(\"HTTP_USER_AGENT\", \"\")\n if not all([email, topic, description]):\n return \"\"\n\n hashed_ip = hashlib.md5(web.ctx.ip.encode('utf-8')).hexdigest()\n has_emailed_recently = get_memcache().get('contact-POST-%s' % hashed_ip)\n if has_emailed_recently:\n recap = get_recaptcha()\n if recap and not recap.validate():\n return render_template(\n \"message.html\",\n 'Recaptcha solution was incorrect',\n (\n 'Please <a href=\"javascript:history.back()\">go back</a> and try '\n 'again.'\n ),\n )\n\n default_assignees = config.get(\"support_default_assignees\", {})\n if (topic_key := str(topic.replace(\" \", \"_\").lower())) in default_assignees:\n assignee = default_assignees.get(topic_key)\n else:\n assignee = default_assignees.get(\"default\", \"[email protected]\")\n stats.increment(\"ol.support.all\")\n subject = \"Support case *%s*\" % topic\n\n url = web.ctx.home + url\n displayname = user and user.get_name() or \"\"\n username = user and user.get_username() or \"\"\n\n message = SUPPORT_EMAIL_TEMPLATE % locals()\n sendmail(email, assignee, subject, message)\n\n get_memcache().set(\n 'contact-POST-%s' % hashed_ip, \"true\", expires=15 * MINUTE_SECS\n )\n return render_template(\"email/case_created\", assignee)\n\n\ndef sendmail(from_address, to_address, subject, message):\n if config.get('dummy_sendmail'):\n msg = (\n f'To: {to_address}\\n'\n f'From:{from_address}\\n'\n f'Subject:{subject}\\n'\n f'\\n{web.safestr(message)}'\n )\n\n logger.info(\"sending email:\\n%s\", msg)\n else:\n web.sendmail(from_address, to_address, subject, message)\n\n\nSUPPORT_EMAIL_TEMPLATE = \"\"\"\n\nDescription:\\n\n%(description)s\n\nA new support case has been filed by %(displayname)s <%(email)s>.\n\nTopic: %(topic)s\nURL: %(url)s\nUser-Agent: %(useragent)s\nOL-username: %(username)s\nPatron-name: %(patron_name)s\n\"\"\"\n\n\ndef setup():\n pass\n", "path": "openlibrary/plugins/openlibrary/support.py"}]} | 1,708 | 449 |
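
The support-form diff derives the e-mail subject from a patron-supplied `subject` field and truncates it. Because slicing never raises on short strings, the helper it adds reduces to a two-branch function, sketched here:

```python
def prepare_subject_line(subject, max_length=60):
    if not subject:
        return "[no subject]"
    # Hard-truncate so the mailbox list view stays scannable.
    return subject[:max_length]
```
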
gh_patches_debug_30256 | rasdani/github-patches | git_diff | TheAlgorithms__Python-3949 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
web_programming/fetch_github_info.py err
<pre>message: Bad credentials. The API can't be accessed using username/password authentication. Please create a personal access token to access this endpoint: http://github.com/settings/tokens
documentation_url: https://docs.github.com/articles/creating-a-personal-access-token-for-the-command-line
</pre>
</issue>
<code>
[start of web_programming/fetch_github_info.py]
1 #!/usr/bin/env python3
2
3 """
4 Created by sarathkaul on 14/11/19
5
6 Basic authentication using an API password is deprecated and will soon no longer work.
7 Visit https://developer.github.com/changes/2020-02-14-deprecating-password-auth
8 for more information around suggested workarounds and removal dates.
9 """
10
11
12 import requests
13
14 _GITHUB_API = "https://api.github.com/user"
15
16
17 def fetch_github_info(auth_user: str, auth_pass: str) -> dict:
18 """
19 Fetch GitHub info of a user using the requests module
20 """
21 return requests.get(_GITHUB_API, auth=(auth_user, auth_pass)).json()
22
23
24 if __name__ == "__main__":
25 for key, value in fetch_github_info("<USER NAME>", "<PASSWORD>").items():
26 print(f"{key}: {value}")
27
[end of web_programming/fetch_github_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | 
diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py
--- a/web_programming/fetch_github_info.py
+++ b/web_programming/fetch_github_info.py
@@ -1,26 +1,50 @@
#!/usr/bin/env python3
-
"""
Created by sarathkaul on 14/11/19
+Updated by lawric1 on 24/11/20
-Basic authentication using an API password is deprecated and will soon no longer work.
-Visit https://developer.github.com/changes/2020-02-14-deprecating-password-auth
-for more information around suggested workarounds and removal dates.
-"""
+Authentication will be made via access token.
+To generate your personal access token visit https://github.com/settings/tokens.
+
+NOTE:
+Never hardcode any credential information in the code. Always use an environment
+file to store the private information and use the `os` module to get the information
+during runtime.
+Create a ".env" file in the root directory and write these two lines in that file
+with your token::
+
+#!/usr/bin/env bash
+export USER_TOKEN=""
+"""
+import os
+from typing import Any, Dict
import requests
-_GITHUB_API = "https://api.github.com/user"
+BASE_URL = "https://api.github.com"
+# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
+AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
-def fetch_github_info(auth_user: str, auth_pass: str) -> dict:
+# https://github.com/settings/tokens
+USER_TOKEN = os.environ.get("USER_TOKEN", "")
+
+
+def fetch_github_info(auth_token: str) -> Dict[Any, Any]:
"""
Fetch GitHub info of a user using the requests module
"""
- return requests.get(_GITHUB_API, auth=(auth_user, auth_pass)).json()
-
-
-if __name__ == "__main__":
- for key, value in fetch_github_info("<USER NAME>", "<PASSWORD>").items():
- print(f"{key}: {value}")
+ headers = {
+ "Authorization": f"token {auth_token}",
+ "Accept": "application/vnd.github.v3+json",
+ }
+ return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
+
+
+if __name__ == "__main__": # pragma: no cover
+ if USER_TOKEN:
+ for key, value in fetch_github_info(USER_TOKEN).items():
+ print(f"{key}: {value}")
+ else:
+ raise ValueError("'USER_TOKEN' field cannot be empty.")
| {"golden_diff": "diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py\n--- a/web_programming/fetch_github_info.py\n+++ b/web_programming/fetch_github_info.py\n@@ -1,26 +1,50 @@\n #!/usr/bin/env python3\n-\n \"\"\"\n Created by sarathkaul on 14/11/19\n+Updated by lawric1 on 24/11/20\n \n-Basic authentication using an API password is deprecated and will soon no longer work.\n-Visit https://developer.github.com/changes/2020-02-14-deprecating-password-auth\n-for more information around suggested workarounds and removal dates.\n-\"\"\"\n+Authentication will be made via access token.\n+To generate your personal access token visit https://github.com/settings/tokens.\n+\n+NOTE:\n+Never hardcode any credential information in the code. Always use an environment\n+file to store the private information and use the `os` module to get the information\n+during runtime.\n \n+Create a \".env\" file in the root directory and write these two lines in that file\n+with your token::\n+\n+#!/usr/bin/env bash\n+export USER_TOKEN=\"\"\n+\"\"\"\n+import os\n+from typing import Any, Dict\n \n import requests\n \n-_GITHUB_API = \"https://api.github.com/user\"\n+BASE_URL = \"https://api.github.com\"\n \n+# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user\n+AUTHENTICATED_USER_ENDPOINT = BASE_URL + \"/user\"\n \n-def fetch_github_info(auth_user: str, auth_pass: str) -> dict:\n+# https://github.com/settings/tokens\n+USER_TOKEN = os.environ.get(\"USER_TOKEN\", \"\")\n+\n+\n+def fetch_github_info(auth_token: str) -> Dict[Any, Any]:\n \"\"\"\n Fetch GitHub info of a user using the requests module\n \"\"\"\n- return requests.get(_GITHUB_API, auth=(auth_user, auth_pass)).json()\n-\n-\n-if __name__ == \"__main__\":\n- for key, value in fetch_github_info(\"<USER NAME>\", \"<PASSWORD>\").items():\n- print(f\"{key}: {value}\")\n+ headers = {\n+ \"Authorization\": f\"token {auth_token}\",\n+ \"Accept\": \"application/vnd.github.v3+json\",\n+ }\n+ return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()\n+\n+\n+if __name__ == \"__main__\": # pragma: no cover\n+ if USER_TOKEN:\n+ for key, value in fetch_github_info(USER_TOKEN).items():\n+ print(f\"{key}: {value}\")\n+ else:\n+ raise ValueError(\"'USER_TOKEN' field cannot be empty.\")\n", "issue": "web_programming/fetch_github_info.py err\n<pre>message: Bad credentials. The API can't be accessed using username/password authentication. Please create a personal access token to access this endpoint: http://github.com/settings/tokens\r\ndocumentation_url: https://docs.github.com/articles/creating-a-personal-access-token-for-the-command-line\r\n</pre>\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nCreated by sarathkaul on 14/11/19\n\nBasic authentication using an API password is deprecated and will soon no longer work.\nVisit https://developer.github.com/changes/2020-02-14-deprecating-password-auth\nfor more information around suggested workarounds and removal dates.\n\"\"\"\n\n\nimport requests\n\n_GITHUB_API = \"https://api.github.com/user\"\n\n\ndef fetch_github_info(auth_user: str, auth_pass: str) -> dict:\n \"\"\"\n Fetch GitHub info of a user using the requests module\n \"\"\"\n return requests.get(_GITHUB_API, auth=(auth_user, auth_pass)).json()\n\n\nif __name__ == \"__main__\":\n for key, value in fetch_github_info(\"<USER NAME>\", \"<PASSWORD>\").items():\n print(f\"{key}: {value}\")\n", "path": "web_programming/fetch_github_info.py"}]} | 855 | 598 |
gh_patches_debug_14459 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[clean up] Should check that tf is running eagerly early,
so the program can fail early if tf is configured incorrectly.
</issue>
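For reference, a minimal sketch of the kind of import-time check the issue is asking for (module-level placement and the exact message are assumptions for illustration):

```python
import tensorflow as tf

# fail fast at import time instead of deep inside a training loop
assert tf.executing_eagerly(), "TensorFlow must be configured for eager execution"
```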
<code>
[start of elasticdl/worker/worker.py]
1 import tensorflow as tf
2 from tensorflow.python.ops import math_ops
3 from proto import master_pb2
4 from proto import master_pb2_grpc
5
6
7 class Worker(object):
8 """ElasticDL worker"""
9
10 def __init__(self, model_cls, input_fn, opt_fn, channel=None):
11 """
12 Arguments:
13 model_cls: A class to define the model, which contains funcs
14 get_keras_model: return the keras model defined in the class, with a tf dataset as its input
15                 output(data): get model output from data as input, either a single output or a dict of outputs
16 loss(output, data): get model loss from output and data as input
17             input_fn: a func to get a dataset, which can be used as the keras model input
18 dataset = input_fn(dict_of_params)
19 dict_of_params from GetTask for DistributedTrain, from kwargs for LocalTrain
20 opt_fn: a func to get the optimizer
21 channel: grpc channel
22 """
23
24 self._model_cls = model_cls()
25 self._keras_model = self._model_cls.get_keras_model()
26 self._input_fn = input_fn
27 self._opt_fn = opt_fn
28 if channel is None:
29 self._stub = None
30 else:
31 self._stub = master_pb2_grpc.MasterStub(channel)
32 self._model_version = -1
33
34 def get_task(self):
35 # TODO: get task from master
36 pass
37
38 def get_model(self):
39 # TODO: get model from master
40 pass
41
42 def report_task_result(self):
43 # TODO: report task result to master
44 pass
45
46 def report_gradient(self):
47 # TODO: report gradient to ps
48 pass
49
50 def distributed_train(self):
51 # TODO: distributed training
52 pass
53
54 def local_train(self, batch_size, epoch=1, kwargs=None):
55 """
56         Local training for local testing. Must be in eager mode.
57         Arguments:
58 batch_size: batch size in training
59 epoch: the number of epoch in training
60 kwargs: contains a dict of parameters used in training
61 """
62
63 if not tf.executing_eagerly():
64 raise ValueError('Eager mode is required for LocalTrain')
65
66 dataset = self._input_fn(kwargs)
67 dataset = dataset.repeat(epoch).batch(batch_size)
68 optimizer = self._opt_fn()
69
70 for data in dataset:
71 with tf.GradientTape() as tape:
72 output = self._model_cls.output(data)
73 loss = self._model_cls.loss(output, data)
74 # Add regularization loss if any.
75 # Note: for distributed training, the regularization loss should
76 # be divided by the number of contributing workers, which
77 # might be difficult for elasticdl.
78 if self._keras_model.losses:
79 loss += math_ops.add_n(self._keras_model.losses)
80 grads = tape.gradient(loss, self._keras_model.variables)
81 optimizer.apply_gradients(zip(grads, self._keras_model.variables))
82 print('Loss is ', loss.numpy())
83 pass
84
[end of elasticdl/worker/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/worker/worker.py b/elasticdl/worker/worker.py
--- a/elasticdl/worker/worker.py
+++ b/elasticdl/worker/worker.py
@@ -1,4 +1,6 @@
import tensorflow as tf
+assert tf.executing_eagerly()
+
from tensorflow.python.ops import math_ops
from proto import master_pb2
from proto import master_pb2_grpc
@@ -60,9 +62,6 @@
kwargs: contains a dict of parameters used in training
"""
- if not tf.executing_eagerly():
- raise ValueError('Eager mode is required for LocalTrain')
-
dataset = self._input_fn(kwargs)
dataset = dataset.repeat(epoch).batch(batch_size)
optimizer = self._opt_fn()
| {"golden_diff": "diff --git a/elasticdl/worker/worker.py b/elasticdl/worker/worker.py\n--- a/elasticdl/worker/worker.py\n+++ b/elasticdl/worker/worker.py\n@@ -1,4 +1,6 @@\n import tensorflow as tf\n+assert tf.executing_eagerly()\n+\n from tensorflow.python.ops import math_ops\n from proto import master_pb2\n from proto import master_pb2_grpc\n@@ -60,9 +62,6 @@\n kwargs: contains a dict of parameters used in training\n \"\"\"\n \n- if not tf.executing_eagerly():\n- raise ValueError('Eager mode is required for LocalTrain')\n-\n dataset = self._input_fn(kwargs)\n dataset = dataset.repeat(epoch).batch(batch_size)\n optimizer = self._opt_fn()\n", "issue": "[clean up] Should check tf is running eagerly early\nso the program can fail early if tf is configured wrong.\n", "before_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.ops import math_ops\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\n\n\nclass Worker(object):\n \"\"\"ElasticDL worker\"\"\"\n\n def __init__(self, model_cls, input_fn, opt_fn, channel=None):\n \"\"\"\n Arguments:\n model_cls: A class to define the model, which contains funcs\n get_keras_model: return the keras model defined in the class, with a tf dataset as its input\n output(data): get model ouput from data as input, either a single output of a dict of outputs\n loss(output, data): get model loss from output and data as input\n input_fn: a func to to get a dataset, which can be used as the keras model input\n dataset = input_fn(dict_of_params)\n dict_of_params from GetTask for DistributedTrain, from kwargs for LocalTrain\n opt_fn: a func to get the optimizer \n channel: grpc channel\n \"\"\"\n\n self._model_cls = model_cls()\n self._keras_model = self._model_cls.get_keras_model()\n self._input_fn = input_fn\n self._opt_fn = opt_fn\n if channel is None:\n self._stub = None\n else:\n self._stub = master_pb2_grpc.MasterStub(channel)\n self._model_version = -1\n\n def get_task(self):\n # TODO: get task from master\n pass\n\n def get_model(self):\n # TODO: get model from master\n pass\n\n def report_task_result(self):\n # TODO: report task result to master\n pass\n\n def report_gradient(self):\n # TODO: report gradient to ps\n pass\n\n def distributed_train(self):\n # TODO: distributed training\n pass\n\n def local_train(self, batch_size, epoch=1, kwargs=None):\n \"\"\"\n Local training for local testing. Must in eager mode.\n Argments:\n batch_size: batch size in training\n epoch: the number of epoch in training\n kwargs: contains a dict of parameters used in training\n \"\"\"\n\n if not tf.executing_eagerly():\n raise ValueError('Eager mode is required for LocalTrain')\n\n dataset = self._input_fn(kwargs)\n dataset = dataset.repeat(epoch).batch(batch_size)\n optimizer = self._opt_fn()\n\n for data in dataset:\n with tf.GradientTape() as tape:\n output = self._model_cls.output(data)\n loss = self._model_cls.loss(output, data)\n # Add regularization loss if any.\n # Note: for distributed training, the regularization loss should\n # be divided by the number of contributing workers, which\n # might be difficult for elasticdl.\n if self._keras_model.losses:\n loss += math_ops.add_n(self._keras_model.losses)\n grads = tape.gradient(loss, self._keras_model.variables)\n optimizer.apply_gradients(zip(grads, self._keras_model.variables))\n print('Loss is ', loss.numpy())\n pass\n", "path": "elasticdl/worker/worker.py"}]} | 1,383 | 175 |
gh_patches_debug_22781 | rasdani/github-patches | git_diff | microsoft__lisa-491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VERIFY-BOOT-ERROR-WARNINGS.py: fix test case
The test case is not running properly on Red Hat. Run the script directly, not through LISAv2, to observe this better.
Release: 7.7 Snapshot 5 (most likely 7.6 should also have this behavior)
Issues to be fixed:
- ignore the case where syslog is not present (that log file exists only on Ubuntu/Debian).
[root@rhel77gen2 Linux]# python VERIFY-BOOT-ERROR-WARNINGS.py
2.7.5 (default, Jun 11 2019, 14:33:56)
[GCC 4.8.5 20150623 (Red Hat 4.8.5-39)]
grep: /var/log/syslog: No such file or directory
- fix grep search
[root@rhel77gen2 Linux]# grep -nw '/var/log/messages' -e 'mlx5'
[root@rhel77gen2 Linux]# grep /var/log/messages -e mlx5
Jul 29 10:02:27 rhel77gen2 kernel: mlx5_core ae35:00:02.0: firmware version: 14.23.1020
</issue>
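To make the two failure modes concrete, a hedged repro sketch reusing the script's `Run()` helper from `azuremodules` (paths and patterns as in the issue; `-s`/`--no-messages` is standard GNU grep):

```python
from azuremodules import Run

# -w requires word boundaries, so 'mlx5' never matches 'mlx5_core'
Run("grep -nw '/var/log/messages' -e 'mlx5'")
# -s/--no-messages suppresses the 'No such file or directory' error
# on distros that have no /var/log/syslog
Run("grep -s '/var/log/syslog' -e 'error' --ignore-case")
```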
<code>
[start of Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py]
1 #!/usr/bin/python
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the Apache License.
4 from azuremodules import *
5
6 white_list_xml = "ignorable-boot-errors.xml"
7
8
9 def RunTest():
10 UpdateState("TestRunning")
11 RunLog.info("Checking for ERROR and WARNING messages in system logs.")
12 errors = Run("grep -nw '/var/log/syslog' -e 'error' --ignore-case && grep -nw '/var/log/messages' -e 'error' --ignore-case")
13 warnings = Run("grep -nw '/var/log/syslog' -e 'warning' --ignore-case && grep -nw '/var/log/messages' -e 'warning' --ignore-case")
14 failures = Run("grep -nw '/var/log/syslog' -e 'fail' --ignore-case && grep -nw '/var/log/messages' -e 'fail' --ignore-case")
15 if (not errors and not warnings and not failures):
16 RunLog.info('Could not find ERROR/WARNING/FAILURE messages in syslog/messages log file.')
17 ResultLog.info('PASS')
18 else:
19 if white_list_xml and os.path.isfile(white_list_xml):
20 try:
21 import xml.etree.cElementTree as ET
22 except ImportError:
23 import xml.etree.ElementTree as ET
24
25 white_list_file = ET.parse(white_list_xml)
26 xml_root = white_list_file.getroot()
27
28 RunLog.info('Checking ignorable boot ERROR/WARNING/FAILURE messages...')
29 for node in xml_root:
30 if (failures and node.tag == "failures"):
31 failures = RemoveIgnorableMessages(failures, node)
32 if (errors and node.tag == "errors"):
33 errors = RemoveIgnorableMessages(errors, node)
34 if (warnings and node.tag == "warnings"):
35 warnings = RemoveIgnorableMessages(warnings, node)
36
37 if (errors or warnings or failures):
38 RunLog.error('Found ERROR/WARNING/FAILURE messages in logs.')
39 if(errors):
40 SplitLog('Errors', errors)
41 if(warnings):
42 SplitLog('warnings', warnings)
43 if(failures):
44 SplitLog('failures', failures)
45 ResultLog.error('FAIL')
46 else:
47 ResultLog.info('PASS')
48 UpdateState("TestCompleted")
49
50
51 def SplitLog(logType, logValues):
52 for logEntry in logValues:
53 RunLog.info(logType + ': ' + logEntry)
54
55
56 def RemoveIgnorableMessages(messages, keywords_xml_node):
57 message_list = messages.strip().split('\n')
58 valid_list = []
59 for msg in message_list:
60 for keywords in keywords_xml_node:
61 if keywords.text in msg:
62 RunLog.info('Ignorable ERROR/WARNING/FAILURE message: ' + msg)
63 break
64 else:
65 valid_list.append(msg)
66 if len(valid_list) > 0:
67 return valid_list
68 else:
69 return None
70
71 RunTest()
72
[end of Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py b/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py
--- a/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py
+++ b/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py
@@ -9,9 +9,9 @@
def RunTest():
UpdateState("TestRunning")
RunLog.info("Checking for ERROR and WARNING messages in system logs.")
- errors = Run("grep -nw '/var/log/syslog' -e 'error' --ignore-case && grep -nw '/var/log/messages' -e 'error' --ignore-case")
- warnings = Run("grep -nw '/var/log/syslog' -e 'warning' --ignore-case && grep -nw '/var/log/messages' -e 'warning' --ignore-case")
- failures = Run("grep -nw '/var/log/syslog' -e 'fail' --ignore-case && grep -nw '/var/log/messages' -e 'fail' --ignore-case")
+ errors = Run("grep -nw '/var/log/syslog' -e 'error' --ignore-case --no-message && grep /var/log/messages -e error --ignore-case --no-message")
+ warnings = Run("grep -nw '/var/log/syslog' -e 'warning' --ignore-case --no-message && grep /var/log/messages -e warning --ignore-case --no-message")
+ failures = Run("grep -nw '/var/log/syslog' -e 'fail' --ignore-case --no-message && grep /var/log/messages -e fail --ignore-case --no-message")
if (not errors and not warnings and not failures):
RunLog.info('Could not find ERROR/WARNING/FAILURE messages in syslog/messages log file.')
ResultLog.info('PASS')
| {"golden_diff": "diff --git a/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py b/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py\n--- a/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py\n+++ b/Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py\n@@ -9,9 +9,9 @@\n def RunTest():\n UpdateState(\"TestRunning\")\n RunLog.info(\"Checking for ERROR and WARNING messages in system logs.\")\n- errors = Run(\"grep -nw '/var/log/syslog' -e 'error' --ignore-case && grep -nw '/var/log/messages' -e 'error' --ignore-case\")\n- warnings = Run(\"grep -nw '/var/log/syslog' -e 'warning' --ignore-case && grep -nw '/var/log/messages' -e 'warning' --ignore-case\")\n- failures = Run(\"grep -nw '/var/log/syslog' -e 'fail' --ignore-case && grep -nw '/var/log/messages' -e 'fail' --ignore-case\")\n+ errors = Run(\"grep -nw '/var/log/syslog' -e 'error' --ignore-case --no-message && grep /var/log/messages -e error --ignore-case --no-message\")\n+ warnings = Run(\"grep -nw '/var/log/syslog' -e 'warning' --ignore-case --no-message && grep /var/log/messages -e warning --ignore-case --no-message\")\n+ failures = Run(\"grep -nw '/var/log/syslog' -e 'fail' --ignore-case --no-message && grep /var/log/messages -e fail --ignore-case --no-message\")\n if (not errors and not warnings and not failures):\n RunLog.info('Could not find ERROR/WARNING/FAILURE messages in syslog/messages log file.')\n ResultLog.info('PASS')\n", "issue": "VERIFY-BOOT-ERROR-WARNINGS.py: fix test case\nTest case is not running properly on redhat. Run the script directly, not through lisav2 to observe this better.\r\n\r\nRelease: 7.7 Snapshot 5 (most likely 7.6 should also have this behavior)\r\n\r\nIssues to be fixed:\r\n- ignore if syslog is not present (that exists only on ubuntu/debian).\r\n[root@rhel77gen2 Linux]# python VERIFY-BOOT-ERROR-WARNINGS.py\r\n2.7.5 (default, Jun 11 2019, 14:33:56)\r\n[GCC 4.8.5 20150623 (Red Hat 4.8.5-39)]\r\ngrep: /var/log/syslog: No such file or directory\r\n\r\n- fix grep search\r\n[root@rhel77gen2 Linux]# grep -nw '/var/log/messages' -e 'mlx5'\r\n[root@rhel77gen2 Linux]# grep /var/log/messages -e mlx5\r\nJul 29 10:02:27 rhel77gen2 kernel: mlx5_core ae35:00:02.0: firmware version: 14.23.1020\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the Apache License.\nfrom azuremodules import *\n\nwhite_list_xml = \"ignorable-boot-errors.xml\"\n\n\ndef RunTest():\n UpdateState(\"TestRunning\")\n RunLog.info(\"Checking for ERROR and WARNING messages in system logs.\")\n errors = Run(\"grep -nw '/var/log/syslog' -e 'error' --ignore-case && grep -nw '/var/log/messages' -e 'error' --ignore-case\")\n warnings = Run(\"grep -nw '/var/log/syslog' -e 'warning' --ignore-case && grep -nw '/var/log/messages' -e 'warning' --ignore-case\")\n failures = Run(\"grep -nw '/var/log/syslog' -e 'fail' --ignore-case && grep -nw '/var/log/messages' -e 'fail' --ignore-case\")\n if (not errors and not warnings and not failures):\n RunLog.info('Could not find ERROR/WARNING/FAILURE messages in syslog/messages log file.')\n ResultLog.info('PASS')\n else:\n if white_list_xml and os.path.isfile(white_list_xml):\n try:\n import xml.etree.cElementTree as ET\n except ImportError:\n import xml.etree.ElementTree as ET\n\n white_list_file = ET.parse(white_list_xml)\n xml_root = white_list_file.getroot()\n\n RunLog.info('Checking ignorable boot ERROR/WARNING/FAILURE messages...')\n for node in xml_root:\n if (failures and node.tag == \"failures\"):\n failures = RemoveIgnorableMessages(failures, node)\n if (errors and node.tag == \"errors\"):\n errors = RemoveIgnorableMessages(errors, node)\n if (warnings and node.tag == \"warnings\"):\n warnings = RemoveIgnorableMessages(warnings, node)\n\n if (errors or warnings or failures):\n RunLog.error('Found ERROR/WARNING/FAILURE messages in logs.')\n if(errors):\n SplitLog('Errors', errors)\n if(warnings):\n SplitLog('warnings', warnings)\n if(failures):\n SplitLog('failures', failures)\n ResultLog.error('FAIL')\n else:\n ResultLog.info('PASS')\n UpdateState(\"TestCompleted\")\n\n\ndef SplitLog(logType, logValues):\n for logEntry in logValues:\n RunLog.info(logType + ': ' + logEntry)\n\n\ndef RemoveIgnorableMessages(messages, keywords_xml_node):\n message_list = messages.strip().split('\\n')\n valid_list = []\n for msg in message_list:\n for keywords in keywords_xml_node:\n if keywords.text in msg:\n RunLog.info('Ignorable ERROR/WARNING/FAILURE message: ' + msg)\n break\n else:\n valid_list.append(msg)\n if len(valid_list) > 0:\n return valid_list\n else:\n return None\n\nRunTest()\n", "path": "Testscripts/Linux/VERIFY-BOOT-ERROR-WARNINGS.py"}]} | 1,587 | 389 |
gh_patches_debug_23467 | rasdani/github-patches | git_diff | saleor__saleor-809 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Calling get_price_range() on an empty item range
On the product detail page, the backend doesn't check for the existence of product variants.
If the product requires variants, but they don't exist, Django throws an exception `Calling get_price_range() on an empty item range`.
</issue>
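A minimal sketch of one possible guard, assuming it sits at the top of `get_availability()` in `saleor/product/utils.py` (field names follow the namedtuple defined there):

```python
# a product that requires variants but has none cannot be priced
if not product.variants.exists():
    return ProductAvailability(
        available=False,
        price_range=None,
        price_range_undiscounted=None,
        discount=None,
        price_range_local_currency=None,
        discount_local_currency=None)
```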
<code>
[start of saleor/product/utils.py]
1 from collections import namedtuple
2
3 from ..cart.utils import get_cart_from_request, get_or_create_cart_from_request
4 from ..core.utils import to_local_currency
5 from .forms import get_form_class_for_product
6 from .models.utils import get_attributes_display_map
7 from .models import Product
8
9
10 def products_visible_to_user(user):
11 if (user.is_authenticated() and
12 user.is_active and user.is_staff):
13 return Product.objects.all()
14 else:
15 return Product.objects.get_available_products()
16
17
18 def products_with_details(user):
19 products = products_visible_to_user(user)
20 products = products.prefetch_related('categories', 'images',
21 'variants__stock',
22 'variants__variant_images__image',
23 'attributes__values',
24 'product_class__variant_attributes__values',
25 'product_class__product_attributes__values')
26 return products
27
28
29 def get_product_images(product):
30 """
31 Returns list of product images that will be placed in product gallery
32 """
33 return list(product.images.all())
34
35
36 def products_with_availability(products, discounts, local_currency):
37 for product in products:
38 yield product, get_availability(product, discounts, local_currency)
39
40
41 ProductAvailability = namedtuple(
42 'ProductAvailability', (
43 'available', 'price_range', 'price_range_undiscounted', 'discount',
44 'price_range_local_currency', 'discount_local_currency'))
45
46
47 def get_availability(product, discounts=None, local_currency=None):
48 # In default currency
49 price_range = product.get_price_range(discounts=discounts)
50 undiscounted = product.get_price_range()
51 if undiscounted.min_price > price_range.min_price:
52 discount = undiscounted.min_price - price_range.min_price
53 else:
54 discount = None
55
56 # Local currency
57 if local_currency:
58 price_range_local = to_local_currency(
59 price_range, local_currency)
60 undiscounted_local = to_local_currency(
61 undiscounted, local_currency)
62 if (undiscounted_local and
63 undiscounted_local.min_price > price_range_local.min_price):
64 discount_local_currency = (
65 undiscounted_local.min_price - price_range_local.min_price)
66 else:
67 discount_local_currency = None
68 else:
69 price_range_local = None
70 discount_local_currency = None
71
72 is_available = product.is_in_stock() and product.is_available()
73
74 return ProductAvailability(
75 available=is_available,
76 price_range=price_range,
77 price_range_undiscounted=undiscounted,
78 discount=discount,
79 price_range_local_currency=price_range_local,
80 discount_local_currency=discount_local_currency)
81
82
83 def handle_cart_form(request, product, create_cart=False):
84 if create_cart:
85 cart = get_or_create_cart_from_request(request)
86 else:
87 cart = get_cart_from_request(request)
88
89 form_class = get_form_class_for_product(product)
90 form = form_class(cart=cart, product=product,
91 data=request.POST or None, discounts=request.discounts)
92 return form, cart
93
94
95 def products_for_cart(user):
96 products = products_visible_to_user(user)
97 products = products.prefetch_related(
98 'variants', 'variants__variant_images__image')
99 return products
100
101
102 def get_variant_picker_data(product, discounts=None):
103 availability = get_availability(product, discounts)
104 variants = product.variants.all()
105 data = {'variantAttributes': [], 'variants': []}
106
107 variant_attributes = product.product_class.variant_attributes.all()
108 for attribute in variant_attributes:
109 data['variantAttributes'].append({
110 'pk': attribute.pk,
111 'display': attribute.display,
112 'name': attribute.name,
113 'values': [{'pk': value.pk, 'display': value.display}
114 for value in attribute.values.all()]})
115
116 for variant in variants:
117 price = variant.get_price_per_item(discounts)
118 price_undiscounted = variant.get_price_per_item()
119 variant_data = {
120 'id': variant.id,
121 'price': price.gross,
122 'priceUndiscounted': price_undiscounted.gross,
123 'currency': price.currency,
124 'attributes': variant.attributes}
125 data['variants'].append(variant_data)
126
127 data['availability'] = {
128 'discount': price_as_dict(availability.discount),
129 'priceRange': price_range_as_dict(availability.price_range),
130 'priceRangeUndiscounted': price_range_as_dict(
131 availability.price_range_undiscounted)}
132 return data
133
134
135 def get_product_attributes_data(product):
136 attributes = product.product_class.product_attributes.all()
137 attributes_map = {attribute.pk: attribute for attribute in attributes}
138 values_map = get_attributes_display_map(product, attributes)
139 return {attributes_map.get(attr_pk): value_obj
140 for (attr_pk, value_obj) in values_map.items()}
141
142
143 def price_as_dict(price):
144 if not price:
145 return {}
146 return {'currency': price.currency,
147 'gross': price.gross,
148 'net': price.net}
149
150
151 def price_range_as_dict(price_range):
152 return {'maxPrice': price_as_dict(price_range.max_price),
153 'minPrice': price_as_dict(price_range.min_price)}
154
[end of saleor/product/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/product/utils.py b/saleor/product/utils.py
--- a/saleor/product/utils.py
+++ b/saleor/product/utils.py
@@ -45,6 +45,15 @@
def get_availability(product, discounts=None, local_currency=None):
+ if not product.variants.exists():
+ return ProductAvailability(
+ available=False,
+ price_range=None,
+ price_range_undiscounted=None,
+ discount=None,
+ price_range_local_currency=None,
+ discount_local_currency=None)
+
# In default currency
price_range = product.get_price_range(discounts=discounts)
undiscounted = product.get_price_range()
@@ -142,12 +151,14 @@
def price_as_dict(price):
if not price:
- return {}
+ return None
return {'currency': price.currency,
'gross': price.gross,
'net': price.net}
def price_range_as_dict(price_range):
+ if not price_range:
+ return None
return {'maxPrice': price_as_dict(price_range.max_price),
'minPrice': price_as_dict(price_range.min_price)}
| {"golden_diff": "diff --git a/saleor/product/utils.py b/saleor/product/utils.py\n--- a/saleor/product/utils.py\n+++ b/saleor/product/utils.py\n@@ -45,6 +45,15 @@\n \n \n def get_availability(product, discounts=None, local_currency=None):\n+ if not product.variants.exists():\n+ return ProductAvailability(\n+ available=False,\n+ price_range=None,\n+ price_range_undiscounted=None,\n+ discount=None,\n+ price_range_local_currency=None,\n+ discount_local_currency=None)\n+\n # In default currency\n price_range = product.get_price_range(discounts=discounts)\n undiscounted = product.get_price_range()\n@@ -142,12 +151,14 @@\n \n def price_as_dict(price):\n if not price:\n- return {}\n+ return None\n return {'currency': price.currency,\n 'gross': price.gross,\n 'net': price.net}\n \n \n def price_range_as_dict(price_range):\n+ if not price_range:\n+ return None\n return {'maxPrice': price_as_dict(price_range.max_price),\n 'minPrice': price_as_dict(price_range.min_price)}\n", "issue": "Calling get_price_range() on an empty item range\nOn the product detail page backend doesn't check existence of product variants.\r\nIf the product requires variants, but they don't exist, Django throws an exception `Calling get_price_range() on an empty item range`.\n", "before_files": [{"content": "from collections import namedtuple\n\nfrom ..cart.utils import get_cart_from_request, get_or_create_cart_from_request\nfrom ..core.utils import to_local_currency\nfrom .forms import get_form_class_for_product\nfrom .models.utils import get_attributes_display_map\nfrom .models import Product\n\n\ndef products_visible_to_user(user):\n if (user.is_authenticated() and\n user.is_active and user.is_staff):\n return Product.objects.all()\n else:\n return Product.objects.get_available_products()\n\n\ndef products_with_details(user):\n products = products_visible_to_user(user)\n products = products.prefetch_related('categories', 'images',\n 'variants__stock',\n 'variants__variant_images__image',\n 'attributes__values',\n 'product_class__variant_attributes__values',\n 'product_class__product_attributes__values')\n return products\n\n\ndef get_product_images(product):\n \"\"\"\n Returns list of product images that will be placed in product gallery\n \"\"\"\n return list(product.images.all())\n\n\ndef products_with_availability(products, discounts, local_currency):\n for product in products:\n yield product, get_availability(product, discounts, local_currency)\n\n\nProductAvailability = namedtuple(\n 'ProductAvailability', (\n 'available', 'price_range', 'price_range_undiscounted', 'discount',\n 'price_range_local_currency', 'discount_local_currency'))\n\n\ndef get_availability(product, discounts=None, local_currency=None):\n # In default currency\n price_range = product.get_price_range(discounts=discounts)\n undiscounted = product.get_price_range()\n if undiscounted.min_price > price_range.min_price:\n discount = undiscounted.min_price - price_range.min_price\n else:\n discount = None\n\n # Local currency\n if local_currency:\n price_range_local = to_local_currency(\n price_range, local_currency)\n undiscounted_local = to_local_currency(\n undiscounted, local_currency)\n if (undiscounted_local and\n undiscounted_local.min_price > price_range_local.min_price):\n discount_local_currency = (\n undiscounted_local.min_price - price_range_local.min_price)\n else:\n discount_local_currency = None\n else:\n price_range_local = None\n discount_local_currency = None\n\n is_available = product.is_in_stock() and product.is_available()\n\n 
return ProductAvailability(\n available=is_available,\n price_range=price_range,\n price_range_undiscounted=undiscounted,\n discount=discount,\n price_range_local_currency=price_range_local,\n discount_local_currency=discount_local_currency)\n\n\ndef handle_cart_form(request, product, create_cart=False):\n if create_cart:\n cart = get_or_create_cart_from_request(request)\n else:\n cart = get_cart_from_request(request)\n\n form_class = get_form_class_for_product(product)\n form = form_class(cart=cart, product=product,\n data=request.POST or None, discounts=request.discounts)\n return form, cart\n\n\ndef products_for_cart(user):\n products = products_visible_to_user(user)\n products = products.prefetch_related(\n 'variants', 'variants__variant_images__image')\n return products\n\n\ndef get_variant_picker_data(product, discounts=None):\n availability = get_availability(product, discounts)\n variants = product.variants.all()\n data = {'variantAttributes': [], 'variants': []}\n\n variant_attributes = product.product_class.variant_attributes.all()\n for attribute in variant_attributes:\n data['variantAttributes'].append({\n 'pk': attribute.pk,\n 'display': attribute.display,\n 'name': attribute.name,\n 'values': [{'pk': value.pk, 'display': value.display}\n for value in attribute.values.all()]})\n\n for variant in variants:\n price = variant.get_price_per_item(discounts)\n price_undiscounted = variant.get_price_per_item()\n variant_data = {\n 'id': variant.id,\n 'price': price.gross,\n 'priceUndiscounted': price_undiscounted.gross,\n 'currency': price.currency,\n 'attributes': variant.attributes}\n data['variants'].append(variant_data)\n\n data['availability'] = {\n 'discount': price_as_dict(availability.discount),\n 'priceRange': price_range_as_dict(availability.price_range),\n 'priceRangeUndiscounted': price_range_as_dict(\n availability.price_range_undiscounted)}\n return data\n\n\ndef get_product_attributes_data(product):\n attributes = product.product_class.product_attributes.all()\n attributes_map = {attribute.pk: attribute for attribute in attributes}\n values_map = get_attributes_display_map(product, attributes)\n return {attributes_map.get(attr_pk): value_obj\n for (attr_pk, value_obj) in values_map.items()}\n\n\ndef price_as_dict(price):\n if not price:\n return {}\n return {'currency': price.currency,\n 'gross': price.gross,\n 'net': price.net}\n\n\ndef price_range_as_dict(price_range):\n return {'maxPrice': price_as_dict(price_range.max_price),\n 'minPrice': price_as_dict(price_range.min_price)}\n", "path": "saleor/product/utils.py"}]} | 2,042 | 260 |
gh_patches_debug_54265 | rasdani/github-patches | git_diff | hylang__hy-411 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
problem with comment parsing
I was translating some code to Hy from a textbook on Python programming (http://inventwithpython.com/pygame/index.html) and ran into a problem with this direct translation.
```
(import pygame sys)
(import [pygame.locals [*]])
(pygame.init)
(setv *displaysurf* (pygame.display.set_mode (, 400 300)))
(pygame.display.set_caption "Hello World!")
(while True ; main game loop
(do (foreach [event (pygame.event.get)]
(if (= event.type QUIT)
(do (pygame.quit)
(sys.exit))))
(pygame.display.update)))
```
I get a parse error if the end-of-line comment ("main game loop") appears where it does. It works if I remove it.
The following interaction with the prompt also surprised me.
```
=> ; some comment
hy.lex.exceptions.LexException: Could not identify the next token at line -1, column -1
```
Fixing this isn't critical, but it should probably be fixed. I do occasionally type something, realize I need to do something else first, comment it, press Enter, type whatever setup I needed, press Enter, then press Up twice, uncomment the line, and Enter to run it.
</issue>
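The failure can be reproduced outside the lexer with plain `re` (rply lexer rules are ordinary Python regexes); the second pattern is one possible fix:

```python
import re

old = re.compile(r';.*[\r\n]+')      # demands a newline *after* the comment
new = re.compile(r';.*(?=\r|\n|$)')  # only looks ahead for end of line/input

line = "; some comment"              # comment runs to the end of the input
print(bool(old.match(line)))         # False -> "Could not identify the next token"
print(bool(new.match(line)))         # True  -> comment is skipped
```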
<code>
[start of hy/lex/lexer.py]
1 # Copyright (c) 2013 Nicolas Dandrimont <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from rply import LexerGenerator
22
23
24 lg = LexerGenerator()
25
26
27 # A regexp for something that should end a quoting/unquoting operator
28 # i.e. a space or a closing brace/paren/curly
29 end_quote = r'(?![\s\)\]\}])'
30
31
32 lg.add('LPAREN', r'\(')
33 lg.add('RPAREN', r'\)')
34 lg.add('LBRACKET', r'\[')
35 lg.add('RBRACKET', r'\]')
36 lg.add('LCURLY', r'\{')
37 lg.add('RCURLY', r'\}')
38 lg.add('QUOTE', r'\'%s' % end_quote)
39 lg.add('QUASIQUOTE', r'`%s' % end_quote)
40 lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
41 lg.add('UNQUOTE', r'~%s' % end_quote)
42 lg.add('HASHBANG', r'#!.*[^\r\n]')
43 lg.add('HASHREADER', r'#.')
44
45
46 lg.add('STRING', r'''(?x)
47 (?:u|r|ur|ru)? # prefix
48 " # start string
49 (?:
50 | [^"\\] # non-quote or backslash
51 | \\. # or escaped single character
52 | \\x[0-9a-fA-F]{2} # or escaped raw character
53 | \\u[0-9a-fA-F]{4} # or unicode escape
54 | \\U[0-9a-fA-F]{8} # or long unicode escape
55 )* # one or more times
56 " # end string
57 ''')
58
59
60 lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
61
62
63 lg.ignore(r';.*[\r\n]+')
64 lg.ignore(r'\s+')
65
66
67 lexer = lg.build()
68
[end of hy/lex/lexer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/lex/lexer.py b/hy/lex/lexer.py
--- a/hy/lex/lexer.py
+++ b/hy/lex/lexer.py
@@ -60,7 +60,7 @@
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
-lg.ignore(r';.*[\r\n]+')
+lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
| {"golden_diff": "diff --git a/hy/lex/lexer.py b/hy/lex/lexer.py\n--- a/hy/lex/lexer.py\n+++ b/hy/lex/lexer.py\n@@ -60,7 +60,7 @@\n lg.add('IDENTIFIER', r'[^()\\[\\]{}\\'\"\\s;]+')\n \n \n-lg.ignore(r';.*[\\r\\n]+')\n+lg.ignore(r';.*(?=\\r|\\n|$)')\n lg.ignore(r'\\s+')\n", "issue": "problem with comment parsing\nI was translating some code to Hy from a textbook on Python programming (http://inventwithpython.com/pygame/index.html) and ran into a problem with this direct translation.\n\n```\n(import pygame sys)\n(import [pygame.locals [*]])\n\n(pygame.init)\n(setv *displaysurf* (pygame.display.set_mode (, 400 300)))\n(pygame.display.set_caption \"Hello World!\")\n(while True ; main game loop\n (do (foreach [event (pygame.event.get)]\n (if (= event.type QUIT)\n (do (pygame.quit)\n (sys.exit))))\n (pygame.display.update)))\n```\n\nI get a parse error if the end-of-line comment (\"main game loop\") appears where it does. It works if I remove it.\n\nThe following interaction with the prompt also surprised me.\n\n```\n=> ; some comment\nhy.lex.exceptions.LexException: Could not identify the next token at line -1, column -1\n```\n\nFixing this isn't critical, but it should probably be fixed. I do occasionally type something, realize I need to do something else first, comment it, press Enter, type whatever setup I needed, press Enter, then press Up twice, uncomment the line, and Enter to run it.\n\nproblem with comment parsing\nI was translating some code to Hy from a textbook on Python programming (http://inventwithpython.com/pygame/index.html) and ran into a problem with this direct translation.\n\n```\n(import pygame sys)\n(import [pygame.locals [*]])\n\n(pygame.init)\n(setv *displaysurf* (pygame.display.set_mode (, 400 300)))\n(pygame.display.set_caption \"Hello World!\")\n(while True ; main game loop\n (do (foreach [event (pygame.event.get)]\n (if (= event.type QUIT)\n (do (pygame.quit)\n (sys.exit))))\n (pygame.display.update)))\n```\n\nI get a parse error if the end-of-line comment (\"main game loop\") appears where it does. It works if I remove it.\n\nThe following interaction with the prompt also surprised me.\n\n```\n=> ; some comment\nhy.lex.exceptions.LexException: Could not identify the next token at line -1, column -1\n```\n\nFixing this isn't critical, but it should probably be fixed. I do occasionally type something, realize I need to do something else first, comment it, press Enter, type whatever setup I needed, press Enter, then press Up twice, uncomment the line, and Enter to run it.\n\n", "before_files": [{"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom rply import LexerGenerator\n\n\nlg = LexerGenerator()\n\n\n# A regexp for something that should end a quoting/unquoting operator\n# i.e. a space or a closing brace/paren/curly\nend_quote = r'(?![\\s\\)\\]\\}])'\n\n\nlg.add('LPAREN', r'\\(')\nlg.add('RPAREN', r'\\)')\nlg.add('LBRACKET', r'\\[')\nlg.add('RBRACKET', r'\\]')\nlg.add('LCURLY', r'\\{')\nlg.add('RCURLY', r'\\}')\nlg.add('QUOTE', r'\\'%s' % end_quote)\nlg.add('QUASIQUOTE', r'`%s' % end_quote)\nlg.add('UNQUOTESPLICE', r'~@%s' % end_quote)\nlg.add('UNQUOTE', r'~%s' % end_quote)\nlg.add('HASHBANG', r'#!.*[^\\r\\n]')\nlg.add('HASHREADER', r'#.')\n\n\nlg.add('STRING', r'''(?x)\n (?:u|r|ur|ru)? # prefix\n \" # start string\n (?:\n | [^\"\\\\] # non-quote or backslash\n | \\\\. # or escaped single character\n | \\\\x[0-9a-fA-F]{2} # or escaped raw character\n | \\\\u[0-9a-fA-F]{4} # or unicode escape\n | \\\\U[0-9a-fA-F]{8} # or long unicode escape\n )* # one or more times\n \" # end string\n''')\n\n\nlg.add('IDENTIFIER', r'[^()\\[\\]{}\\'\"\\s;]+')\n\n\nlg.ignore(r';.*[\\r\\n]+')\nlg.ignore(r'\\s+')\n\n\nlexer = lg.build()\n", "path": "hy/lex/lexer.py"}]} | 1,858 | 103 |
gh_patches_debug_91 | rasdani/github-patches | git_diff | projectmesa__mesa-535 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue 523
add networkx to the dependencies specified in setup.py for #523, so that all the tests run without error when installing with the `pip install -e .` command.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import re
4
5 from setuptools import setup, find_packages
6 from codecs import open
7
8 requires = [
9 'tornado >= 4.2, < 5.0.0',
10 'numpy',
11 'pandas',
12 'tqdm',
13 'click',
14 ]
15
16 version = ''
17 with open('mesa/__init__.py', 'r') as fd:
18 version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
19 fd.read(), re.MULTILINE).group(1)
20
21 with open('README.rst', 'rb', encoding='utf-8') as f:
22 readme = f.read()
23
24 setup(
25 name='Mesa',
26 version=version,
27 description="Agent-based modeling (ABM) in Python 3+",
28 long_description=readme,
29 author='Project Mesa Team',
30 author_email='[email protected]',
31 url='https://github.com/projectmesa/mesa',
32 packages=find_packages(),
33 package_data={'mesa': ['visualization/templates/*.html', 'visualization/templates/css/*',
34 'visualization/templates/fonts/*', 'visualization/templates/js/*']},
35 include_package_data=True,
36 install_requires=requires,
37 keywords='agent based modeling model ABM simulation multi-agent',
38 license='Apache 2.0',
39 zip_safe=False,
40 classifiers=[
41 'Topic :: Scientific/Engineering',
42 'Topic :: Scientific/Engineering :: Artificial Life',
43 'Topic :: Scientific/Engineering :: Artificial Intelligence',
44 'Intended Audience :: Science/Research',
45 'Programming Language :: Python :: 3 :: Only',
46 'License :: OSI Approved :: Apache Software License',
47 'Operating System :: OS Independent',
48 'Development Status :: 3 - Alpha',
49 'Natural Language :: English',
50 ],
51 entry_points='''
52 [console_scripts]
53 mesa=mesa.main:cli
54 ''',
55 )
56
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,7 @@
requires = [
'tornado >= 4.2, < 5.0.0',
+ 'networkx',
'numpy',
'pandas',
'tqdm',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,6 +7,7 @@\n \n requires = [\n 'tornado >= 4.2, < 5.0.0',\n+ 'networkx',\n 'numpy',\n 'pandas',\n 'tqdm',\n", "issue": "Issue 523\nadd networkx to the dependency specified in setup.py for #523 to allow all the tests to run without error when installing with the `pip install -e .` command.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\n 'tornado >= 4.2, < 5.0.0',\n 'numpy',\n 'pandas',\n 'tqdm',\n 'click',\n]\n\nversion = ''\nwith open('mesa/__init__.py', 'r') as fd:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n fd.read(), re.MULTILINE).group(1)\n\nwith open('README.rst', 'rb', encoding='utf-8') as f:\n readme = f.read()\n\nsetup(\n name='Mesa',\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author='Project Mesa Team',\n author_email='[email protected]',\n url='https://github.com/projectmesa/mesa',\n packages=find_packages(),\n package_data={'mesa': ['visualization/templates/*.html', 'visualization/templates/css/*',\n 'visualization/templates/fonts/*', 'visualization/templates/js/*']},\n include_package_data=True,\n install_requires=requires,\n keywords='agent based modeling model ABM simulation multi-agent',\n license='Apache 2.0',\n zip_safe=False,\n classifiers=[\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Life',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Development Status :: 3 - Alpha',\n 'Natural Language :: English',\n ],\n entry_points='''\n [console_scripts]\n mesa=mesa.main:cli\n ''',\n)\n", "path": "setup.py"}]} | 1,095 | 74 |
gh_patches_debug_24159 | rasdani/github-patches | git_diff | pytorch__ignite-399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve State documentation
Hi,
I think the documentation of states and the way they're used by engines is not detailed enough.
Currently the only way to know what attributes are available in the default State object, used by the engines generated by convenience methods such as `create_supervised_trainer`, is to look at the source code.
Some of them are mentioned in the [Concept](https://pytorch.org/ignite/concepts.html#state) part of the documentation but there is still a lot of room for confusion about the way they work and some are missing.
For example, there is no information about the `batch` attribute, which, without documentation, some might assume corresponds to the batch index (kinda like `epoch` for the epoch index), when the index is actually `iteration`. Same thing for `epoch`: it is used implicitly in examples, but there is no mention of it in the default `State` documentation. You might think it's obvious, but then someone will try to create a default `State` and assume `epoch` is a built-in attribute set to 0 at instantiation, when in fact you'll have to search through the source code to figure out that it's not a default attribute and is only created by the default `Engine` through a kwarg passed to the `State` class.
Additionally, this kind of explicit documentation of the state should be done every time a custom `State` is created; otherwise the user is left to guess what attributes are available in it, what they mean, and what format they're in. For example, is `output` the return from the forward pass of your model? The loss?
You'll have to look at `Engine`, then find out it's the output of the `process_function` argument. Then figure out that in the case of `create_supervised_trainer` that function is `_update`, which returns `loss.item()`, and in the case of `create_supervised_evaluator` it returns `(y_pred, y)`.
Thank you, and sorry for the verbosity but without it I was afraid this would be rejected as trivial.
</issue>
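To illustrate the attributes in question, a hedged sketch against the factories shown below (placeholder model and data; `Events` and the `.on()` decorator are part of the public Engine API):

```python
import torch
from ignite.engine import Events, create_supervised_trainer

model = torch.nn.Linear(2, 1)                               # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
trainer = create_supervised_trainer(model, optimizer, torch.nn.MSELoss())

@trainer.on(Events.ITERATION_COMPLETED)
def log_state(engine):
    # epoch/iteration are 1-based counters kept by Engine; batch is the
    # raw input batch; output is whatever the process function returned:
    # loss.item() for this trainer, (y_pred, y) for the evaluator factory
    print(engine.state.epoch, engine.state.iteration, engine.state.output)

trainer.run([(torch.randn(4, 2), torch.randn(4, 1))], max_epochs=1)
```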
<code>
[start of ignite/engine/__init__.py]
1 import torch
2
3 from ignite.engine.engine import Engine, State, Events
4 from ignite.utils import convert_tensor
5
6
7 def _prepare_batch(batch, device=None, non_blocking=False):
8 """Prepare batch for training: pass to a device with options.
9
10 """
11 x, y = batch
12 return (convert_tensor(x, device=device, non_blocking=non_blocking),
13 convert_tensor(y, device=device, non_blocking=non_blocking))
14
15
16 def create_supervised_trainer(model, optimizer, loss_fn,
17 device=None, non_blocking=False,
18 prepare_batch=_prepare_batch):
19 """
20 Factory function for creating a trainer for supervised models.
21
22 Args:
23 model (`torch.nn.Module`): the model to train.
24 optimizer (`torch.optim.Optimizer`): the optimizer to use.
25 loss_fn (torch.nn loss function): the loss function to use.
26 device (str, optional): device type specification (default: None).
27 Applies to both model and batches.
28 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
29 with respect to the host. For other cases, this argument has no effect.
30 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
31 tuple of tensors `(batch_x, batch_y)`.
32
33 Returns:
34 Engine: a trainer engine with supervised update function.
35 """
36 if device:
37 model.to(device)
38
39 def _update(engine, batch):
40 model.train()
41 optimizer.zero_grad()
42 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
43 y_pred = model(x)
44 loss = loss_fn(y_pred, y)
45 loss.backward()
46 optimizer.step()
47 return loss.item()
48
49 return Engine(_update)
50
51
52 def create_supervised_evaluator(model, metrics={},
53 device=None, non_blocking=False,
54 prepare_batch=_prepare_batch):
55 """
56 Factory function for creating an evaluator for supervised models.
57
58 Args:
59         model (`torch.nn.Module`): the model to evaluate.
60 metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
61 device (str, optional): device type specification (default: None).
62 Applies to both model and batches.
63 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
64 with respect to the host. For other cases, this argument has no effect.
65 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
66 tuple of tensors `(batch_x, batch_y)`.
67
68 Returns:
69 Engine: an evaluator engine with supervised inference function.
70 """
71 if device:
72 model.to(device)
73
74 def _inference(engine, batch):
75 model.eval()
76 with torch.no_grad():
77 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
78 y_pred = model(x)
79 return y_pred, y
80
81 engine = Engine(_inference)
82
83 for name, metric in metrics.items():
84 metric.attach(engine, name)
85
86 return engine
87
[end of ignite/engine/__init__.py]
[start of ignite/__init__.py]
1 import ignite.engine
2 import ignite.handlers
3 import ignite.metrics
4 import ignite.exceptions
5 import ignite.contrib
6
7 __version__ = '0.1.2'
8
[end of ignite/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/__init__.py b/ignite/__init__.py
--- a/ignite/__init__.py
+++ b/ignite/__init__.py
@@ -3,5 +3,6 @@
import ignite.metrics
import ignite.exceptions
import ignite.contrib
+import ignite.utils
__version__ = '0.1.2'
diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py
--- a/ignite/engine/__init__.py
+++ b/ignite/engine/__init__.py
@@ -30,6 +30,8 @@
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ Note: `engine.state.output` for this engine is the loss of the processed batch.
+
Returns:
Engine: a trainer engine with supervised update function.
"""
@@ -65,6 +67,8 @@
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.
+
Returns:
Engine: an evaluator engine with supervised inference function.
"""
| {"golden_diff": "diff --git a/ignite/__init__.py b/ignite/__init__.py\n--- a/ignite/__init__.py\n+++ b/ignite/__init__.py\n@@ -3,5 +3,6 @@\n import ignite.metrics\n import ignite.exceptions\n import ignite.contrib\n+import ignite.utils\n \n __version__ = '0.1.2'\ndiff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py\n--- a/ignite/engine/__init__.py\n+++ b/ignite/engine/__init__.py\n@@ -30,6 +30,8 @@\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n \n+ Note: `engine.state.output` for this engine is the loss of the processed batch.\n+\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n@@ -65,6 +67,8 @@\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n \n+ Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.\n+\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n", "issue": "Improve State documentation\nHi,\r\n\r\nI think the documentation of states and the way they're used by engines is not detailed enough.\r\nCurrently the only way to know what attributes are available in the default State object, used by the engines generated by convenience methods such as `create_supervised_trainer`, is to look at the source code. \r\nSome of them are mentioned in the [Concept](https://pytorch.org/ignite/concepts.html#state) part of the documentation but there is still a lot of room for confusion about the way they work and some are missing.\r\nFor example, no information about the `batch` attribute which without documentation some might assume to correspond to the batch index (kinda like `epoch` for the epoch index) which is actually `iteration`. Same thing for `epoch` which is used implicitly in examples but no mention of it in the default `State` documentation, and you might think it's obvious but then someone will try to create a default `State` and assume `epoch` is a built-in attribute set to 0 at instantiation when in fact you'll have to search through the source code to figure out that it's not a default attribute and is only created by the default `Engine` through a kwarg passed to the `State` class.\r\n\r\nAdditionally this kind of explicit documentation of the state should be done every time a custom `State` is created otherwise the user is left to guess what attributes are available in it, what they mean and what format they're in. For example, is `output` the return from the forward pass of your model? The loss?\r\nYou'll have to look at `Engine` then find out it's the output of the process_function argument. 
Then figure out that in the case of the `create_supervised_trainer` that function is `_update` which returns `loss.item()` and in the case of `create_supervised_evaluator` returns `(y_pred, y)`.\r\n\r\nThank you, and sorry for the verbosity but without it I was afraid this would be rejected as trivial.\n", "before_files": [{"content": "import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, y = batch\n return (convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking))\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.item()\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n return y_pred, y\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n", "path": "ignite/engine/__init__.py"}, {"content": "import ignite.engine\nimport ignite.handlers\nimport ignite.metrics\nimport ignite.exceptions\nimport ignite.contrib\n\n__version__ = '0.1.2'\n", "path": "ignite/__init__.py"}]} | 1,850 | 291 |
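Editor's note: the ignite row above documents what `engine.state.output` holds for the two factory engines. A minimal sketch of those semantics, assuming the ignite 0.1.x API shown in the row's source; the model, optimizer, and synthetic data below are illustrative assumptions, not part of the dataset:

```python
import torch
import torch.nn as nn
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator

# Tiny illustrative setup; any supervised model/optimizer/loss would do.
model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.CrossEntropyLoss()
data = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(3)]

trainer = create_supervised_trainer(model, optimizer, loss_fn)
evaluator = create_supervised_evaluator(model)

@trainer.on(Events.ITERATION_COMPLETED)
def log_loss(engine):
    # Per the patch's added note: the trainer's output is the loss of the processed batch.
    print(f"iter {engine.state.iteration}: loss={engine.state.output:.4f}")

@evaluator.on(Events.ITERATION_COMPLETED)
def inspect_output(engine):
    # Per the patch's added note: the evaluator's output is the tuple (batch_pred, batch_y).
    y_pred, y = engine.state.output
    print(y_pred.shape, y.shape)

trainer.run(data, max_epochs=1)
evaluator.run(data)
```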
gh_patches_debug_21524 | rasdani/github-patches | git_diff | mindsdb__mindsdb-981 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Route to check connection
At this moment the GUI can check the connection only for an integration that already exists. A route is needed for checking the connection before the integration is created.
</issue>
<code>
[start of mindsdb/api/http/namespaces/config.py]
1 import copy
2 import traceback
3
4 from flask import request
5 from flask_restx import Resource, abort
6 from flask import current_app as ca
7
8 from mindsdb.api.http.namespaces.configs.config import ns_conf
9 from mindsdb.interfaces.database.database import DatabaseWrapper
10 from mindsdb.utilities.functions import get_all_models_meta_data
11
12
13 def get_integration(name):
14 integrations = ca.config_obj.get('integrations', {})
15 return integrations.get(name)
16
17
18 @ns_conf.route('/integrations')
19 @ns_conf.param('name', 'List all database integration')
20 class ListIntegration(Resource):
21 @ns_conf.doc('get_integrations')
22 def get(self):
23 return {'integrations': [k for k in ca.config_obj.get('integrations', {})]}
24
25
26 @ns_conf.route('/all_integrations')
27 @ns_conf.param('name', 'List all database integration')
28 class AllIntegration(Resource):
29 @ns_conf.doc('get_all_integrations')
30 def get(self):
31 integrations = copy.deepcopy(
32 ca.config_obj.get('integrations', {})
33 )
34 for integration in integrations.values():
35 if 'password' in integration:
36 integration['password'] = None
37 return integrations
38
39
40 @ns_conf.route('/integrations/<name>')
41 @ns_conf.param('name', 'Database integration')
42 class Integration(Resource):
43 @ns_conf.doc('get_integration')
44 def get(self, name):
45 integration = get_integration(name)
46 if integration is None:
47 abort(404, f'Can\'t find database integration: {name}')
48 integration = copy.deepcopy(integration)
49 if 'password' in integration:
50 integration['password'] = None
51 return integration
52
53 @ns_conf.doc('put_integration')
54 def put(self, name):
55 params = request.json.get('params')
56 if not isinstance(params, dict):
57 abort(400, "type of 'params' must be dict")
58 integration = get_integration(name)
59 if integration is not None:
60 abort(400, f"Integration with name '{name}' already exists")
61 try:
62 if 'enabled' in params:
63 params['publish'] = params['enabled']
64 del params['enabled']
65 ca.config_obj.add_db_integration(name, params)
66
67 mdb = ca.mindsdb_native
68 cst = ca.custom_models
69 model_data_arr = get_all_models_meta_data(mdb, cst)
70 dbw = DatabaseWrapper(ca.config_obj)
71 dbw.register_predictors(model_data_arr)
72 except Exception as e:
73 print(traceback.format_exc())
74 abort(500, f'Error during config update: {str(e)}')
75 return '', 200
76
77 @ns_conf.doc('delete_integration')
78 def delete(self, name):
79 integration = get_integration(name)
80 if integration is None:
81 abort(400, f"Nothing to delete. '{name}' not exists.")
82 try:
83 ca.config_obj.remove_db_integration(name)
84 except Exception as e:
85 print(traceback.format_exc())
86 abort(500, f'Error during integration delete: {str(e)}')
87 return '', 200
88
89 @ns_conf.doc('modify_integration')
90 def post(self, name):
91 params = request.json.get('params')
92 if not isinstance(params, dict):
93 abort(400, "type of 'params' must be dict")
94 integration = get_integration(name)
95 if integration is None:
96 abort(400, f"Nothing to modify. '{name}' not exists.")
97 try:
98 if 'enabled' in params:
99 params['publish'] = params['enabled']
100 del params['enabled']
101 ca.config_obj.modify_db_integration(name, params)
102 DatabaseWrapper(ca.config_obj)
103 except Exception as e:
104 print(traceback.format_exc())
105 abort(500, f'Error during integration modification: {str(e)}')
106 return '', 200
107
108
109 @ns_conf.route('/integrations/<name>/check')
110 @ns_conf.param('name', 'Database integration checks')
111 class Check(Resource):
112 @ns_conf.doc('check')
113 def get(self, name):
114 if get_integration(name) is None:
115 abort(404, f'Can\'t find database integration: {name}')
116 dbw = DatabaseWrapper(ca.config_obj)
117 connections = dbw.check_connections()
118 return connections.get(name, False), 200
119
[end of mindsdb/api/http/namespaces/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -55,6 +55,11 @@
params = request.json.get('params')
if not isinstance(params, dict):
abort(400, "type of 'params' must be dict")
+
+ is_test = params.get('test', False)
+ if is_test:
+ del params['test']
+
integration = get_integration(name)
if integration is not None:
abort(400, f"Integration with name '{name}' already exists")
@@ -72,6 +77,12 @@
except Exception as e:
print(traceback.format_exc())
abort(500, f'Error during config update: {str(e)}')
+
+ if is_test:
+ cons = dbw.check_connections()
+ ca.config_obj.remove_db_integration(name)
+ return {'success': cons[name]}, 200
+
return '', 200
@ns_conf.doc('delete_integration')
| {"golden_diff": "diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py\n--- a/mindsdb/api/http/namespaces/config.py\n+++ b/mindsdb/api/http/namespaces/config.py\n@@ -55,6 +55,11 @@\n params = request.json.get('params')\n if not isinstance(params, dict):\n abort(400, \"type of 'params' must be dict\")\n+\n+ is_test = params.get('test', False)\n+ if is_test:\n+ del params['test']\n+\n integration = get_integration(name)\n if integration is not None:\n abort(400, f\"Integration with name '{name}' already exists\")\n@@ -72,6 +77,12 @@\n except Exception as e:\n print(traceback.format_exc())\n abort(500, f'Error during config update: {str(e)}')\n+\n+ if is_test:\n+ cons = dbw.check_connections()\n+ ca.config_obj.remove_db_integration(name)\n+ return {'success': cons[name]}, 200\n+\n return '', 200\n \n @ns_conf.doc('delete_integration')\n", "issue": "Route to check connection\nAt this moment in GUI possible check connection only for already exists integration. Route need for checking connection before integration created.\n", "before_files": [{"content": "import copy\nimport traceback\n\nfrom flask import request\nfrom flask_restx import Resource, abort\nfrom flask import current_app as ca\n\nfrom mindsdb.api.http.namespaces.configs.config import ns_conf\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\nfrom mindsdb.utilities.functions import get_all_models_meta_data\n\n\ndef get_integration(name):\n integrations = ca.config_obj.get('integrations', {})\n return integrations.get(name)\n\n\n@ns_conf.route('/integrations')\n@ns_conf.param('name', 'List all database integration')\nclass ListIntegration(Resource):\n @ns_conf.doc('get_integrations')\n def get(self):\n return {'integrations': [k for k in ca.config_obj.get('integrations', {})]}\n\n\n@ns_conf.route('/all_integrations')\n@ns_conf.param('name', 'List all database integration')\nclass AllIntegration(Resource):\n @ns_conf.doc('get_all_integrations')\n def get(self):\n integrations = copy.deepcopy(\n ca.config_obj.get('integrations', {})\n )\n for integration in integrations.values():\n if 'password' in integration:\n integration['password'] = None\n return integrations\n\n\n@ns_conf.route('/integrations/<name>')\n@ns_conf.param('name', 'Database integration')\nclass Integration(Resource):\n @ns_conf.doc('get_integration')\n def get(self, name):\n integration = get_integration(name)\n if integration is None:\n abort(404, f'Can\\'t find database integration: {name}')\n integration = copy.deepcopy(integration)\n if 'password' in integration:\n integration['password'] = None\n return integration\n\n @ns_conf.doc('put_integration')\n def put(self, name):\n params = request.json.get('params')\n if not isinstance(params, dict):\n abort(400, \"type of 'params' must be dict\")\n integration = get_integration(name)\n if integration is not None:\n abort(400, f\"Integration with name '{name}' already exists\")\n try:\n if 'enabled' in params:\n params['publish'] = params['enabled']\n del params['enabled']\n ca.config_obj.add_db_integration(name, params)\n\n mdb = ca.mindsdb_native\n cst = ca.custom_models\n model_data_arr = get_all_models_meta_data(mdb, cst)\n dbw = DatabaseWrapper(ca.config_obj)\n dbw.register_predictors(model_data_arr)\n except Exception as e:\n print(traceback.format_exc())\n abort(500, f'Error during config update: {str(e)}')\n return '', 200\n\n @ns_conf.doc('delete_integration')\n def delete(self, name):\n integration = get_integration(name)\n if integration is None:\n 
abort(400, f\"Nothing to delete. '{name}' not exists.\")\n try:\n ca.config_obj.remove_db_integration(name)\n except Exception as e:\n print(traceback.format_exc())\n abort(500, f'Error during integration delete: {str(e)}')\n return '', 200\n\n @ns_conf.doc('modify_integration')\n def post(self, name):\n params = request.json.get('params')\n if not isinstance(params, dict):\n abort(400, \"type of 'params' must be dict\")\n integration = get_integration(name)\n if integration is None:\n abort(400, f\"Nothin to modify. '{name}' not exists.\")\n try:\n if 'enabled' in params:\n params['publish'] = params['enabled']\n del params['enabled']\n ca.config_obj.modify_db_integration(name, params)\n DatabaseWrapper(ca.config_obj)\n except Exception as e:\n print(traceback.format_exc())\n abort(500, f'Error during integration modifycation: {str(e)}')\n return '', 200\n\n\n@ns_conf.route('/integrations/<name>/check')\n@ns_conf.param('name', 'Database integration checks')\nclass Check(Resource):\n @ns_conf.doc('check')\n def get(self, name):\n if get_integration(name) is None:\n abort(404, f'Can\\'t find database integration: {name}')\n dbw = DatabaseWrapper(ca.config_obj)\n connections = dbw.check_connections()\n return connections.get(name, False), 200\n", "path": "mindsdb/api/http/namespaces/config.py"}]} | 1,765 | 262 |
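Editor's note: a hedged sketch of how a client could exercise the behavior added by the mindsdb patch above. With the change applied, sending `test: true` inside `params` makes the PUT handler verify connectivity and then discard the integration instead of persisting it. Only the `test` flag and the `{'success': <bool>}` response shape come from the diff; the base URL, port 47334, and the MariaDB parameters are assumptions for illustration:

```python
import requests  # any HTTP client works; requests is assumed for brevity

resp = requests.put(
    'http://127.0.0.1:47334/api/config/integrations/test_mariadb',  # URL prefix is an assumption
    json={'params': {
        'type': 'mariadb',
        'host': '127.0.0.1',
        'port': 3306,
        'user': 'root',
        'password': 'secret',
        'test': True,  # per the patch: checked server-side, then removed; nothing is saved
    }},
)
print(resp.json())  # per the patch, e.g. {'success': True} if the connection works
```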