| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k |
gh_patches_debug_19934 | rasdani/github-patches | git_diff | Mailu__Mailu-1599 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hardcoded http://admin/ in fetchmail.py
I've tweaked ``docker-compose.yml`` so that all my containers related to ``mailu`` are prefixed with ``mailu-``, in order to proactively avoid conflicts with any other containers I may eventually define in the future.
However, the hardcoded ``http://admin/...`` below causes a failure in ``fetchmail``, since my container is now named ``mailu-admin`` in my ``docker-compose.yml``, not ``admin`` as the code supposes it should be.
```
./services/fetchmail/fetchmail.py:47: fetches = requests.get("http://admin/internal/fetch").json()
./services/fetchmail/fetchmail.py:85: requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
```
</issue>
<code>
[start of optional/fetchmail/fetchmail.py]
1 #!/usr/bin/python3
2
3 import time
4 import os
5 import tempfile
6 import shlex
7 import subprocess
8 import re
9 import requests
10 import sys
11 import traceback
12
13
14 FETCHMAIL = """
15 fetchmail -N \
16 --sslcertck --sslcertpath /etc/ssl/certs \
17 -f {}
18 """
19
20
21 RC_LINE = """
22 poll "{host}" proto {protocol} port {port}
23 user "{username}" password "{password}"
24 is "{user_email}"
25 smtphost "{smtphost}"
26 {options}
27 """
28
29
30 def extract_host_port(host_and_port, default_port):
31 host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()
32 return host, int(port) if port else default_port
33
34
35 def escape_rc_string(arg):
36 return "".join("\\x%2x" % ord(char) for char in arg)
37
38
39 def fetchmail(fetchmailrc):
40 with tempfile.NamedTemporaryFile() as handler:
41 handler.write(fetchmailrc.encode("utf8"))
42 handler.flush()
43 command = FETCHMAIL.format(shlex.quote(handler.name))
44 output = subprocess.check_output(command, shell=True)
45 return output
46
47
48 def run(debug):
49 try:
50 fetches = requests.get("http://admin/internal/fetch").json()
51 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
52 if smtpport is None:
53 smtphostport = smtphost
54 else:
55 smtphostport = "%s/%d" % (smtphost, smtpport)
56 for fetch in fetches:
57 fetchmailrc = ""
58 options = "options antispam 501, 504, 550, 553, 554"
59 options += " ssl" if fetch["tls"] else ""
60 options += " keep" if fetch["keep"] else " fetchall"
61 fetchmailrc += RC_LINE.format(
62 user_email=escape_rc_string(fetch["user_email"]),
63 protocol=fetch["protocol"],
64 host=escape_rc_string(fetch["host"]),
65 port=fetch["port"],
66 smtphost=smtphostport,
67 username=escape_rc_string(fetch["username"]),
68 password=escape_rc_string(fetch["password"]),
69 options=options
70 )
71 if debug:
72 print(fetchmailrc)
73 try:
74 print(fetchmail(fetchmailrc))
75 error_message = ""
76 except subprocess.CalledProcessError as error:
77 error_message = error.output.decode("utf8")
78 # No mail is not an error
79 if not error_message.startswith("fetchmail: No mail"):
80 print(error_message)
81 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
82 # Number of messages seen is not a error as well
83 if ("messages" in error_message and
84 "(seen " in error_message and
85 user_info in error_message):
86 print(error_message)
87 finally:
88 requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
89 json=error_message.split("\n")[0]
90 )
91 except Exception:
92 traceback.print_exc()
93
94
95 if __name__ == "__main__":
96 while True:
97 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
98 run(os.environ.get("DEBUG", None) == "True")
99 sys.stdout.flush()
100
[end of optional/fetchmail/fetchmail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -47,7 +47,7 @@
def run(debug):
try:
- fetches = requests.get("http://admin/internal/fetch").json()
+ fetches = requests.get("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch").json()
smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
if smtpport is None:
smtphostport = smtphost
@@ -85,7 +85,7 @@
user_info in error_message):
print(error_message)
finally:
- requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
+ requests.post("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch/{}".format(fetch["id"]),
json=error_message.split("\n")[0]
)
except Exception:
| {"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -47,7 +47,7 @@\n \n def run(debug):\n try:\n- fetches = requests.get(\"http://admin/internal/fetch\").json()\n+ fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n@@ -85,7 +85,7 @@\n user_info in error_message):\n print(error_message)\n finally:\n- requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n+ requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n", "issue": "Hardcoded http://admin/ in fetchmail.py\nI've tweaked ``docker-compose.yml`` so that all my containers related to ``mailu`` are prefixed by ``mailu-``, in order to pro-actively avoid conflict with any other containers I may eventually define in future.\r\n\r\nHowever, the hardcode ``http://admin/...`` below causes failure in ``fetchmail``, since my container is now named ``mailu-admin`` in my ``docker-compose.yml``, not ``admin`` as the code supposes it should be.\r\n\r\n```\r\n./services/fetchmail/fetchmail.py:47: fetches = requests.get(\"http://admin/internal/fetch\").json()\r\n./services/fetchmail/fetchmail.py:85: requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\r\n```\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://admin/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n 
except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]} | 1,647 | 251 |
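A note on the fix above: the golden diff replaces the hardcoded `http://admin/` host with a value read from the `HOST_ADMIN` environment variable, defaulting to the historical `admin` container name. Below is a minimal sketch of that pattern, pulled into a helper; the `admin_url` function is a hypothetical refactoring for illustration, not code from the Mailu repository.

```python
import os

import requests


def admin_url(path):
    # HOST_ADMIN defaults to "admin" so existing deployments keep working;
    # renamed setups export e.g. HOST_ADMIN=mailu-admin instead.
    host = os.environ.get("HOST_ADMIN", "admin")
    return "http://" + host + path


fetches = requests.get(admin_url("/internal/fetch")).json()
```

With `HOST_ADMIN=mailu-admin` set on the fetchmail container, both the fetch listing and the per-fetch status POST reach the renamed service without further code changes.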
gh_patches_debug_15319 | rasdani/github-patches | git_diff | ibis-project__ibis-1816 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PKG: Add pre-commit, black and isort to setup.py
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import pathlib
4 import sys
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10 LONG_DESCRIPTION = """
11 Ibis is a productivity-centric Python big data framework.
12
13 See http://docs.ibis-project.org
14 """
15
16 VERSION = sys.version_info.major, sys.version_info.minor
17
18 impala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']
19 if VERSION == (3, 5):
20 impala_requires.append('impyla<0.14.2')
21 else:
22 impala_requires.append('impyla>=0.15.0')
23
24 sqlite_requires = ['sqlalchemy']
25 postgres_requires = sqlite_requires + ['psycopg2']
26 mysql_requires = sqlite_requires + ['pymysql']
27
28 if VERSION == (3, 5):
29 mapd_requires = ['pymapd>=0.8.3,<0.11.0']
30 else:
31 mapd_requires = ['pymapd>=0.12.0']
32 kerberos_requires = ['requests-kerberos']
33 visualization_requires = ['graphviz']
34 clickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']
35 bigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']
36 hdf5_requires = ['tables>=3.0.0']
37
38 if VERSION == (3, 5):
39 parquet_requires = ['pyarrow<0.12.0']
40 else:
41 parquet_requires = ['pyarrow>=0.12.0']
42
43 all_requires = (
44 impala_requires
45 + postgres_requires
46 + mapd_requires
47 + mysql_requires
48 + kerberos_requires
49 + visualization_requires
50 + clickhouse_requires
51 + bigquery_requires
52 + hdf5_requires
53 + parquet_requires
54 )
55
56 develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']
57
58 install_requires = [
59 line.strip()
60 for line in pathlib.Path(__file__)
61 .parent.joinpath('requirements.txt')
62 .read_text()
63 .splitlines()
64 ]
65
66 setup(
67 name='ibis-framework',
68 url='https://github.com/ibis-project/ibis',
69 packages=find_packages(),
70 version=versioneer.get_version(),
71 cmdclass=versioneer.get_cmdclass(),
72 install_requires=install_requires,
73 python_requires='>=3.5',
74 extras_require={
75 'all': all_requires,
76 'develop': develop_requires,
77 'impala': impala_requires,
78 'kerberos': kerberos_requires,
79 'postgres': postgres_requires,
80 'mapd': mapd_requires,
81 'mysql': mysql_requires,
82 'sqlite': sqlite_requires,
83 'visualization': visualization_requires,
84 'clickhouse': clickhouse_requires,
85 'bigquery': bigquery_requires,
86 'hdf5': hdf5_requires,
87 'parquet': parquet_requires,
88 },
89 description="Productivity-centric Python Big Data Framework",
90 long_description=LONG_DESCRIPTION,
91 classifiers=[
92 'Development Status :: 4 - Beta',
93 'Operating System :: OS Independent',
94 'Intended Audience :: Science/Research',
95 'Programming Language :: Python',
96 'Programming Language :: Python :: 3',
97 'Topic :: Scientific/Engineering',
98 ],
99 license='Apache License, Version 2.0',
100 maintainer="Phillip Cloud",
101 maintainer_email="[email protected]",
102 )
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,14 @@
+ parquet_requires
)
-develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']
+develop_requires = all_requires + [
+ 'click',
+ 'flake8',
+ 'isort',
+ 'mypy',
+ 'pre-commit',
+ 'pytest>=3',
+]
install_requires = [
line.strip()
@@ -73,7 +80,8 @@
python_requires='>=3.5',
extras_require={
'all': all_requires,
- 'develop': develop_requires,
+ 'develop:python_version > "3.5"': develop_requires + ['black'],
+ 'develop:python_version == "3.5"': develop_requires,
'impala': impala_requires,
'kerberos': kerberos_requires,
'postgres': postgres_requires,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,14 @@\n + parquet_requires\n )\n \n-develop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']\n+develop_requires = all_requires + [\n+ 'click',\n+ 'flake8',\n+ 'isort',\n+ 'mypy',\n+ 'pre-commit',\n+ 'pytest>=3',\n+]\n \n install_requires = [\n line.strip()\n@@ -73,7 +80,8 @@\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n- 'develop': develop_requires,\n+ 'develop:python_version > \"3.5\"': develop_requires + ['black'],\n+ 'develop:python_version == \"3.5\"': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n", "issue": "PKG: Add pre-commit, black and isort to setup.py\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://docs.ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy', 'requests']\nif VERSION == (3, 5):\n impala_requires.append('impyla<0.14.2')\nelse:\n impala_requires.append('impyla>=0.15.0')\n\nsqlite_requires = ['sqlalchemy']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nif VERSION == (3, 5):\n mapd_requires = ['pymapd>=0.8.3,<0.11.0']\nelse:\n mapd_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = ['clickhouse-driver>=0.0.8', 'clickhouse-cityhash']\nbigquery_requires = ['google-cloud-bigquery>=1.0.0', 'pydata-google-auth']\nhdf5_requires = ['tables>=3.0.0']\n\nif VERSION == (3, 5):\n parquet_requires = ['pyarrow<0.12.0']\nelse:\n parquet_requires = ['pyarrow>=0.12.0']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + mapd_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n)\n\ndevelop_requires = all_requires + ['click', 'flake8', 'mypy', 'pytest>=3']\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.5',\n extras_require={\n 'all': all_requires,\n 'develop': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'mapd': mapd_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": 
"setup.py"}]} | 1,494 | 221 |
gh_patches_debug_20919 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-298 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Report error requests on Pyramid
Currently, if an error occurs in a Pyramid request, we don't report it. Error cases can be just as useful to see, so we should try to do this. It looks like it's possible by rearranging the existing code.
</issue>
<code>
[start of src/scout_apm/pyramid.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import scout_apm.core
5 from scout_apm.core.config import ScoutConfig
6 from scout_apm.core.tracked_request import TrackedRequest
7 from scout_apm.core.web_requests import (
8 create_filtered_path,
9 ignore_path,
10 track_amazon_request_queue_time,
11 track_request_queue_time,
12 )
13
14
15 def includeme(config):
16 configs = {}
17 pyramid_config = config.get_settings()
18 for name in pyramid_config:
19 if name.startswith("SCOUT_"):
20 value = pyramid_config[name]
21 clean_name = name.replace("SCOUT_", "").lower()
22 configs[clean_name] = value
23 ScoutConfig.set(**configs)
24
25 if scout_apm.core.install():
26 config.add_tween("scout_apm.pyramid.instruments")
27
28
29 def instruments(handler, registry):
30 def scout_tween(request):
31 tracked_request = TrackedRequest.instance()
32 span = tracked_request.start_span(operation="Controller/Pyramid")
33
34 try:
35 path = request.path
36 # mixed() returns values as *either* single items or lists
37 url_params = [
38 (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs
39 ]
40 tracked_request.tag("path", create_filtered_path(path, url_params))
41 if ignore_path(path):
42 tracked_request.tag("ignore_transaction", True)
43
44 try:
45 # Determine a remote IP to associate with the request. The value is
46 # spoofable by the requester so this is not suitable to use in any
47 # security sensitive context.
48 user_ip = (
49 request.headers.get("x-forwarded-for", default="").split(",")[0]
50 or request.headers.get("client-ip", default="").split(",")[0]
51 or request.remote_addr
52 )
53 except Exception:
54 pass
55 else:
56 tracked_request.tag("user_ip", user_ip)
57
58 tracked_queue_time = False
59 try:
60 queue_time = request.headers.get(
61 "x-queue-start", default=""
62 ) or request.headers.get("x-request-start", default="")
63 except Exception:
64 pass
65 else:
66 tracked_queue_time = track_request_queue_time(
67 queue_time, tracked_request
68 )
69 if not tracked_queue_time:
70 try:
71 amazon_queue_time = request.headers.get(
72 "x-amzn-trace-id", default=""
73 )
74 except Exception:
75 pass
76 else:
77 track_amazon_request_queue_time(amazon_queue_time, tracked_request)
78
79 try:
80 response = handler(request)
81 except Exception:
82 tracked_request.tag("error", "true")
83 raise
84
85 # This happens further down the call chain. So time it starting
86 # above, but only name it if it gets to here.
87 if request.matched_route is not None:
88 tracked_request.mark_real_request()
89 span.operation = "Controller/" + request.matched_route.name
90
91 finally:
92 tracked_request.stop_span()
93
94 return response
95
96 return scout_tween
97
[end of src/scout_apm/pyramid.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/src/scout_apm/pyramid.py b/src/scout_apm/pyramid.py
--- a/src/scout_apm/pyramid.py
+++ b/src/scout_apm/pyramid.py
@@ -77,17 +77,18 @@
track_amazon_request_queue_time(amazon_queue_time, tracked_request)
try:
- response = handler(request)
+ try:
+ response = handler(request)
+ finally:
+ # Routing further down the call chain. So time it starting
+ # above, but only name it if it gets a name
+ if request.matched_route is not None:
+ tracked_request.mark_real_request()
+ span.operation = "Controller/" + request.matched_route.name
except Exception:
tracked_request.tag("error", "true")
raise
- # This happens further down the call chain. So time it starting
- # above, but only name it if it gets to here.
- if request.matched_route is not None:
- tracked_request.mark_real_request()
- span.operation = "Controller/" + request.matched_route.name
-
finally:
tracked_request.stop_span()
| {"golden_diff": "diff --git a/src/scout_apm/pyramid.py b/src/scout_apm/pyramid.py\n--- a/src/scout_apm/pyramid.py\n+++ b/src/scout_apm/pyramid.py\n@@ -77,17 +77,18 @@\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n \n try:\n- response = handler(request)\n+ try:\n+ response = handler(request)\n+ finally:\n+ # Routing further down the call chain. So time it starting\n+ # above, but only name it if it gets a name\n+ if request.matched_route is not None:\n+ tracked_request.mark_real_request()\n+ span.operation = \"Controller/\" + request.matched_route.name\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n \n- # This happens further down the call chain. So time it starting\n- # above, but only name it if it gets to here.\n- if request.matched_route is not None:\n- tracked_request.mark_real_request()\n- span.operation = \"Controller/\" + request.matched_route.name\n-\n finally:\n tracked_request.stop_span()\n", "issue": "Report error requests on Pyramid\nCurrently if an error occurs in a Pyramid request, we don't report it. Error cases can be just as useful to see so we should try and do this. It looks like it's possible by rearranging the existing code.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport scout_apm.core\nfrom scout_apm.core.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\n\ndef includeme(config):\n configs = {}\n pyramid_config = config.get_settings()\n for name in pyramid_config:\n if name.startswith(\"SCOUT_\"):\n value = pyramid_config[name]\n clean_name = name.replace(\"SCOUT_\", \"\").lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n if scout_apm.core.install():\n config.add_tween(\"scout_apm.pyramid.instruments\")\n\n\ndef instruments(handler, registry):\n def scout_tween(request):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Controller/Pyramid\")\n\n try:\n path = request.path\n # mixed() returns values as *either* single items or lists\n url_params = [\n (k, v) for k, vs in request.GET.dict_of_lists().items() for v in vs\n ]\n tracked_request.tag(\"path\", create_filtered_path(path, url_params))\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. 
The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.headers.get(\"x-forwarded-for\", default=\"\").split(\",\")[0]\n or request.headers.get(\"client-ip\", default=\"\").split(\",\")[0]\n or request.remote_addr\n )\n except Exception:\n pass\n else:\n tracked_request.tag(\"user_ip\", user_ip)\n\n tracked_queue_time = False\n try:\n queue_time = request.headers.get(\n \"x-queue-start\", default=\"\"\n ) or request.headers.get(\"x-request-start\", default=\"\")\n except Exception:\n pass\n else:\n tracked_queue_time = track_request_queue_time(\n queue_time, tracked_request\n )\n if not tracked_queue_time:\n try:\n amazon_queue_time = request.headers.get(\n \"x-amzn-trace-id\", default=\"\"\n )\n except Exception:\n pass\n else:\n track_amazon_request_queue_time(amazon_queue_time, tracked_request)\n\n try:\n response = handler(request)\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n\n # This happens further down the call chain. So time it starting\n # above, but only name it if it gets to here.\n if request.matched_route is not None:\n tracked_request.mark_real_request()\n span.operation = \"Controller/\" + request.matched_route.name\n\n finally:\n tracked_request.stop_span()\n\n return response\n\n return scout_tween\n", "path": "src/scout_apm/pyramid.py"}]} | 1,433 | 256 |
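The essential move in the golden diff is a nested `try`/`finally`: route naming runs in the inner `finally`, so it happens even when `handler(request)` raises, while error tagging stays in the outer `except`. A condensed sketch of that control flow, using the variable names from the tween above:

```python
try:
    try:
        response = handler(request)
    finally:
        # Runs on success *and* on exceptions, so failed requests are
        # still reported under "Controller/<route name>".
        if request.matched_route is not None:
            tracked_request.mark_real_request()
            span.operation = "Controller/" + request.matched_route.name
except Exception:
    tracked_request.tag("error", "true")
    raise
finally:
    tracked_request.stop_span()

return response
```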
gh_patches_debug_17705 | rasdani/github-patches | git_diff | open-mmlab__mmsegmentation-260 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
About the newly added RandomRotate
It seems that the pipeline's __init__.py forgot to import this transform, so it currently cannot be used.
</issue>
<code>
[start of mmseg/datasets/pipelines/__init__.py]
1 from .compose import Compose
2 from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
3 Transpose, to_tensor)
4 from .loading import LoadAnnotations, LoadImageFromFile
5 from .test_time_aug import MultiScaleFlipAug
6 from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,
7 RandomFlip, Resize, SegRescale)
8
9 __all__ = [
10 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
11 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
12 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
13 'Normalize', 'SegRescale', 'PhotoMetricDistortion'
14 ]
15
[end of mmseg/datasets/pipelines/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/mmseg/datasets/pipelines/__init__.py b/mmseg/datasets/pipelines/__init__.py
--- a/mmseg/datasets/pipelines/__init__.py
+++ b/mmseg/datasets/pipelines/__init__.py
@@ -4,11 +4,13 @@
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,
- RandomFlip, Resize, SegRescale)
+ RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,
+ SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
- 'Normalize', 'SegRescale', 'PhotoMetricDistortion'
+ 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
+ 'Rerange', 'RGB2Gray'
]
| {"golden_diff": "diff --git a/mmseg/datasets/pipelines/__init__.py b/mmseg/datasets/pipelines/__init__.py\n--- a/mmseg/datasets/pipelines/__init__.py\n+++ b/mmseg/datasets/pipelines/__init__.py\n@@ -4,11 +4,13 @@\n from .loading import LoadAnnotations, LoadImageFromFile\n from .test_time_aug import MultiScaleFlipAug\n from .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n- RandomFlip, Resize, SegRescale)\n+ RandomFlip, RandomRotate, Rerange, Resize, RGB2Gray,\n+ SegRescale)\n \n __all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n- 'Normalize', 'SegRescale', 'PhotoMetricDistortion'\n+ 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',\n+ 'Rerange', 'RGB2Gray'\n ]\n", "issue": "\u5173\u4e8e\u65b0\u589e\u7684RandomRotate\n\u597d\u50cfpipeline\u7684__init__.py\u91cc\u9762\u5fd8\u8bb0\u5bfc\u5165\u8fd9\u4e2a\u53d8\u6362\u4e86\uff0c\u5bfc\u81f4\u73b0\u5728\u65e0\u6cd5\u4f7f\u7528\u3002\n", "before_files": [{"content": "from .compose import Compose\nfrom .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,\n Transpose, to_tensor)\nfrom .loading import LoadAnnotations, LoadImageFromFile\nfrom .test_time_aug import MultiScaleFlipAug\nfrom .transforms import (Normalize, Pad, PhotoMetricDistortion, RandomCrop,\n RandomFlip, Resize, SegRescale)\n\n__all__ = [\n 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',\n 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n 'Normalize', 'SegRescale', 'PhotoMetricDistortion'\n]\n", "path": "mmseg/datasets/pipelines/__init__.py"}]} | 755 | 253 |
gh_patches_debug_2920 | rasdani/github-patches | git_diff | encode__starlette-195 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check directory exists when instantiating `StaticFiles`
The `StaticFiles` application should ensure that the directory exists at the point it is instantiated.
(With an optional switch to turn this behavior off)
</issue>
<code>
[start of starlette/staticfiles.py]
1 import os
2 import stat
3
4 from aiofiles.os import stat as aio_stat
5
6 from starlette.responses import FileResponse, PlainTextResponse, Response
7 from starlette.types import ASGIInstance, Receive, Scope, Send
8
9
10 class StaticFiles:
11 def __init__(self, *, directory: str) -> None:
12 self.directory = directory
13 self.config_checked = False
14
15 def __call__(self, scope: Scope) -> ASGIInstance:
16 assert scope["type"] == "http"
17 if scope["method"] not in ("GET", "HEAD"):
18 return PlainTextResponse("Method Not Allowed", status_code=405)
19 path = os.path.normpath(os.path.join(*scope["path"].split("/")))
20 if path.startswith(".."):
21 return PlainTextResponse("Not Found", status_code=404)
22 path = os.path.join(self.directory, path)
23 if self.config_checked:
24 check_directory = None
25 else:
26 check_directory = self.directory
27 self.config_checked = True
28 return _StaticFilesResponder(scope, path=path, check_directory=check_directory)
29
30
31 class _StaticFilesResponder:
32 def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:
33 self.scope = scope
34 self.path = path
35 self.check_directory = check_directory
36
37 async def check_directory_configured_correctly(self) -> None:
38 """
39 Perform a one-off configuration check that StaticFiles is actually
40 pointed at a directory, so that we can raise loud errors rather than
41 just returning 404 responses.
42 """
43 directory = self.check_directory
44 try:
45 stat_result = await aio_stat(directory)
46 except FileNotFoundError:
47 raise RuntimeError("StaticFiles directory '%s' does not exist." % directory)
48 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
49 raise RuntimeError("StaticFiles path '%s' is not a directory." % directory)
50
51 async def __call__(self, receive: Receive, send: Send) -> None:
52 if self.check_directory is not None:
53 await self.check_directory_configured_correctly()
54
55 try:
56 stat_result = await aio_stat(self.path)
57 except FileNotFoundError:
58 response = PlainTextResponse("Not Found", status_code=404) # type: Response
59 else:
60 mode = stat_result.st_mode
61 if not stat.S_ISREG(mode):
62 response = PlainTextResponse("Not Found", status_code=404)
63 else:
64 response = FileResponse(self.path, stat_result=stat_result)
65
66 await response(receive, send)
67
[end of starlette/staticfiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -8,7 +8,9 @@
class StaticFiles:
- def __init__(self, *, directory: str) -> None:
+ def __init__(self, *, directory: str, check_dir: bool = True) -> None:
+ if check_dir and not os.path.isdir(directory):
+ raise RuntimeError("Directory '%s' does not exist" % directory)
self.directory = directory
self.config_checked = False
| {"golden_diff": "diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -8,7 +8,9 @@\n \n \n class StaticFiles:\n- def __init__(self, *, directory: str) -> None:\n+ def __init__(self, *, directory: str, check_dir: bool = True) -> None:\n+ if check_dir and not os.path.isdir(directory):\n+ raise RuntimeError(\"Directory '%s' does not exist\" % directory)\n self.directory = directory\n self.config_checked = False\n", "issue": "Check directory exists when instantiating `StaticFiles`\nThe `StaticFiles` application should ensure that the directory exists at the point it is instantiated.\r\n\r\n(With an optional switch to turn this behavior off)\n", "before_files": [{"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import FileResponse, PlainTextResponse, Response\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str) -> None:\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n", "path": "starlette/staticfiles.py"}]} | 1,272 | 128 |
gh_patches_debug_14985 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2513 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't validate SAM transformed resources for rule I3042
### CloudFormation Lint Version
v0.71.1
### What operating system are you using?
Mac
### Describe the bug
When SAM transforms templates, it can create hardcoded ARNs depending on the scenario. It would make sense not to validate those ARNs against rule I3042.
### Expected behavior
I3042 should not be raised on resources that are created by the SAM transform.
### Reproduction template
```yaml
```
</issue>
<code>
[start of src/cfnlint/rules/resources/HardCodedArnProperties.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class HardCodedArnProperties(CloudFormationLintRule):
11 """Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number"""
12
13 id = "I3042"
14 shortdesc = "ARNs should use correctly placed Pseudo Parameters"
15 description = "Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number"
16 source_url = ""
17 tags = ["resources"]
18 regex = re.compile(
19 r"arn:(\$\{[^:]*::[^:]*}|[^:]*):[^:]+:(\$\{[^:]*::[^:]*}|[^:]*):(\$\{[^:]*::[^:]*}|[^:]*)"
20 )
21
22 def __init__(self):
23 """Init"""
24 super().__init__()
25 self.config_definition = {
26 "partition": {
27 "default": True,
28 "type": "boolean",
29 },
30 "region": {
31 "default": False,
32 "type": "boolean",
33 },
34 "accountId": {
35 "default": False,
36 "type": "boolean",
37 },
38 }
39 self.configure()
40
41 def _match_values(self, cfnelem, path):
42 """Recursively search for values matching the searchRegex"""
43 values = []
44 if isinstance(cfnelem, dict):
45 for key in cfnelem:
46 pathprop = path[:]
47 pathprop.append(key)
48 values.extend(self._match_values(cfnelem[key], pathprop))
49 elif isinstance(cfnelem, list):
50 for index, item in enumerate(cfnelem):
51 pathprop = path[:]
52 pathprop.append(index)
53 values.extend(self._match_values(item, pathprop))
54 else:
55 # Leaf node
56 if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):
57 for variable in re.findall(self.regex, cfnelem):
58 if "Fn::Sub" in path:
59 values.append(path + [variable])
60
61 return values
62
63 def match_values(self, cfn):
64 """
65 Search for values in all parts of the templates that match the searchRegex
66 """
67 results = []
68 results.extend(self._match_values(cfn.template.get("Resources", {}), []))
69 # Globals are removed during a transform. They need to be checked manually
70 results.extend(self._match_values(cfn.template.get("Globals", {}), []))
71 return results
72
73 def match(self, cfn):
74 """Check CloudFormation Resources"""
75 matches = []
76
77 # Get a list of paths to every leaf node string containing at least one ${parameter}
78 parameter_string_paths = self.match_values(cfn)
79 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
80 for parameter_string_path in parameter_string_paths:
81 path = ["Resources"] + parameter_string_path[:-1]
82 candidate = parameter_string_path[-1]
83
84 # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
85 # is valid even with aws as the account #. This handles empty string
86 if self.config["partition"] and not re.match(
87 r"^\$\{\w+}|\$\{AWS::Partition}|$", candidate[0]
88 ):
89 # or not re.match(r'^(\$\{\w+}|\$\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\$\{\w+}|\$\{AWS::AccountId}|aws|$', candidate[2]):
90 message = "ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters"
91 matches.append(RuleMatch(path, message.format(path[1])))
92 if self.config["region"] and not re.match(
93 r"^(\$\{\w+}|\$\{AWS::Region}|)$", candidate[1]
94 ):
95 # or or not re.match(r'^\$\{\w+}|\$\{AWS::AccountId}|aws|$', candidate[2]):
96 message = "ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters"
97 matches.append(RuleMatch(path, message.format(path[1])))
98 if self.config["accountId"] and not re.match(
99 r"^\$\{\w+}|\$\{AWS::AccountId}|aws|$", candidate[2]
100 ):
101 message = "ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters"
102 matches.append(RuleMatch(path, message.format(path[1])))
103
104 return matches
105
[end of src/cfnlint/rules/resources/HardCodedArnProperties.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/src/cfnlint/rules/resources/HardCodedArnProperties.py b/src/cfnlint/rules/resources/HardCodedArnProperties.py
--- a/src/cfnlint/rules/resources/HardCodedArnProperties.py
+++ b/src/cfnlint/rules/resources/HardCodedArnProperties.py
@@ -71,9 +71,13 @@
return results
def match(self, cfn):
- """Check CloudFormation Resources"""
matches = []
+ transforms = cfn.transform_pre["Transform"]
+ transforms = transforms if isinstance(transforms, list) else [transforms]
+ if "AWS::Serverless-2016-10-31" in cfn.transform_pre["Transform"]:
+ return matches
+
# Get a list of paths to every leaf node string containing at least one ${parameter}
parameter_string_paths = self.match_values(cfn)
# We want to search all of the paths to check if each one contains an 'Fn::Sub'
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/HardCodedArnProperties.py b/src/cfnlint/rules/resources/HardCodedArnProperties.py\n--- a/src/cfnlint/rules/resources/HardCodedArnProperties.py\n+++ b/src/cfnlint/rules/resources/HardCodedArnProperties.py\n@@ -71,9 +71,13 @@\n return results\r\n \r\n def match(self, cfn):\r\n- \"\"\"Check CloudFormation Resources\"\"\"\r\n matches = []\r\n \r\n+ transforms = cfn.transform_pre[\"Transform\"]\r\n+ transforms = transforms if isinstance(transforms, list) else [transforms]\r\n+ if \"AWS::Serverless-2016-10-31\" in cfn.transform_pre[\"Transform\"]:\r\n+ return matches\r\n+\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n", "issue": "Don't validate SAM transformed resources for rule I3042\n### CloudFormation Lint Version\n\nv0.71.1\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\nWhen SAM transforms templates it can create hardcoded ARNs based on its scenario. It would make sense to not validate those ARNs against rule I3042\n\n### Expected behavior\n\nTo not raise I3042 on resources that are created by SAM transform.\n\n### Reproduction template\n\n```yaml\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\r\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\r\nSPDX-License-Identifier: MIT-0\r\n\"\"\"\r\nimport re\r\n\r\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\r\n\r\n\r\nclass HardCodedArnProperties(CloudFormationLintRule):\r\n \"\"\"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\"\"\r\n\r\n id = \"I3042\"\r\n shortdesc = \"ARNs should use correctly placed Pseudo Parameters\"\r\n description = \"Checks Resources if ARNs use correctly placed Pseudo Parameters instead of hardcoded Partition, Region, and Account Number\"\r\n source_url = \"\"\r\n tags = [\"resources\"]\r\n regex = re.compile(\r\n r\"arn:(\\$\\{[^:]*::[^:]*}|[^:]*):[^:]+:(\\$\\{[^:]*::[^:]*}|[^:]*):(\\$\\{[^:]*::[^:]*}|[^:]*)\"\r\n )\r\n\r\n def __init__(self):\r\n \"\"\"Init\"\"\"\r\n super().__init__()\r\n self.config_definition = {\r\n \"partition\": {\r\n \"default\": True,\r\n \"type\": \"boolean\",\r\n },\r\n \"region\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n \"accountId\": {\r\n \"default\": False,\r\n \"type\": \"boolean\",\r\n },\r\n }\r\n self.configure()\r\n\r\n def _match_values(self, cfnelem, path):\r\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\r\n values = []\r\n if isinstance(cfnelem, dict):\r\n for key in cfnelem:\r\n pathprop = path[:]\r\n pathprop.append(key)\r\n values.extend(self._match_values(cfnelem[key], pathprop))\r\n elif isinstance(cfnelem, list):\r\n for index, item in enumerate(cfnelem):\r\n pathprop = path[:]\r\n pathprop.append(index)\r\n values.extend(self._match_values(item, pathprop))\r\n else:\r\n # Leaf node\r\n if isinstance(cfnelem, str): # and re.match(searchRegex, cfnelem):\r\n for variable in re.findall(self.regex, cfnelem):\r\n if \"Fn::Sub\" in path:\r\n values.append(path + [variable])\r\n\r\n return values\r\n\r\n def match_values(self, cfn):\r\n \"\"\"\r\n Search for values in all parts of the templates that match the searchRegex\r\n \"\"\"\r\n results = []\r\n results.extend(self._match_values(cfn.template.get(\"Resources\", {}), []))\r\n # Globals are removed 
during a transform. They need to be checked manually\r\n results.extend(self._match_values(cfn.template.get(\"Globals\", {}), []))\r\n return results\r\n\r\n def match(self, cfn):\r\n \"\"\"Check CloudFormation Resources\"\"\"\r\n matches = []\r\n\r\n # Get a list of paths to every leaf node string containing at least one ${parameter}\r\n parameter_string_paths = self.match_values(cfn)\r\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\r\n for parameter_string_path in parameter_string_paths:\r\n path = [\"Resources\"] + parameter_string_path[:-1]\r\n candidate = parameter_string_path[-1]\r\n\r\n # !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\r\n # is valid even with aws as the account #. This handles empty string\r\n if self.config[\"partition\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::Partition}|$\", candidate[0]\r\n ):\r\n # or not re.match(r'^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$', candidate[1]) or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Partition in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"region\"] and not re.match(\r\n r\"^(\\$\\{\\w+}|\\$\\{AWS::Region}|)$\", candidate[1]\r\n ):\r\n # or or not re.match(r'^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$', candidate[2]):\r\n message = \"ARN in Resource {0} contains hardcoded Region in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n if self.config[\"accountId\"] and not re.match(\r\n r\"^\\$\\{\\w+}|\\$\\{AWS::AccountId}|aws|$\", candidate[2]\r\n ):\r\n message = \"ARN in Resource {0} contains hardcoded AccountId in ARN or incorrectly placed Pseudo Parameters\"\r\n matches.append(RuleMatch(path, message.format(path[1])))\r\n\r\n return matches\r\n", "path": "src/cfnlint/rules/resources/HardCodedArnProperties.py"}]} | 1,901 | 219 |
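The guard added by the diff normalizes `Transform` (which CloudFormation allows as either a single string or a list) and returns early for SAM templates. A tidied sketch of that check follows; note it tests membership against the normalized `transforms` list, a small cleanup of the golden diff, which builds the list but then checks the raw field:

```python
def match(self, cfn):
    matches = []

    transforms = cfn.transform_pre["Transform"]
    # "Transform" may be a plain string or a list of transform names.
    transforms = transforms if isinstance(transforms, list) else [transforms]
    if "AWS::Serverless-2016-10-31" in transforms:
        # ARNs injected by the SAM transform are expected to be hardcoded.
        return matches

    # ... continue with the Fn::Sub / pseudo-parameter checks ...
    return matches
```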
gh_patches_debug_32901 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-4742 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log deletions of galleries and publications
Just log the fact that it is a deletion, the type of object concerned, and the slug, so that we can easily trace back to the corresponding nginx logs using the date and time if a problem occurs.
</issue>
<code>
[start of zds/tutorialv2/receivers.py]
1 # coding: utf-8
2
3
4 import datetime
5 from django.dispatch.dispatcher import receiver
6 from django.utils.translation import ugettext_lazy as _
7 from zds.tutorialv2.models.models_database import PublishableContent
8 from zds.tutorialv2.signals import content_unpublished
9 from zds.utils import get_current_user
10 from zds.utils.models import Alert
11
12
13 @receiver(content_unpublished, sender=PublishableContent)
14 def cleanup_validation_alerts(sender, instance, **kwargs):
15 """
16 When opinions are unpublished (probably permanently), we must be sure all alerts are handled. For now we just \
17 resolve them.
18
19 :param sender: sender class
20 :param instance: object instance
21 :param kwargs: possibily moderator
22 """
23 if instance.is_opinion:
24 moderator = kwargs.get('moderator', get_current_user())
25 Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,
26 resolve_reason=_('Le billet a été dépublié.'),
27 solved_date=datetime.datetime.now(),
28 solved=True)
29
[end of zds/tutorialv2/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/zds/tutorialv2/receivers.py b/zds/tutorialv2/receivers.py
--- a/zds/tutorialv2/receivers.py
+++ b/zds/tutorialv2/receivers.py
@@ -2,10 +2,15 @@
import datetime
+import logging
+
from django.dispatch.dispatcher import receiver
from django.utils.translation import ugettext_lazy as _
+from django.db import models
+
from zds.tutorialv2.models.models_database import PublishableContent
from zds.tutorialv2.signals import content_unpublished
+from zds.gallery.models import Gallery
from zds.utils import get_current_user
from zds.utils.models import Alert
@@ -26,3 +31,25 @@
resolve_reason=_('Le billet a été dépublié.'),
solved_date=datetime.datetime.now(),
solved=True)
+
+
+@receiver(models.signals.post_delete, sender=Gallery)
+@receiver(models.signals.post_delete, sender=PublishableContent)
+def log_content_deletion(sender, instance, **kwargs):
+ """
+ When a content or gallery is deleted, this action is logged.
+ """
+
+ logger = logging.getLogger(__name__)
+ current_user = get_current_user()
+
+ if current_user is None:
+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',
+ {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,
+ 'instance_slug': instance.slug})
+ else:
+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '
+ 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,
+ 'instance_pk': instance.pk, 'instance_slug': instance.slug,
+ 'user_pk': current_user.pk,
+ 'username': current_user.username})
| {"golden_diff": "diff --git a/zds/tutorialv2/receivers.py b/zds/tutorialv2/receivers.py\n--- a/zds/tutorialv2/receivers.py\n+++ b/zds/tutorialv2/receivers.py\n@@ -2,10 +2,15 @@\n \n \n import datetime\n+import logging\n+\n from django.dispatch.dispatcher import receiver\n from django.utils.translation import ugettext_lazy as _\n+from django.db import models\n+\n from zds.tutorialv2.models.models_database import PublishableContent\n from zds.tutorialv2.signals import content_unpublished\n+from zds.gallery.models import Gallery\n from zds.utils import get_current_user\n from zds.utils.models import Alert\n \n@@ -26,3 +31,25 @@\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n+\n+\n+@receiver(models.signals.post_delete, sender=Gallery)\n+@receiver(models.signals.post_delete, sender=PublishableContent)\n+def log_content_deletion(sender, instance, **kwargs):\n+ \"\"\"\n+ When a content or gallery is deleted, this action is logged.\n+ \"\"\"\n+\n+ logger = logging.getLogger(__name__)\n+ current_user = get_current_user()\n+\n+ if current_user is None:\n+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted. User not found.',\n+ {'instance_model': type(instance).__name__, 'instance_pk': instance.pk,\n+ 'instance_slug': instance.slug})\n+ else:\n+ logger.info('%(instance_model)s #%(instance_pk)d (%(instance_slug)s) has been deleted '\n+ 'by user #%(user_pk)d (%(username)s).', {'instance_model': type(instance).__name__,\n+ 'instance_pk': instance.pk, 'instance_slug': instance.slug,\n+ 'user_pk': current_user.pk,\n+ 'username': current_user.username})\n", "issue": "Logger les suppressions de galleries et de publications\nLogger juste le fait que ce soit une suppression, le type d\u2019objet concern\u00e9 et le slug histoire qu\u2019on puisse facilement remonter aux logs de nginx correspondantes avec la date et l\u2019heures en cas de probl\u00e8me.\n", "before_files": [{"content": "# coding: utf-8\n\n\nimport datetime\nfrom django.dispatch.dispatcher import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom zds.tutorialv2.models.models_database import PublishableContent\nfrom zds.tutorialv2.signals import content_unpublished\nfrom zds.utils import get_current_user\nfrom zds.utils.models import Alert\n\n\n@receiver(content_unpublished, sender=PublishableContent)\ndef cleanup_validation_alerts(sender, instance, **kwargs):\n \"\"\"\n When opinions are unpublished (probably permanently), we must be sure all alerts are handled. For now we just \\\n resolve them.\n\n :param sender: sender class\n :param instance: object instance\n :param kwargs: possibily moderator\n \"\"\"\n if instance.is_opinion:\n moderator = kwargs.get('moderator', get_current_user())\n Alert.objects.filter(scope='CONTENT', content=instance).update(moderator=moderator,\n resolve_reason=_('Le billet a \u00e9t\u00e9 d\u00e9publi\u00e9.'),\n solved_date=datetime.datetime.now(),\n solved=True)\n", "path": "zds/tutorialv2/receivers.py"}]} | 875 | 420 |
gh_patches_debug_32792 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-394 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dataset patches do not remove empty (e.g. renamed) subsets
Steps to reproduce:
1. Create a project
2. Import a dataset
3. Rename a subset (e.g. `datum transform -t random_split`)
Depending on the format, the exported dataset will contain annotations from renamed-from and renamed-to subsets. This leads to duplication of annotations in different subsets, which are then found and merged together on importing.
</issue>
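A minimal sketch of the same reproduction through the Python API, under the assumption that the `random_split` transform takes a `splits` list and that the source uses a per-subset format such as VOC:

```python
from datumaro.components.dataset import Dataset

# import, redistribute subsets, and export back over the same directory
dataset = Dataset.import_from('path/to/source', 'voc')
dataset.transform('random_split', splits=[('train', 0.67), ('test', 0.33)])

# files belonging to the old subset names are not removed on export,
# so annotations end up present in both the old and the new subsets
dataset.export('path/to/source', 'voc', save_images=True)
```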
<code>
[start of datumaro/components/converter.py]
1 # Copyright (C) 2019-2021 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 from typing import Union
6 import logging as log
7 import os
8 import os.path as osp
9 import shutil
10
11 from datumaro.components.cli_plugin import CliPlugin
12 from datumaro.components.dataset import DatasetPatch
13 from datumaro.components.extractor import DatasetItem
14 from datumaro.util.image import Image
15
16
17 class Converter(CliPlugin):
18 DEFAULT_IMAGE_EXT = None
19
20 @classmethod
21 def build_cmdline_parser(cls, **kwargs):
22 parser = super().build_cmdline_parser(**kwargs)
23 parser.add_argument('--save-images', action='store_true',
24 help="Save images (default: %(default)s)")
25 parser.add_argument('--image-ext', default=None,
26 help="Image extension (default: keep or use format default%s)" % \
27 (' ' + cls.DEFAULT_IMAGE_EXT if cls.DEFAULT_IMAGE_EXT else ''))
28
29 return parser
30
31 @classmethod
32 def convert(cls, extractor, save_dir, **options):
33 converter = cls(extractor, save_dir, **options)
34 return converter.apply()
35
36 @classmethod
37 def patch(cls, dataset, patch, save_dir, **options):
38 return cls.convert(dataset, save_dir, **options)
39
40 def apply(self):
41 raise NotImplementedError("Should be implemented in a subclass")
42
43 def __init__(self, extractor, save_dir, save_images=False,
44 image_ext=None, default_image_ext=None):
45 default_image_ext = default_image_ext or self.DEFAULT_IMAGE_EXT
46 assert default_image_ext
47 self._default_image_ext = default_image_ext
48
49 self._save_images = save_images
50 self._image_ext = image_ext
51
52 self._extractor = extractor
53 self._save_dir = save_dir
54
55 # TODO: refactor this variable.
56 # Can be used by a subclass to store the current patch info
57 if isinstance(extractor, DatasetPatch.DatasetPatchWrapper):
58 self._patch = extractor.patch
59 else:
60 self._patch = None
61
62 def _find_image_ext(self, item: Union[DatasetItem, Image]):
63 src_ext = None
64
65 if isinstance(item, DatasetItem) and item.has_image:
66 src_ext = item.image.ext
67 elif isinstance(item, Image):
68 src_ext = item.ext
69
70 return self._image_ext or src_ext or self._default_image_ext
71
72 def _make_item_filename(self, item, *, name=None, subdir=None):
73 name = name or item.id
74 subdir = subdir or ''
75 return osp.join(subdir, name)
76
77 def _make_image_filename(self, item, *, name=None, subdir=None):
78 return self._make_item_filename(item, name=name, subdir=subdir) + \
79 self._find_image_ext(item)
80
81 def _make_pcd_filename(self, item, *, name=None, subdir=None):
82 return self._make_item_filename(item, name=name, subdir=subdir) + '.pcd'
83
84 def _save_image(self, item, path=None, *,
85 name=None, subdir=None, basedir=None):
86 assert not ((subdir or name or basedir) and path), \
87 "Can't use both subdir or name or basedir and path arguments"
88
89 if not item.has_image or not item.image.has_data:
90 log.warning("Item '%s' has no image", item.id)
91 return
92
93 basedir = basedir or self._save_dir
94 path = path or osp.join(basedir,
95 self._make_image_filename(item, name=name, subdir=subdir))
96 path = osp.abspath(path)
97
98 item.image.save(path)
99
100 def _save_point_cloud(self, item=None, path=None, *,
101 name=None, subdir=None, basedir=None):
102 assert not ((subdir or name or basedir) and path), \
103 "Can't use both subdir or name or basedir and path arguments"
104
105 if not item.point_cloud:
106 log.warning("Item '%s' has no pcd", item.id)
107 return
108
109 basedir = basedir or self._save_dir
110 path = path or osp.join(basedir,
111 self._make_pcd_filename(item, name=name, subdir=subdir))
112 path = osp.abspath(path)
113
114 os.makedirs(osp.dirname(path), exist_ok=True)
115 if item.point_cloud and osp.isfile(item.point_cloud):
116 if item.point_cloud != path:
117 shutil.copyfile(item.point_cloud, path)
118
[end of datumaro/components/converter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datumaro/components/converter.py b/datumaro/components/converter.py
--- a/datumaro/components/converter.py
+++ b/datumaro/components/converter.py
@@ -2,6 +2,7 @@
#
# SPDX-License-Identifier: MIT
+from tempfile import mkdtemp
from typing import Union
import logging as log
import os
@@ -11,6 +12,7 @@
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.dataset import DatasetPatch
from datumaro.components.extractor import DatasetItem
+from datumaro.util import error_rollback, on_error_do
from datumaro.util.image import Image
@@ -34,8 +36,34 @@
return converter.apply()
@classmethod
+ @error_rollback
def patch(cls, dataset, patch, save_dir, **options):
- return cls.convert(dataset, save_dir, **options)
+ # This solution is not any better in performance than just
+ # writing a dataset, but in case of patching (i.e. writing
+ # to the previous location), it allows to avoid many problems
+ # with removing and replacing existing files. Surely, this
+ # approach also has problems with removal of the given directory.
+ # Problems can occur if we can't remove the directory,
+ # or want to reuse the given directory. It can happen if it
+ # is mounted or (sym-)linked.
+ # Probably, a better solution could be to wipe directory
+ # contents and write new data there. Note that directly doing this
+ # also doesn't work, because images may be needed for writing.
+
+ if not osp.isdir(save_dir):
+ return cls.convert(dataset, save_dir, **options)
+
+ tmpdir = mkdtemp(dir=osp.dirname(save_dir),
+ prefix=osp.basename(save_dir), suffix='.tmp')
+ on_error_do(shutil.rmtree, tmpdir, ignore_errors=True)
+ shutil.copymode(save_dir, tmpdir)
+
+ retval = cls.convert(dataset, tmpdir, **options)
+
+ shutil.rmtree(save_dir)
+ os.replace(tmpdir, save_dir)
+
+ return retval
def apply(self):
raise NotImplementedError("Should be implemented in a subclass")
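
The heart of the fix is the write-to-a-sibling-temporary-directory-then-swap pattern. A self-contained sketch of just that pattern, standard library only (the patch itself uses datumaro's `error_rollback` helpers where this sketch uses a plain `try`/`except`):

```python
import os
import os.path as osp
import shutil
from tempfile import mkdtemp


def write_replacing(save_dir, write_fn):
    """Run write_fn(dst) against a fresh sibling dir, then swap it in."""
    if not osp.isdir(save_dir):
        return write_fn(save_dir)

    tmpdir = mkdtemp(dir=osp.dirname(save_dir),
                     prefix=osp.basename(save_dir), suffix='.tmp')
    try:
        shutil.copymode(save_dir, tmpdir)  # keep the original permissions
        result = write_fn(tmpdir)          # old files stay readable meanwhile
        shutil.rmtree(save_dir)
        os.replace(tmpdir, save_dir)       # same filesystem, so just a rename
        return result
    except Exception:
        shutil.rmtree(tmpdir, ignore_errors=True)
        raise
```

Writing into a sibling directory keeps the old images readable while the new copy is produced, which is exactly the constraint the patch's comment calls out.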
| {"golden_diff": "diff --git a/datumaro/components/converter.py b/datumaro/components/converter.py\n--- a/datumaro/components/converter.py\n+++ b/datumaro/components/converter.py\n@@ -2,6 +2,7 @@\n #\n # SPDX-License-Identifier: MIT\n \n+from tempfile import mkdtemp\n from typing import Union\n import logging as log\n import os\n@@ -11,6 +12,7 @@\n from datumaro.components.cli_plugin import CliPlugin\n from datumaro.components.dataset import DatasetPatch\n from datumaro.components.extractor import DatasetItem\n+from datumaro.util import error_rollback, on_error_do\n from datumaro.util.image import Image\n \n \n@@ -34,8 +36,34 @@\n return converter.apply()\n \n @classmethod\n+ @error_rollback\n def patch(cls, dataset, patch, save_dir, **options):\n- return cls.convert(dataset, save_dir, **options)\n+ # This solution is not any better in performance than just\n+ # writing a dataset, but in case of patching (i.e. writing\n+ # to the previous location), it allows to avoid many problems\n+ # with removing and replacing existing files. Surely, this\n+ # approach also has problems with removal of the given directory.\n+ # Problems can occur if we can't remove the directory,\n+ # or want to reuse the given directory. It can happen if it\n+ # is mounted or (sym-)linked.\n+ # Probably, a better solution could be to wipe directory\n+ # contents and write new data there. Note that directly doing this\n+ # also doesn't work, because images may be needed for writing.\n+\n+ if not osp.isdir(save_dir):\n+ return cls.convert(dataset, save_dir, **options)\n+\n+ tmpdir = mkdtemp(dir=osp.dirname(save_dir),\n+ prefix=osp.basename(save_dir), suffix='.tmp')\n+ on_error_do(shutil.rmtree, tmpdir, ignore_errors=True)\n+ shutil.copymode(save_dir, tmpdir)\n+\n+ retval = cls.convert(dataset, tmpdir, **options)\n+\n+ shutil.rmtree(save_dir)\n+ os.replace(tmpdir, save_dir)\n+\n+ return retval\n \n def apply(self):\n raise NotImplementedError(\"Should be implemented in a subclass\")\n", "issue": "Dataset patches do not remove empty (e.g. renamed) subsets\nSteps to reproduce:\r\n1. Create a project\r\n2. Import a dataset\r\n3. Rename a subset (e.g. `datum transform -t random_split`)\r\n\r\nDepending on the format, the exported dataset will contain annotations from renamed-from and renamed-to subsets. 
This leads to duplication of annotations in different subsets, which are then found and merged together on importing.\n", "before_files": [{"content": "# Copyright (C) 2019-2021 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom typing import Union\nimport logging as log\nimport os\nimport os.path as osp\nimport shutil\n\nfrom datumaro.components.cli_plugin import CliPlugin\nfrom datumaro.components.dataset import DatasetPatch\nfrom datumaro.components.extractor import DatasetItem\nfrom datumaro.util.image import Image\n\n\nclass Converter(CliPlugin):\n DEFAULT_IMAGE_EXT = None\n\n @classmethod\n def build_cmdline_parser(cls, **kwargs):\n parser = super().build_cmdline_parser(**kwargs)\n parser.add_argument('--save-images', action='store_true',\n help=\"Save images (default: %(default)s)\")\n parser.add_argument('--image-ext', default=None,\n help=\"Image extension (default: keep or use format default%s)\" % \\\n (' ' + cls.DEFAULT_IMAGE_EXT if cls.DEFAULT_IMAGE_EXT else ''))\n\n return parser\n\n @classmethod\n def convert(cls, extractor, save_dir, **options):\n converter = cls(extractor, save_dir, **options)\n return converter.apply()\n\n @classmethod\n def patch(cls, dataset, patch, save_dir, **options):\n return cls.convert(dataset, save_dir, **options)\n\n def apply(self):\n raise NotImplementedError(\"Should be implemented in a subclass\")\n\n def __init__(self, extractor, save_dir, save_images=False,\n image_ext=None, default_image_ext=None):\n default_image_ext = default_image_ext or self.DEFAULT_IMAGE_EXT\n assert default_image_ext\n self._default_image_ext = default_image_ext\n\n self._save_images = save_images\n self._image_ext = image_ext\n\n self._extractor = extractor\n self._save_dir = save_dir\n\n # TODO: refactor this variable.\n # Can be used by a subclass to store the current patch info\n if isinstance(extractor, DatasetPatch.DatasetPatchWrapper):\n self._patch = extractor.patch\n else:\n self._patch = None\n\n def _find_image_ext(self, item: Union[DatasetItem, Image]):\n src_ext = None\n\n if isinstance(item, DatasetItem) and item.has_image:\n src_ext = item.image.ext\n elif isinstance(item, Image):\n src_ext = item.ext\n\n return self._image_ext or src_ext or self._default_image_ext\n\n def _make_item_filename(self, item, *, name=None, subdir=None):\n name = name or item.id\n subdir = subdir or ''\n return osp.join(subdir, name)\n\n def _make_image_filename(self, item, *, name=None, subdir=None):\n return self._make_item_filename(item, name=name, subdir=subdir) + \\\n self._find_image_ext(item)\n\n def _make_pcd_filename(self, item, *, name=None, subdir=None):\n return self._make_item_filename(item, name=name, subdir=subdir) + '.pcd'\n\n def _save_image(self, item, path=None, *,\n name=None, subdir=None, basedir=None):\n assert not ((subdir or name or basedir) and path), \\\n \"Can't use both subdir or name or basedir and path arguments\"\n\n if not item.has_image or not item.image.has_data:\n log.warning(\"Item '%s' has no image\", item.id)\n return\n\n basedir = basedir or self._save_dir\n path = path or osp.join(basedir,\n self._make_image_filename(item, name=name, subdir=subdir))\n path = osp.abspath(path)\n\n item.image.save(path)\n\n def _save_point_cloud(self, item=None, path=None, *,\n name=None, subdir=None, basedir=None):\n assert not ((subdir or name or basedir) and path), \\\n \"Can't use both subdir or name or basedir and path arguments\"\n\n if not item.point_cloud:\n log.warning(\"Item '%s' has no pcd\", item.id)\n return\n\n basedir = basedir 
or self._save_dir\n path = path or osp.join(basedir,\n self._make_pcd_filename(item, name=name, subdir=subdir))\n path = osp.abspath(path)\n\n os.makedirs(osp.dirname(path), exist_ok=True)\n if item.point_cloud and osp.isfile(item.point_cloud):\n if item.point_cloud != path:\n shutil.copyfile(item.point_cloud, path)\n", "path": "datumaro/components/converter.py"}]} | 1,829 | 500 |
gh_patches_debug_27089 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pass calculator options through `pyhf.infer.upperlimit` (toys)
# Description
Currently there's no easy way to pass custom options to `upperlimit`, so it will always call the asymptotic calculator.
</issue>
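Until such a pass-through exists, the workaround is to drive `hypotest` in a scan loop directly. A sketch, assuming `pyhf.infer.hypotest` accepts `calctype="toybased"` and forwards `ntoys` to the toy calculator (check this against your pyhf version):

```python
import numpy as np
import pyhf

model = pyhf.simplemodels.uncorrelated_background(
    signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0])
data = pyhf.tensorlib.astensor([51, 48] + model.config.auxdata)

# replicate upperlimit's internal scan, but with a toy-based calculator
results = [
    pyhf.infer.hypotest(mu, data, model, return_expected_set=True,
                        calctype="toybased", ntoys=500)
    for mu in np.linspace(0, 5, 11)
]
```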
<code>
[start of src/pyhf/infer/intervals.py]
1 """Interval estimation"""
2 from pyhf.infer import hypotest
3 from pyhf import get_backend
4 import numpy as np
5
6 __all__ = ["upperlimit"]
7
8
9 def __dir__():
10 return __all__
11
12
13 def _interp(x, xp, fp):
14 tb, _ = get_backend()
15 return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))
16
17
18 def upperlimit(data, model, scan, level=0.05, return_results=False):
19 """
20 Calculate an upper limit interval ``(0, poi_up)`` for a single
21 Parameter of Interest (POI) using a fixed scan through POI-space.
22
23 Example:
24 >>> import numpy as np
25 >>> import pyhf
26 >>> pyhf.set_backend("numpy")
27 >>> model = pyhf.simplemodels.uncorrelated_background(
28 ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
29 ... )
30 >>> observations = [51, 48]
31 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
32 >>> scan = np.linspace(0, 5, 21)
33 >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upperlimit(
34 ... data, model, scan, return_results=True
35 ... )
36 >>> obs_limit
37 array(1.01764089)
38 >>> exp_limits
39 [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]
40
41 Args:
42 data (:obj:`tensor`): The observed data.
43 model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.
44 scan (:obj:`iterable`): Iterable of POI values.
45 level (:obj:`float`): The threshold value to evaluate the interpolated results at.
46 return_results (:obj:`bool`): Whether to return the per-point results.
47
48 Returns:
49 Tuple of Tensors:
50
51 - Tensor: The observed upper limit on the POI.
52 - Tensor: The expected upper limits on the POI.
53 - Tuple of Tensors: The given ``scan`` along with the
54 :class:`~pyhf.infer.hypotest` results at each test POI.
55 Only returned when ``return_results`` is ``True``.
56 """
57 tb, _ = get_backend()
58 results = [
59 hypotest(mu, data, model, test_stat="qtilde", return_expected_set=True)
60 for mu in scan
61 ]
62 obs = tb.astensor([[r[0]] for r in results])
63 exp = tb.astensor([[r[1][idx] for idx in range(5)] for r in results])
64
65 result_arrary = tb.concatenate([obs, exp], axis=1).T
66
67 # observed limit and the (0, +-1, +-2)sigma expected limits
68 limits = [_interp(level, result_arrary[idx][::-1], scan[::-1]) for idx in range(6)]
69 obs_limit, exp_limits = limits[0], limits[1:]
70
71 if return_results:
72 return obs_limit, exp_limits, (scan, results)
73 return obs_limit, exp_limits
74
[end of src/pyhf/infer/intervals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/infer/intervals.py b/src/pyhf/infer/intervals.py
--- a/src/pyhf/infer/intervals.py
+++ b/src/pyhf/infer/intervals.py
@@ -15,7 +15,7 @@
return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))
-def upperlimit(data, model, scan, level=0.05, return_results=False):
+def upperlimit(data, model, scan, level=0.05, return_results=False, **hypotest_kwargs):
"""
Calculate an upper limit interval ``(0, poi_up)`` for a single
Parameter of Interest (POI) using a fixed scan through POI-space.
@@ -44,6 +44,8 @@
scan (:obj:`iterable`): Iterable of POI values.
level (:obj:`float`): The threshold value to evaluate the interpolated results at.
return_results (:obj:`bool`): Whether to return the per-point results.
+ hypotest_kwargs (:obj:`string`): Kwargs for the calls to
+ :class:`~pyhf.infer.hypotest` to configure the fits.
Returns:
Tuple of Tensors:
@@ -56,7 +58,7 @@
"""
tb, _ = get_backend()
results = [
- hypotest(mu, data, model, test_stat="qtilde", return_expected_set=True)
+ hypotest(mu, data, model, return_expected_set=True, **hypotest_kwargs)
for mu in scan
]
obs = tb.astensor([[r[0]] for r in results])
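
With the kwargs forwarded as in this diff, the calculator choice moves to the `upperlimit` call site. Hypothetical usage, reusing `data` and `model` from the sketch above and under the same `calctype`/`ntoys` assumptions:

```python
obs_limit, exp_limits = pyhf.infer.intervals.upperlimit(
    data, model, np.linspace(0, 5, 11),
    calctype="toybased", ntoys=500,  # forwarded to every hypotest call
)
```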
| {"golden_diff": "diff --git a/src/pyhf/infer/intervals.py b/src/pyhf/infer/intervals.py\n--- a/src/pyhf/infer/intervals.py\n+++ b/src/pyhf/infer/intervals.py\n@@ -15,7 +15,7 @@\n return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))\n \n \n-def upperlimit(data, model, scan, level=0.05, return_results=False):\n+def upperlimit(data, model, scan, level=0.05, return_results=False, **hypotest_kwargs):\n \"\"\"\n Calculate an upper limit interval ``(0, poi_up)`` for a single\n Parameter of Interest (POI) using a fixed scan through POI-space.\n@@ -44,6 +44,8 @@\n scan (:obj:`iterable`): Iterable of POI values.\n level (:obj:`float`): The threshold value to evaluate the interpolated results at.\n return_results (:obj:`bool`): Whether to return the per-point results.\n+ hypotest_kwargs (:obj:`string`): Kwargs for the calls to\n+ :class:`~pyhf.infer.hypotest` to configure the fits.\n \n Returns:\n Tuple of Tensors:\n@@ -56,7 +58,7 @@\n \"\"\"\n tb, _ = get_backend()\n results = [\n- hypotest(mu, data, model, test_stat=\"qtilde\", return_expected_set=True)\n+ hypotest(mu, data, model, return_expected_set=True, **hypotest_kwargs)\n for mu in scan\n ]\n obs = tb.astensor([[r[0]] for r in results])\n", "issue": "pass calculator options through `pyhf.infer.upperlimit` (toys)\n# Description\r\n\r\nCurrently there's no easy way to pass custom options to upperlimit so it'll always acll asymptotics\n", "before_files": [{"content": "\"\"\"Interval estimation\"\"\"\nfrom pyhf.infer import hypotest\nfrom pyhf import get_backend\nimport numpy as np\n\n__all__ = [\"upperlimit\"]\n\n\ndef __dir__():\n return __all__\n\n\ndef _interp(x, xp, fp):\n tb, _ = get_backend()\n return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))\n\n\ndef upperlimit(data, model, scan, level=0.05, return_results=False):\n \"\"\"\n Calculate an upper limit interval ``(0, poi_up)`` for a single\n Parameter of Interest (POI) using a fixed scan through POI-space.\n\n Example:\n >>> import numpy as np\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.uncorrelated_background(\n ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> scan = np.linspace(0, 5, 21)\n >>> obs_limit, exp_limits, (scan, results) = pyhf.infer.intervals.upperlimit(\n ... data, model, scan, return_results=True\n ... 
)\n >>> obs_limit\n array(1.01764089)\n >>> exp_limits\n [array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]\n\n Args:\n data (:obj:`tensor`): The observed data.\n model (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.\n scan (:obj:`iterable`): Iterable of POI values.\n level (:obj:`float`): The threshold value to evaluate the interpolated results at.\n return_results (:obj:`bool`): Whether to return the per-point results.\n\n Returns:\n Tuple of Tensors:\n\n - Tensor: The observed upper limit on the POI.\n - Tensor: The expected upper limits on the POI.\n - Tuple of Tensors: The given ``scan`` along with the\n :class:`~pyhf.infer.hypotest` results at each test POI.\n Only returned when ``return_results`` is ``True``.\n \"\"\"\n tb, _ = get_backend()\n results = [\n hypotest(mu, data, model, test_stat=\"qtilde\", return_expected_set=True)\n for mu in scan\n ]\n obs = tb.astensor([[r[0]] for r in results])\n exp = tb.astensor([[r[1][idx] for idx in range(5)] for r in results])\n\n result_arrary = tb.concatenate([obs, exp], axis=1).T\n\n # observed limit and the (0, +-1, +-2)sigma expected limits\n limits = [_interp(level, result_arrary[idx][::-1], scan[::-1]) for idx in range(6)]\n obs_limit, exp_limits = limits[0], limits[1:]\n\n if return_results:\n return obs_limit, exp_limits, (scan, results)\n return obs_limit, exp_limits\n", "path": "src/pyhf/infer/intervals.py"}]} | 1,491 | 362 |
gh_patches_debug_35293 | rasdani/github-patches | git_diff | beeware__toga-1069 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash of ImageView example in Gtk
**Describe the bug**
The `imageview` example crashes on Gtk because `toga_gtk.ImageView.rehint()` is called before `toga_gtk.ImageView._pixbuf` has been set by the interface layer. The following traceback is produced:
```
Traceback (most recent call last):
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/app.py", line 93, in gtk_startup
self.interface.startup()
File "/media/psf/Home/Python/toga/examples/imageview/imageview/app.py", line 18, in startup
imageview_from_path = toga.ImageView(image_from_path)
File "/home/samschott/.local/lib/python3.8/site-packages/toga/widgets/imageview.py", line 25, in __init__
self._impl = self.factory.ImageView(interface=self)
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/base.py", line 12, in __init__
self.interface.style.reapply()
File "/home/samschott/.local/lib/python3.8/site-packages/travertino/declaration.py", line 88, in reapply
self.apply(style, getattr(self, style))
File "/home/samschott/.local/lib/python3.8/site-packages/toga/style/pack.py", line 104, in apply
self._applicator.set_font(
File "/home/samschott/.local/lib/python3.8/site-packages/toga/style/applicator.py", line 25, in set_font
self.widget._impl.rehint()
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/imageview.py", line 20, in rehint
original_height=self._pixbuf.get_height(),
AttributeError: 'NoneType' object has no attribute 'get_height'
```
**To Reproduce**
Run the imageview example:
```shell
python3 -m imageview
```
**Environment:**
- Operating System: Ubuntu 20.04
- Python version: Python 3.8
- Software versions:
- Toga: 0.3.0.dev23
**Additional context**
This is a tricky issue, and I suspect it was introduced by a change to when the style is applied. Essentially, the interface does set the image (pixbuf) during init. However, the style is already applied during the init of `toga_gtk.base.Widget`, before the image is set (line 12):
https://github.com/beeware/toga/blob/f8bea583c87642ad102776e1b58fd8bb9265b135/src/gtk/toga_gtk/widgets/base.py#L5-L12
The quickest solution may be to guard against `pixbuf` not being set in the `rehint` implementation.
</issue>
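A minimal sketch of the suggested guard in `toga_gtk.widgets.imageview.ImageView.rehint()`; the surrounding calls are copied from the widget's current implementation (the eventual fix may well go further, e.g. also rescaling on resize):

```python
def rehint(self):
    # style can be (re)applied before set_image() has run, so bail out early
    if self._pixbuf is None:
        return
    height, width = self._resize_max(
        original_height=self._pixbuf.get_height(),
        original_width=self._pixbuf.get_width(),
        max_height=self.native.get_allocated_height(),
        max_width=self.native.get_allocated_width(),
    )
    scaled = self._pixbuf.scale_simple(width, height,
                                       GdkPixbuf.InterpType.BILINEAR)
    self._image.set_from_pixbuf(scaled)
```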
<code>
[start of src/gtk/toga_gtk/widgets/imageview.py]
1
2 from ..libs import GdkPixbuf, Gtk
3 from .base import Widget
4
5
6 class ImageView(Widget):
7
8 def create(self):
9 self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
10 self._image = Gtk.Image()
11 self._pixbuf = None
12 self.native.add(self._image)
13 self.native.interface = self.interface
14
15 def set_image(self, image):
16 self._pixbuf = image._impl.native
17
18 def rehint(self):
19 height, width = self._resize_max(
20 original_height=self._pixbuf.get_height(),
21 original_width=self._pixbuf.get_width(),
22 max_height=self.native.get_allocated_height(),
23 max_width=self.native.get_allocated_width()
24 )
25
26 scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)
27 self._image.set_from_pixbuf(scaled_pixbuf)
28
29 @staticmethod
30 def _resize_max(original_height, original_width, max_height, max_width):
31
32 # Check to make sure all dimensions have valid sizes
33 if min(original_height, original_width, max_height, max_width) <= 0:
34 return 1, 1
35
36 width_ratio = max_width/original_width
37 height_ratio = max_height/original_height
38
39 height = original_height * width_ratio
40 if height <= max_height:
41 width = original_width * width_ratio
42 else:
43 height = original_height * height_ratio
44 width = original_width * height_ratio
45
46 return int(height), int(width)
47
[end of src/gtk/toga_gtk/widgets/imageview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py
--- a/src/gtk/toga_gtk/widgets/imageview.py
+++ b/src/gtk/toga_gtk/widgets/imageview.py
@@ -1,10 +1,8 @@
-
-from ..libs import GdkPixbuf, Gtk
+from ..libs import GdkPixbuf, Gtk, Gdk
from .base import Widget
class ImageView(Widget):
-
def create(self):
self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self._image = Gtk.Image()
@@ -15,16 +13,30 @@
def set_image(self, image):
self._pixbuf = image._impl.native
+ def set_bounds(self, x, y, width, height):
+ super().set_bounds(x, y, width, height)
+ # rehint to update scaling of pixbuf
+ self.rehint()
+
def rehint(self):
- height, width = self._resize_max(
- original_height=self._pixbuf.get_height(),
- original_width=self._pixbuf.get_width(),
- max_height=self.native.get_allocated_height(),
- max_width=self.native.get_allocated_width()
- )
+ if self._pixbuf:
+ height, width = self._resize_max(
+ original_height=self._pixbuf.get_height(),
+ original_width=self._pixbuf.get_width(),
+ max_height=self.native.get_allocated_height(),
+ max_width=self.native.get_allocated_width(),
+ )
+
+ dpr = self.native.get_scale_factor()
+
+ scaled_pixbuf = self._pixbuf.scale_simple(
+ width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR
+ )
- scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)
- self._image.set_from_pixbuf(scaled_pixbuf)
+ surface = Gdk.cairo_surface_create_from_pixbuf(
+ scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window
+ )
+ self._image.set_from_surface(surface)
@staticmethod
def _resize_max(original_height, original_width, max_height, max_width):
@@ -33,8 +45,8 @@
if min(original_height, original_width, max_height, max_width) <= 0:
return 1, 1
- width_ratio = max_width/original_width
- height_ratio = max_height/original_height
+ width_ratio = max_width / original_width
+ height_ratio = max_height / original_height
height = original_height * width_ratio
if height <= max_height:
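
Beyond the `None` guard, this patch switches to rendering through a cairo surface so that HiDPI displays get a sharp image. A condensed sketch of that path, with `native`, `pixbuf` and `image` standing in for the widget's attributes from the diff:

```python
dpr = native.get_scale_factor()                 # e.g. 2 on a HiDPI display
scaled = pixbuf.scale_simple(width * dpr, height * dpr,
                             GdkPixbuf.InterpType.BILINEAR)
surface = Gdk.cairo_surface_create_from_pixbuf(
    scaled, 0, native.get_window())             # scale 0 = same as window
image.set_from_surface(surface)
```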
| {"golden_diff": "diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py\n--- a/src/gtk/toga_gtk/widgets/imageview.py\n+++ b/src/gtk/toga_gtk/widgets/imageview.py\n@@ -1,10 +1,8 @@\n-\n-from ..libs import GdkPixbuf, Gtk\n+from ..libs import GdkPixbuf, Gtk, Gdk\n from .base import Widget\n \n \n class ImageView(Widget):\n-\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n@@ -15,16 +13,30 @@\n def set_image(self, image):\n self._pixbuf = image._impl.native\n \n+ def set_bounds(self, x, y, width, height):\n+ super().set_bounds(x, y, width, height)\n+ # rehint to update scaling of pixbuf\n+ self.rehint()\n+\n def rehint(self):\n- height, width = self._resize_max(\n- original_height=self._pixbuf.get_height(),\n- original_width=self._pixbuf.get_width(),\n- max_height=self.native.get_allocated_height(),\n- max_width=self.native.get_allocated_width()\n- )\n+ if self._pixbuf:\n+ height, width = self._resize_max(\n+ original_height=self._pixbuf.get_height(),\n+ original_width=self._pixbuf.get_width(),\n+ max_height=self.native.get_allocated_height(),\n+ max_width=self.native.get_allocated_width(),\n+ )\n+\n+ dpr = self.native.get_scale_factor()\n+\n+ scaled_pixbuf = self._pixbuf.scale_simple(\n+ width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n+ )\n \n- scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)\n- self._image.set_from_pixbuf(scaled_pixbuf)\n+ surface = Gdk.cairo_surface_create_from_pixbuf(\n+ scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n+ )\n+ self._image.set_from_surface(surface)\n \n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n@@ -33,8 +45,8 @@\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n \n- width_ratio = max_width/original_width\n- height_ratio = max_height/original_height\n+ width_ratio = max_width / original_width\n+ height_ratio = max_height / original_height\n \n height = original_height * width_ratio\n if height <= max_height:\n", "issue": "Crash of ImageView example in Gtk\n**Describe the bug**\r\nThe `imageview` example crashes on Gtk because `toga_gtk.ImageView.rehint()` is called before `toga_gtk.ImageView._pixbuf` has been set by the interface layer. 
The following traceback is produced:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/app.py\", line 93, in gtk_startup\r\n self.interface.startup()\r\n File \"/media/psf/Home/Python/toga/examples/imageview/imageview/app.py\", line 18, in startup\r\n imageview_from_path = toga.ImageView(image_from_path)\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga/widgets/imageview.py\", line 25, in __init__\r\n self._impl = self.factory.ImageView(interface=self)\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/base.py\", line 12, in __init__\r\n self.interface.style.reapply()\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/travertino/declaration.py\", line 88, in reapply\r\n self.apply(style, getattr(self, style))\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga/style/pack.py\", line 104, in apply\r\n self._applicator.set_font(\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga/style/applicator.py\", line 25, in set_font\r\n self.widget._impl.rehint()\r\n File \"/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/imageview.py\", line 20, in rehint\r\n original_height=self._pixbuf.get_height(),\r\nAttributeError: 'NoneType' object has no attribute 'get_height'\r\n```\r\n\r\n**To Reproduce**\r\nRun the imageview example:\r\n```shell\r\npython3 -m imageview\r\n```\r\n\r\n**Environment:**\r\n - Operating System: Ubuntu 20.04\r\n - Python version: Python 3.8\r\n - Software versions:\r\n - Toga: 0.3.0.dev23\r\n\r\n**Additional context**\r\nThis is a tricky issue and I suspect it was introduced by a change to when the style is applied. Essentially, the interface does set image (pixbuf) during init. Nevertheless, the style is already applied during the init of `toga_gtk.base.Widget`, before setting the image (line 12):\r\n\r\nhttps://github.com/beeware/toga/blob/f8bea583c87642ad102776e1b58fd8bb9265b135/src/gtk/toga_gtk/widgets/base.py#L5-L12\r\n\r\nThe quickest solution may be to guard against `pixbuf` not being set in the `rehint` implementation.\n", "before_files": [{"content": "\nfrom ..libs import GdkPixbuf, Gtk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def rehint(self):\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width()\n )\n\n scaled_pixbuf = self._pixbuf.scale_simple(width, height, GdkPixbuf.InterpType.BILINEAR)\n self._image.set_from_pixbuf(scaled_pixbuf)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width/original_width\n height_ratio = max_height/original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n return int(height), int(width)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}]} | 1,605 | 600 |
gh_patches_debug_11104 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-1322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invalid `dev` version identifiers in `setup.py`
There is a bunch of ~invalid~ version matchers (edit: valid, but not parsed correctly by distlib) in `setup.py`. [PEP 440](https://peps.python.org/pep-0440/) states:
> The canonical public version identifiers MUST comply with the following scheme:
> `[N!]N(.N)*[{a|b|rc}N][.postN][.devN]`
So you are missing a dot and a number in every version identifier that contains the string `dev`.
It is also considered bad practice to have an upper bound on package versions and installers like pip do not typically consider development versions in any case (unless explicitly told to).
See: https://github.com/googleapis/google-api-python-client/issues/2151
</issue>
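To see the difference in practice, `packaging` (which pip builds on) accepts the loose spelling and normalizes it to the canonical form, while the issue reports that `distlib` does not parse it; a quick sketch:

```python
from packaging.version import Version

v = Version("3.0.0dev")    # the loose spelling used in setup.py
print(v)                   # -> 3.0.0.dev0, the canonical PEP 440 form
print(v.is_prerelease)     # -> True, so pip ignores it unless asked to
```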
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 from setuptools import find_packages
19 from setuptools import setup
20
21
22 DEPENDENCIES = (
23 "cachetools>=2.0.0,<6.0",
24 "pyasn1-modules>=0.2.1",
25 # rsa==4.5 is the last version to support 2.7
26 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
27 "rsa>=3.1.4,<5",
28 # install enum34 to support 2.7. enum34 only works up to python version 3.3.
29 "six>=1.9.0",
30 "urllib3<2.0",
31 )
32
33 extras = {
34 "aiohttp": ["aiohttp >= 3.6.2, < 4.0.0dev", "requests >= 2.20.0, < 3.0.0dev"],
35 "pyopenssl": ["pyopenssl>=20.0.0", "cryptography>=38.0.3"],
36 "requests": "requests >= 2.20.0, < 3.0.0dev",
37 "reauth": "pyu2f>=0.1.5",
38 # Enterprise cert only works for OpenSSL 1.1.1. Newer versions of these
39 # dependencies are built with OpenSSL 3.0 so we need to fix the version.
40 "enterprise_cert": ["cryptography==36.0.2", "pyopenssl==22.0.0"],
41 }
42
43 with io.open("README.rst", "r") as fh:
44 long_description = fh.read()
45
46 package_root = os.path.abspath(os.path.dirname(__file__))
47
48 version = {}
49 with open(os.path.join(package_root, "google/auth/version.py")) as fp:
50 exec(fp.read(), version)
51 version = version["__version__"]
52
53 setup(
54 name="google-auth",
55 version=version,
56 author="Google Cloud Platform",
57 author_email="[email protected]",
58 description="Google Authentication Library",
59 long_description=long_description,
60 url="https://github.com/googleapis/google-auth-library-python",
61 packages=find_packages(exclude=("tests*", "system_tests*")),
62 namespace_packages=("google",),
63 install_requires=DEPENDENCIES,
64 extras_require=extras,
65 python_requires=">=3.6",
66 license="Apache 2.0",
67 keywords="google auth oauth client",
68 classifiers=[
69 "Programming Language :: Python :: 3",
70 "Programming Language :: Python :: 3.6",
71 "Programming Language :: Python :: 3.7",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Programming Language :: Python :: 3.10",
75 "Programming Language :: Python :: 3.11",
76 "Development Status :: 5 - Production/Stable",
77 "Intended Audience :: Developers",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: POSIX",
80 "Operating System :: Microsoft :: Windows",
81 "Operating System :: MacOS :: MacOS X",
82 "Operating System :: OS Independent",
83 "Topic :: Internet :: WWW/HTTP",
84 ],
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,9 +31,9 @@
)
extras = {
- "aiohttp": ["aiohttp >= 3.6.2, < 4.0.0dev", "requests >= 2.20.0, < 3.0.0dev"],
+ "aiohttp": ["aiohttp >= 3.6.2, < 4.0.0.dev0", "requests >= 2.20.0, < 3.0.0.dev0"],
"pyopenssl": ["pyopenssl>=20.0.0", "cryptography>=38.0.3"],
- "requests": "requests >= 2.20.0, < 3.0.0dev",
+ "requests": "requests >= 2.20.0, < 3.0.0.dev0",
"reauth": "pyu2f>=0.1.5",
# Enterprise cert only works for OpenSSL 1.1.1. Newer versions of these
# dependencies are built with OpenSSL 3.0 so we need to fix the version.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,9 +31,9 @@\n )\n \n extras = {\n- \"aiohttp\": [\"aiohttp >= 3.6.2, < 4.0.0dev\", \"requests >= 2.20.0, < 3.0.0dev\"],\n+ \"aiohttp\": [\"aiohttp >= 3.6.2, < 4.0.0.dev0\", \"requests >= 2.20.0, < 3.0.0.dev0\"],\n \"pyopenssl\": [\"pyopenssl>=20.0.0\", \"cryptography>=38.0.3\"],\n- \"requests\": \"requests >= 2.20.0, < 3.0.0dev\",\n+ \"requests\": \"requests >= 2.20.0, < 3.0.0.dev0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n # Enterprise cert only works for OpenSSL 1.1.1. Newer versions of these\n # dependencies are built with OpenSSL 3.0 so we need to fix the version.\n", "issue": "Invalid `dev` version identifiers in `setup.py`\nThere is a bunch of ~invalid~ version matchers (edit: valid, but not parsed correctly by distlib) in `setup.py`. [PEP 440](https://peps.python.org/pep-0440/) states:\r\n\r\n> The canonical public version identifiers MUST comply with the following scheme:\r\n> `[N!]N(.N)*[{a|b|rc}N][.postN][.devN]`\r\n\r\nSo you are missing a dot and a number in every version identifier that contains the string `dev`.\r\n\r\nIt is also considered bad practice to have an upper bound on package versions and installers like pip do not typically consider development versions in any case (unless explicitly told to).\r\n\r\nSee: https://github.com/googleapis/google-api-python-client/issues/2151\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<6.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n \"rsa>=3.1.4,<5\",\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n \"six>=1.9.0\",\n \"urllib3<2.0\",\n)\n\nextras = {\n \"aiohttp\": [\"aiohttp >= 3.6.2, < 4.0.0dev\", \"requests >= 2.20.0, < 3.0.0dev\"],\n \"pyopenssl\": [\"pyopenssl>=20.0.0\", \"cryptography>=38.0.3\"],\n \"requests\": \"requests >= 2.20.0, < 3.0.0dev\",\n \"reauth\": \"pyu2f>=0.1.5\",\n # Enterprise cert only works for OpenSSL 1.1.1. 
Newer versions of these\n # dependencies are built with OpenSSL 3.0 so we need to fix the version.\n \"enterprise_cert\": [\"cryptography==36.0.2\", \"pyopenssl==22.0.0\"],\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=3.6\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 1,714 | 271 |
gh_patches_debug_22926 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-236 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No Import BMV
Hi !
Thanks for the plugin, but unfortunately I can't get any data in. I checked; it may be that the town and the street are handled differently. Thank you
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py]
1 import logging
2 from html.parser import HTMLParser
3
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6 from waste_collection_schedule.service.ICS import ICS
7
8 TITLE = "BMV.at"
9 DESCRIPTION = "Source for BMV, Austria"
10 URL = "https://www.bmv.at"
11 TEST_CASES = {
12 "Allersdorf": {"ort": "ALLERSDORF", "strasse": "HAUSNUMMER", "hausnummer": 9},
13 "Bad Sauerbrunn": {
14 "ort": "BAD SAUERBRUNN",
15 "strasse": "BUCHINGERWEG",
16 "hausnummer": 16,
17 },
18 }
19
20 _LOGGER = logging.getLogger(__name__)
21
22
23 # Parser for HTML input (hidden) text
24 class HiddenInputParser(HTMLParser):
25 def __init__(self):
26 super().__init__()
27 self._args = {}
28
29 @property
30 def args(self):
31 return self._args
32
33 def handle_starttag(self, tag, attrs):
34 if tag == "input":
35 d = dict(attrs)
36 if d["type"] == "HIDDEN":
37 self._args[d["name"]] = d.get("value")
38
39
40 class Source:
41 def __init__(self, ort, strasse, hausnummer):
42 self._ort = ort
43 self._strasse = strasse
44 self._hausnummer = hausnummer
45 self._ics = ICS()
46
47 def fetch(self):
48 session = requests.session()
49
50 r = session.get(
51 "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet?SubmitAction=wasteDisposalServices&InFrameMode=TRUE"
52 )
53
54 # add all hidden input fields to form data
55 p = HiddenInputParser()
56 p.feed(r.text)
57 args = p.args
58
59 args["Focus"] = "Hausnummer"
60 args["SubmitAction"] = "forward"
61 args["Ort"] = self._ort
62 args["Strasse"] = self._strasse
63 args["Hausnummer"] = self._hausnummer
64 r = session.post(
65 "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
66 )
67
68 args["ApplicationName"] = "com.athos.kd.udb.AbfuhrTerminModel"
69 args["Focus"] = None
70 args["IsLastPage"] = "true"
71 args["Method"] = "POST"
72 args["PageName"] = "Terminliste"
73 args["SubmitAction"] = "filedownload_ICAL"
74 del args["Ort"]
75 del args["Strasse"]
76 del args["Hausnummer"]
77 r = session.post(
78 "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
79 )
80
81 dates = self._ics.convert(r.text)
82
83 entries = []
84 for d in dates:
85 entries.append(Collection(d[0], d[1]))
86 return entries
87
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py
@@ -15,6 +15,11 @@
"strasse": "BUCHINGERWEG",
"hausnummer": 16,
},
+ "Rattersdorf": {
+ "ort": "RATTERSDORF",
+ "strasse": "SIEBENBRÜNDLGASSE",
+ "hausnummer": 30,
+ },
}
_LOGGER = logging.getLogger(__name__)
@@ -56,6 +61,24 @@
p.feed(r.text)
args = p.args
+ args["Focus"] = "Ort"
+ args["SubmitAction"] = "changedEvent"
+ args["Ort"] = self._ort
+ args["Strasse"] = "HAUSNUMMER"
+ args["Hausnummer"] = 0
+ r = session.post(
+ "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+ )
+
+ args["Focus"] = "Strasse"
+ args["SubmitAction"] = "changedEvent"
+ args["Ort"] = self._ort
+ args["Strasse"] = self._strasse
+ args["Hausnummer"] = 0
+ r = session.post(
+ "https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet", data=args
+ )
+
args["Focus"] = "Hausnummer"
args["SubmitAction"] = "forward"
args["Ort"] = self._ort
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py\n@@ -15,6 +15,11 @@\n \"strasse\": \"BUCHINGERWEG\",\n \"hausnummer\": 16,\n },\n+ \"Rattersdorf\": {\n+ \"ort\": \"RATTERSDORF\",\n+ \"strasse\": \"SIEBENBR\u00dcNDLGASSE\",\n+ \"hausnummer\": 30,\n+ },\n }\n \n _LOGGER = logging.getLogger(__name__)\n@@ -56,6 +61,24 @@\n p.feed(r.text)\n args = p.args\n \n+ args[\"Focus\"] = \"Ort\"\n+ args[\"SubmitAction\"] = \"changedEvent\"\n+ args[\"Ort\"] = self._ort\n+ args[\"Strasse\"] = \"HAUSNUMMER\"\n+ args[\"Hausnummer\"] = 0\n+ r = session.post(\n+ \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n+ )\n+\n+ args[\"Focus\"] = \"Strasse\"\n+ args[\"SubmitAction\"] = \"changedEvent\"\n+ args[\"Ort\"] = self._ort\n+ args[\"Strasse\"] = self._strasse\n+ args[\"Hausnummer\"] = 0\n+ r = session.post(\n+ \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n+ )\n+\n args[\"Focus\"] = \"Hausnummer\"\n args[\"SubmitAction\"] = \"forward\"\n args[\"Ort\"] = self._ort\n", "issue": "No Import BMV\nHi !\r\nThanks for the plugin, but unfortunately I can't get any data in. I checked, it may be that the street and the street are different. Thank you\n", "before_files": [{"content": "import logging\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"BMV.at\"\nDESCRIPTION = \"Source for BMV, Austria\"\nURL = \"https://www.bmv.at\"\nTEST_CASES = {\n \"Allersdorf\": {\"ort\": \"ALLERSDORF\", \"strasse\": \"HAUSNUMMER\", \"hausnummer\": 9},\n \"Bad Sauerbrunn\": {\n \"ort\": \"BAD SAUERBRUNN\",\n \"strasse\": \"BUCHINGERWEG\",\n \"hausnummer\": 16,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# Parser for HTML input (hidden) text\nclass HiddenInputParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._args = {}\n\n @property\n def args(self):\n return self._args\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if d[\"type\"] == \"HIDDEN\":\n self._args[d[\"name\"]] = d.get(\"value\")\n\n\nclass Source:\n def __init__(self, ort, strasse, hausnummer):\n self._ort = ort\n self._strasse = strasse\n self._hausnummer = hausnummer\n self._ics = ICS()\n\n def fetch(self):\n session = requests.session()\n\n r = session.get(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet?SubmitAction=wasteDisposalServices&InFrameMode=TRUE\"\n )\n\n # add all hidden input fields to form data\n p = HiddenInputParser()\n p.feed(r.text)\n args = p.args\n\n args[\"Focus\"] = \"Hausnummer\"\n args[\"SubmitAction\"] = \"forward\"\n args[\"Ort\"] = self._ort\n args[\"Strasse\"] = self._strasse\n args[\"Hausnummer\"] = self._hausnummer\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n args[\"ApplicationName\"] = \"com.athos.kd.udb.AbfuhrTerminModel\"\n args[\"Focus\"] = None\n args[\"IsLastPage\"] = \"true\"\n args[\"Method\"] = \"POST\"\n args[\"PageName\"] = \"Terminliste\"\n args[\"SubmitAction\"] = \"filedownload_ICAL\"\n del args[\"Ort\"]\n del args[\"Strasse\"]\n 
del args[\"Hausnummer\"]\n r = session.post(\n \"https://webudb.udb.at/WasteManagementUDB/WasteManagementServlet\", data=args\n )\n\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bmv_at.py"}]} | 1,423 | 418 |
gh_patches_debug_16835 | rasdani/github-patches | git_diff | ESMCI__cime-538 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PET tests do not work on skybridge
Skybridge insta-fails the single-threaded case because it tries to use 16 procs-per-node and the sbatch only requested 8 ppn.
</issue>
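The arithmetic behind the failure, written out (a 16-core node is assumed from the numbers in the report):

```python
cores_per_node = 16
nthrds_case1 = 2                                # case one: all components threaded
ppn_requested = cores_per_node // nthrds_case1  # sbatch asks for 8 tasks per node

nthrds_case2 = 1                                # case two: single-threaded rerun
ppn_attempted = cores_per_node // nthrds_case2  # mpiexec now wants 16 tasks per node

assert ppn_attempted > ppn_requested            # exceeds the allocation -> instant fail
```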
<code>
[start of utils/python/CIME/SystemTests/pet.py]
1 """
2 Implementation of the CIME PET test. This class inherits from SystemTestsCommon
3
4 This is an openmp test to determine that changing thread counts does not change answers.
5 (1) do an initial run where all components are threaded by default (suffix: base)
6 (2) do another initial run with nthrds=1 for all components (suffix: single_thread)
7 """
8
9 from CIME.XML.standard_module_setup import *
10 from CIME.case_setup import case_setup
11 from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
12
13 logger = logging.getLogger(__name__)
14
15 class PET(SystemTestsCompareTwo):
16
17 _COMPONENT_LIST = ('ATM','CPL','OCN','WAV','GLC','ICE','ROF','LND')
18
19 def __init__(self, case):
20 """
21 initialize a test object
22 """
23 SystemTestsCompareTwo.__init__(self, case,
24 separate_builds = False,
25 run_two_suffix = 'single_thread',
26 run_one_description = 'default threading',
27 run_two_description = 'threads set to 1')
28
29 def _case_one_setup(self):
30 # first make sure that all components have threaded settings
31 for comp in self._COMPONENT_LIST:
32 if self._case.get_value("NTHRDS_%s"%comp) <= 1:
33 self._case.set_value("NTHRDS_%s"%comp, 2)
34
35 # Need to redo case_setup because we may have changed the number of threads
36 case_setup(self._case, reset=True)
37
38 def _case_two_setup(self):
39 #Do a run with all threads set to 1
40 for comp in self._COMPONENT_LIST:
41 self._case.set_value("NTHRDS_%s"%comp, 1)
42
43 # Need to redo case_setup because we may have changed the number of threads
44 case_setup(self._case, reset=True)
45
[end of utils/python/CIME/SystemTests/pet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/python/CIME/SystemTests/pet.py b/utils/python/CIME/SystemTests/pet.py
--- a/utils/python/CIME/SystemTests/pet.py
+++ b/utils/python/CIME/SystemTests/pet.py
@@ -40,5 +40,14 @@
for comp in self._COMPONENT_LIST:
self._case.set_value("NTHRDS_%s"%comp, 1)
+ # The need for this is subtle. On batch systems, the entire PET test runs
+ # under a single submission and that submission is configured based on
+ # the case settings for case 1, IE 2 threads for all components. This causes
+ # the procs-per-node to be half of what it would be for single thread. On some
+ # machines, if the mpiexec tries to exceed the procs-per-node that were given
+ # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of
+ # its original value prevents this.
+ self._case.set_value("MAX_TASKS_PER_NODE", self._case.get_value("MAX_TASKS_PER_NODE") / 2)
+
# Need to redo case_setup because we may have changed the number of threads
case_setup(self._case, reset=True)
| {"golden_diff": "diff --git a/utils/python/CIME/SystemTests/pet.py b/utils/python/CIME/SystemTests/pet.py\n--- a/utils/python/CIME/SystemTests/pet.py\n+++ b/utils/python/CIME/SystemTests/pet.py\n@@ -40,5 +40,14 @@\n for comp in self._COMPONENT_LIST:\n self._case.set_value(\"NTHRDS_%s\"%comp, 1)\n \n+ # The need for this is subtle. On batch systems, the entire PET test runs\n+ # under a single submission and that submission is configured based on\n+ # the case settings for case 1, IE 2 threads for all components. This causes\n+ # the procs-per-node to be half of what it would be for single thread. On some\n+ # machines, if the mpiexec tries to exceed the procs-per-node that were given\n+ # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of\n+ # it original value prevents this.\n+ self._case.set_value(\"MAX_TASKS_PER_NODE\", self._case.get_value(\"MAX_TASKS_PER_NODE\") / 2)\n+\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n", "issue": "PET tests do not work on skybridge\nSkybridge insta-fails the single-threaded case because it tries to use 16 procs-per-node and the sbatch only requested 8 ppn.\n\n", "before_files": [{"content": "\"\"\"\nImplementation of the CIME PET test. This class inherits from SystemTestsCommon\n\nThis is an openmp test to determine that changing thread counts does not change answers.\n(1) do an initial run where all components are threaded by default (suffix: base)\n(2) do another initial run with nthrds=1 for all components (suffix: single_thread)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\n\nlogger = logging.getLogger(__name__)\n\nclass PET(SystemTestsCompareTwo):\n\n _COMPONENT_LIST = ('ATM','CPL','OCN','WAV','GLC','ICE','ROF','LND')\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n SystemTestsCompareTwo.__init__(self, case,\n separate_builds = False,\n run_two_suffix = 'single_thread',\n run_one_description = 'default threading',\n run_two_description = 'threads set to 1')\n\n def _case_one_setup(self):\n # first make sure that all components have threaded settings\n for comp in self._COMPONENT_LIST:\n if self._case.get_value(\"NTHRDS_%s\"%comp) <= 1:\n self._case.set_value(\"NTHRDS_%s\"%comp, 2)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n\n def _case_two_setup(self):\n #Do a run with all threads set to 1\n for comp in self._COMPONENT_LIST:\n self._case.set_value(\"NTHRDS_%s\"%comp, 1)\n\n # Need to redo case_setup because we may have changed the number of threads\n case_setup(self._case, reset=True)\n", "path": "utils/python/CIME/SystemTests/pet.py"}]} | 1,071 | 280 |
gh_patches_debug_53979 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1261 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove duplicated libraries in setup.py
# Description
In `setup.py` and `setup.cfg` there are some duplicated libraries that should be removed from `setup.py`.
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47
already exists as a core requirement in `setup.cfg`
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45
and so should be removed from `setup.py`.
It also isn't clear if
https://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42
is still required, given that it was added back in PR #186 when we still used Coveralls for coverage.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],
11 'xmlio': [
12 'uproot3~=3.14',
13 'uproot~=4.0',
14 ], # uproot3 required until writing to ROOT supported in uproot4
15 'minuit': ['iminuit~=2.1'],
16 }
17 extras_require['backends'] = sorted(
18 set(
19 extras_require['tensorflow']
20 + extras_require['torch']
21 + extras_require['jax']
22 + extras_require['minuit']
23 )
24 )
25 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
26 extras_require['lint'] = sorted({'flake8', 'black'})
27
28 extras_require['test'] = sorted(
29 set(
30 extras_require['backends']
31 + extras_require['xmlio']
32 + extras_require['contrib']
33 + extras_require['shellcomplete']
34 + [
35 'pytest~=6.0',
36 'pytest-cov>=2.5.1',
37 'pytest-mock',
38 'pytest-benchmark[histogram]',
39 'pytest-console-scripts',
40 'pytest-mpl',
41 'pydocstyle',
42 'coverage>=4.0', # coveralls
43 'papermill~=2.0',
44 'nteract-scrapbook~=0.2',
45 'jupyter',
46 'graphviz',
47 'jsonpatch',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 extras_require['xmlio']
54 + [
55 'sphinx>=3.1.2',
56 'sphinxcontrib-bibtex~=2.1',
57 'sphinx-click',
58 'sphinx_rtd_theme',
59 'nbsphinx',
60 'ipywidgets',
61 'sphinx-issues',
62 'sphinx-copybutton>0.2.9',
63 ]
64 )
65 )
66 extras_require['develop'] = sorted(
67 set(
68 extras_require['docs']
69 + extras_require['lint']
70 + extras_require['test']
71 + [
72 'nbdime',
73 'bump2version',
74 'ipython',
75 'pre-commit',
76 'check-manifest',
77 'codemetapy>=0.3.4',
78 'twine',
79 ]
80 )
81 )
82 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
83
84
85 setup(
86 extras_require=extras_require,
87 use_scm_version=lambda: {'local_scheme': lambda version: ''},
88 )
89
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,12 +39,10 @@
'pytest-console-scripts',
'pytest-mpl',
'pydocstyle',
- 'coverage>=4.0', # coveralls
'papermill~=2.0',
'nteract-scrapbook~=0.2',
'jupyter',
'graphviz',
- 'jsonpatch',
]
)
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,12 +39,10 @@\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n- 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n- 'jsonpatch',\n ]\n )\n )\n", "issue": "Remove duplicated libraries in setup.py\n# Description\r\n\r\nIn `setup.py` and `setup.cfg` there are some duplicated libraries that should be removed from `setup.py`.\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L47\r\n\r\nalready exists as a core requirement in `setup.cfg`\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.cfg#L45\r\n\r\nand so should be removed from `setup.py`.\r\n\r\nIt also isn't clear if \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/75f3cd350ed3986d16d680fbb83f312791aafd68/setup.py#L42\r\n\r\nis still required, given that it was added back in PR #186 when we still used Coveralls for coverage.\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,554 | 110 |
gh_patches_debug_28166 | rasdani/github-patches | git_diff | svthalia__concrexit-1818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add payment_type or full payment to event admin API
### Motivation
`api/v2/admin/events/<eventPk>/registrations/` currently only gives the uuid of a payment, so to display in the admin screen how it was paid, the payment must be requested separately. Doing this for all of the registrations would be very inefficient (like 40 extra requests to load the event admin). If we simply add the payment_type or replace the payment uuid with a payment serializer, it will be much simpler.
</issue>
<code>
[start of website/events/api/v2/serializers/event_registration.py]
1 from rest_framework import serializers
2
3 from events.models import EventRegistration
4 from members.api.v2.serializers.member import MemberSerializer
5
6
7 class EventRegistrationSerializer(serializers.ModelSerializer):
8 """Serializer for event registrations."""
9
10 def __init__(self, *args, **kwargs):
11 # Don't pass the 'fields' arg up to the superclass
12 fields = kwargs.pop("fields", {"pk", "member", "name"})
13
14 # Instantiate the superclass normally
15 super().__init__(*args, **kwargs)
16
17 allowed = set(fields)
18 existing = set(self.fields.keys())
19 for field_name in existing - allowed:
20 self.fields.pop(field_name)
21
22 class Meta:
23 model = EventRegistration
24 fields = (
25 "pk",
26 "present",
27 "queue_position",
28 "date",
29 "payment",
30 "member",
31 "name",
32 )
33
34 member = MemberSerializer(detailed=False, read_only=True)
35
[end of website/events/api/v2/serializers/event_registration.py]
[start of website/events/api/v2/admin/serializers/event_registration.py]
1 from rest_framework import serializers
2
3 from events.models import EventRegistration
4 from members.api.v2.serializers.member import MemberSerializer
5 from members.models import Member
6
7
8 class EventRegistrationAdminSerializer(serializers.ModelSerializer):
9 """Serializer for event registrations."""
10
11 class Meta:
12 model = EventRegistration
13 fields = (
14 "pk",
15 "present",
16 "queue_position",
17 "date",
18 "date_cancelled",
19 "payment",
20 "member",
21 "name",
22 )
23 read_only_fields = ("payment",)
24
25 def to_internal_value(self, data):
26 self.fields["member"] = serializers.PrimaryKeyRelatedField(
27 queryset=Member.objects.all()
28 )
29 return super().to_internal_value(data)
30
31 def to_representation(self, instance):
32 self.fields["member"] = MemberSerializer(detailed=False, read_only=True)
33 return super().to_representation(instance)
34
[end of website/events/api/v2/admin/serializers/event_registration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/api/v2/admin/serializers/event_registration.py b/website/events/api/v2/admin/serializers/event_registration.py
--- a/website/events/api/v2/admin/serializers/event_registration.py
+++ b/website/events/api/v2/admin/serializers/event_registration.py
@@ -3,6 +3,7 @@
from events.models import EventRegistration
from members.api.v2.serializers.member import MemberSerializer
from members.models import Member
+from payments.api.v2.serializers import PaymentSerializer
class EventRegistrationAdminSerializer(serializers.ModelSerializer):
@@ -22,6 +23,8 @@
)
read_only_fields = ("payment",)
+ payment = PaymentSerializer()
+
def to_internal_value(self, data):
self.fields["member"] = serializers.PrimaryKeyRelatedField(
queryset=Member.objects.all()
diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py
--- a/website/events/api/v2/serializers/event_registration.py
+++ b/website/events/api/v2/serializers/event_registration.py
@@ -2,6 +2,7 @@
from events.models import EventRegistration
from members.api.v2.serializers.member import MemberSerializer
+from payments.api.v2.serializers import PaymentSerializer
class EventRegistrationSerializer(serializers.ModelSerializer):
@@ -31,4 +32,5 @@
"name",
)
+ payment = PaymentSerializer()
member = MemberSerializer(detailed=False, read_only=True)
| {"golden_diff": "diff --git a/website/events/api/v2/admin/serializers/event_registration.py b/website/events/api/v2/admin/serializers/event_registration.py\n--- a/website/events/api/v2/admin/serializers/event_registration.py\n+++ b/website/events/api/v2/admin/serializers/event_registration.py\n@@ -3,6 +3,7 @@\n from events.models import EventRegistration\n from members.api.v2.serializers.member import MemberSerializer\n from members.models import Member\n+from payments.api.v2.serializers import PaymentSerializer\n \n \n class EventRegistrationAdminSerializer(serializers.ModelSerializer):\n@@ -22,6 +23,8 @@\n )\n read_only_fields = (\"payment\",)\n \n+ payment = PaymentSerializer()\n+\n def to_internal_value(self, data):\n self.fields[\"member\"] = serializers.PrimaryKeyRelatedField(\n queryset=Member.objects.all()\ndiff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py\n--- a/website/events/api/v2/serializers/event_registration.py\n+++ b/website/events/api/v2/serializers/event_registration.py\n@@ -2,6 +2,7 @@\n \n from events.models import EventRegistration\n from members.api.v2.serializers.member import MemberSerializer\n+from payments.api.v2.serializers import PaymentSerializer\n \n \n class EventRegistrationSerializer(serializers.ModelSerializer):\n@@ -31,4 +32,5 @@\n \"name\",\n )\n \n+ payment = PaymentSerializer()\n member = MemberSerializer(detailed=False, read_only=True)\n", "issue": "Add payment_type or full payment to event admin API\n### Motivation\r\n`api/v2/admin/events/<eventPk>/registrations/` currently only gives the uuid of a payment, so to display in the admin screen how it was paid, the payment must be requested separately. Doing this for all of the registrations would be very inefficient (like 40 extra requests to load the event admin). 
If we simply add the payment_type or replace the payment uuid with a payment serializer, it will be much simpler.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\n\n\nclass EventRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop(\"fields\", {\"pk\", \"member\", \"name\"})\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n \"member\",\n \"name\",\n )\n\n member = MemberSerializer(detailed=False, read_only=True)\n", "path": "website/events/api/v2/serializers/event_registration.py"}, {"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\nfrom members.models import Member\n\n\nclass EventRegistrationAdminSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"date_cancelled\",\n \"payment\",\n \"member\",\n \"name\",\n )\n read_only_fields = (\"payment\",)\n\n def to_internal_value(self, data):\n self.fields[\"member\"] = serializers.PrimaryKeyRelatedField(\n queryset=Member.objects.all()\n )\n return super().to_internal_value(data)\n\n def to_representation(self, instance):\n self.fields[\"member\"] = MemberSerializer(detailed=False, read_only=True)\n return super().to_representation(instance)\n", "path": "website/events/api/v2/admin/serializers/event_registration.py"}]} | 1,186 | 330 |
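
Condensed, the admin serializer after this diff looks as follows; this is a reconstruction from the file and patch above, with the `to_internal_value`/`to_representation` member-swapping elided for brevity. Because `payment` is now a declared nested field, each registration response embeds the payment's serialized fields instead of a bare uuid, which is what removes the extra per-payment request.

from rest_framework import serializers

from events.models import EventRegistration
from members.api.v2.serializers.member import MemberSerializer
from payments.api.v2.serializers import PaymentSerializer


class EventRegistrationAdminSerializer(serializers.ModelSerializer):
    """Serializer for event registrations."""

    class Meta:
        model = EventRegistration
        fields = ("pk", "present", "queue_position", "date",
                  "date_cancelled", "payment", "member", "name")
        read_only_fields = ("payment",)

    # Declaring the nested serializer makes every registration embed the
    # payment's serialized fields instead of only its primary key.
    payment = PaymentSerializer()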
gh_patches_debug_8067 | rasdani/github-patches | git_diff | conda__conda-7525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
should_bypass_proxies still an issue in 4.5.7
https://github.com/conda/conda/issues/7506#issuecomment-403811279
</issue>
<code>
[start of conda/gateways/connection/__init__.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3 from functools import partial
4
5 def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):
6 # Monkey patch requests, per https://github.com/requests/requests/pull/4723
7 if url.startswith("file://"):
8 return True
9 try:
10 return should_bypass_proxies_func(url, no_proxy)
11 except TypeError:
12 # For versions of requests we shouldn't have to deal with.
13 # https://github.com/conda/conda/issues/7503
14 # https://github.com/conda/conda/issues/7506
15 return should_bypass_proxies_func(url)
16
17
18 try:
19 from requests import ConnectionError, HTTPError, Session
20 from requests.adapters import BaseAdapter, HTTPAdapter
21 from requests.auth import AuthBase, _basic_auth_str
22 from requests.cookies import extract_cookies_to_jar
23 from requests.exceptions import InvalidSchema, SSLError
24 from requests.hooks import dispatch_hook
25 from requests.models import Response
26 from requests.packages.urllib3.exceptions import InsecureRequestWarning
27 from requests.structures import CaseInsensitiveDict
28 from requests.utils import get_auth_from_url, get_netrc_auth
29
30 # monkeypatch requests
31 from requests.utils import should_bypass_proxies
32 import requests.utils
33 requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,
34 should_bypass_proxies)
35 except ImportError: # pragma: no cover
36 from pip._vendor.requests import ConnectionError, HTTPError, Session
37 from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
38 from pip._vendor.requests.auth import AuthBase, _basic_auth_str
39 from pip._vendor.requests.cookies import extract_cookies_to_jar
40 from pip._vendor.requests.exceptions import InvalidSchema, SSLError
41 from pip._vendor.requests.hooks import dispatch_hook
42 from pip._vendor.requests.models import Response
43 from pip._vendor.requests.packages.urllib3.exceptions import InsecureRequestWarning
44 from pip._vendor.requests.structures import CaseInsensitiveDict
45 from pip._vendor.requests.utils import get_auth_from_url, get_netrc_auth
46
47 # monkeypatch requests
48 from pip._vendor.requests.utils import should_bypass_proxies
49 import pip._vendor.requests.utils
50 pip._vendor.requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,
51 should_bypass_proxies)
52
53
54 dispatch_hook = dispatch_hook
55 BaseAdapter = BaseAdapter
56 Response = Response
57 CaseInsensitiveDict = CaseInsensitiveDict
58 Session = Session
59 HTTPAdapter = HTTPAdapter
60 AuthBase = AuthBase
61 _basic_auth_str = _basic_auth_str
62 extract_cookies_to_jar = extract_cookies_to_jar
63 get_auth_from_url = get_auth_from_url
64 get_netrc_auth = get_netrc_auth
65 ConnectionError = ConnectionError
66 HTTPError = HTTPError
67 InvalidSchema = InvalidSchema
68 SSLError = SSLError
69 InsecureRequestWarning = InsecureRequestWarning
70
[end of conda/gateways/connection/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/gateways/connection/__init__.py b/conda/gateways/connection/__init__.py
--- a/conda/gateways/connection/__init__.py
+++ b/conda/gateways/connection/__init__.py
@@ -2,7 +2,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import partial
-def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):
+def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy=None):
# Monkey patch requests, per https://github.com/requests/requests/pull/4723
if url.startswith("file://"):
return True
| {"golden_diff": "diff --git a/conda/gateways/connection/__init__.py b/conda/gateways/connection/__init__.py\n--- a/conda/gateways/connection/__init__.py\n+++ b/conda/gateways/connection/__init__.py\n@@ -2,7 +2,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n from functools import partial\n \n-def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):\n+def should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy=None):\n # Monkey patch requests, per https://github.com/requests/requests/pull/4723\n if url.startswith(\"file://\"):\n return True\n", "issue": "should_bypass_proxies still an issue in 4.5.7\nhttps://github.com/conda/conda/issues/7506#issuecomment-403811279\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom functools import partial\n\ndef should_bypass_proxies_patched(should_bypass_proxies_func, url, no_proxy):\n # Monkey patch requests, per https://github.com/requests/requests/pull/4723\n if url.startswith(\"file://\"):\n return True\n try:\n return should_bypass_proxies_func(url, no_proxy)\n except TypeError:\n # For versions of requests we shouldn't have to deal with.\n # https://github.com/conda/conda/issues/7503\n # https://github.com/conda/conda/issues/7506\n return should_bypass_proxies_func(url)\n\n\ntry:\n from requests import ConnectionError, HTTPError, Session\n from requests.adapters import BaseAdapter, HTTPAdapter\n from requests.auth import AuthBase, _basic_auth_str\n from requests.cookies import extract_cookies_to_jar\n from requests.exceptions import InvalidSchema, SSLError\n from requests.hooks import dispatch_hook\n from requests.models import Response\n from requests.packages.urllib3.exceptions import InsecureRequestWarning\n from requests.structures import CaseInsensitiveDict\n from requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from requests.utils import should_bypass_proxies\n import requests.utils\n requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\nexcept ImportError: # pragma: no cover\n from pip._vendor.requests import ConnectionError, HTTPError, Session\n from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter\n from pip._vendor.requests.auth import AuthBase, _basic_auth_str\n from pip._vendor.requests.cookies import extract_cookies_to_jar\n from pip._vendor.requests.exceptions import InvalidSchema, SSLError\n from pip._vendor.requests.hooks import dispatch_hook\n from pip._vendor.requests.models import Response\n from pip._vendor.requests.packages.urllib3.exceptions import InsecureRequestWarning\n from pip._vendor.requests.structures import CaseInsensitiveDict\n from pip._vendor.requests.utils import get_auth_from_url, get_netrc_auth\n\n # monkeypatch requests\n from pip._vendor.requests.utils import should_bypass_proxies\n import pip._vendor.requests.utils\n pip._vendor.requests.utils.should_bypass_proxies = partial(should_bypass_proxies_patched,\n should_bypass_proxies)\n\n\ndispatch_hook = dispatch_hook\nBaseAdapter = BaseAdapter\nResponse = Response\nCaseInsensitiveDict = CaseInsensitiveDict\nSession = Session\nHTTPAdapter = HTTPAdapter\nAuthBase = AuthBase\n_basic_auth_str = _basic_auth_str\nextract_cookies_to_jar = extract_cookies_to_jar\nget_auth_from_url = get_auth_from_url\nget_netrc_auth = get_netrc_auth\nConnectionError = ConnectionError\nHTTPError = 
HTTPError\nInvalidSchema = InvalidSchema\nSSLError = SSLError\nInsecureRequestWarning = InsecureRequestWarning\n", "path": "conda/gateways/connection/__init__.py"}]} | 1,382 | 163 |
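
The one-token change (`no_proxy=None`) matters because older requests versions call `should_bypass_proxies(url)` with a single argument; the partial only binds `func`, so the wrapper itself raised TypeError before its internal try/except could ever run. A standalone illustration of both behaviours follows (the toy `patched` functions stand in for the conda code, they are not copied from it):

from functools import partial

def patched(func, url, no_proxy):  # conda 4.5.7 signature: no default value
    try:
        return func(url, no_proxy)
    except TypeError:
        return func(url)

old_style_call = partial(patched, lambda url: False)  # only `func` is bound
try:
    old_style_call("http://example.com")  # older requests passes url alone
except TypeError as exc:
    print("wrapper itself blew up:", exc)  # missing positional 'no_proxy'

def patched_fixed(func, url, no_proxy=None):  # the one-token fix
    try:
        return func(url, no_proxy)
    except TypeError:
        return func(url)

print(partial(patched_fixed, lambda url: False)("http://example.com"))  # False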
gh_patches_debug_18080 | rasdani/github-patches | git_diff | mozilla__bugbug-1251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set label as 0 in the QANeeded model when one of the qa flags are removed
</issue>
<code>
[start of bugbug/models/qaneeded.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.under_sampling import RandomUnderSampler
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features, bugzilla, feature_cleanup
13 from bugbug.model import BugModel
14
15
16 class QANeededModel(BugModel):
17 def __init__(self, lemmatization=False):
18 BugModel.__init__(self, lemmatization)
19
20 self.sampler = RandomUnderSampler(random_state=0)
21
22 feature_extractors = [
23 bug_features.has_str(),
24 bug_features.has_regression_range(),
25 bug_features.severity(),
26 bug_features.keywords({"qawanted"}),
27 bug_features.is_coverity_issue(),
28 bug_features.has_crash_signature(),
29 bug_features.has_url(),
30 bug_features.has_w3c_url(),
31 bug_features.has_github_url(),
32 bug_features.whiteboard(),
33 bug_features.patches(),
34 bug_features.landings(),
35 ]
36
37 cleanup_functions = [
38 feature_cleanup.fileref(),
39 feature_cleanup.url(),
40 feature_cleanup.synonyms(),
41 ]
42
43 self.extraction_pipeline = Pipeline(
44 [
45 (
46 "bug_extractor",
47 bug_features.BugExtractor(
48 feature_extractors,
49 cleanup_functions,
50 rollback=True,
51 rollback_when=self.rollback,
52 ),
53 ),
54 (
55 "union",
56 ColumnTransformer(
57 [
58 ("data", DictVectorizer(), "data"),
59 ("title", self.text_vectorizer(), "title"),
60 ("comments", self.text_vectorizer(), "comments"),
61 ]
62 ),
63 ),
64 ]
65 )
66
67 self.clf = xgboost.XGBClassifier(n_jobs=16)
68 self.clf.set_params(predictor="cpu_predictor")
69
70 def rollback(self, change):
71 return any(
72 change["added"].startswith(prefix)
73 for prefix in ["qawanted", "qe-verify", "qaurgent"]
74 )
75
76 def get_labels(self):
77 classes = {}
78
79 for bug_data in bugzilla.get_bugs():
80 bug_id = int(bug_data["id"])
81
82 found_qa = False
83 if any(
84 keyword.startswith(label)
85 for keyword in bug_data["keywords"]
86 for label in ["qawanted", "qe-verify", "qaurgent"]
87 ):
88 classes[bug_id] = 1
89 found_qa = True
90
91 if not found_qa:
92 for entry in bug_data["history"]:
93 for change in entry["changes"]:
94 if any(
95 change["added"].startswith(label)
96 for label in ["qawanted", "qe-verify", "qaurgent"]
97 ):
98 classes[bug_id] = 1
99 if bug_id not in classes:
100 classes[bug_id] = 0
101
102 return classes, [0, 1]
103
104 def get_feature_names(self):
105 return self.extraction_pipeline.named_steps["union"].get_feature_names()
106
[end of bugbug/models/qaneeded.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py
--- a/bugbug/models/qaneeded.py
+++ b/bugbug/models/qaneeded.py
@@ -91,11 +91,18 @@
if not found_qa:
for entry in bug_data["history"]:
for change in entry["changes"]:
+ if any(
+ change["removed"].startswith(label)
+ for label in ["qawanted", "qe-verify", "qaurgent"]
+ ):
+ classes[bug_id] = 0
+
if any(
change["added"].startswith(label)
for label in ["qawanted", "qe-verify", "qaurgent"]
):
classes[bug_id] = 1
+
if bug_id not in classes:
classes[bug_id] = 0
| {"golden_diff": "diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py\n--- a/bugbug/models/qaneeded.py\n+++ b/bugbug/models/qaneeded.py\n@@ -91,11 +91,18 @@\n if not found_qa:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n+ if any(\n+ change[\"removed\"].startswith(label)\n+ for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n+ ):\n+ classes[bug_id] = 0\n+\n if any(\n change[\"added\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n+\n if bug_id not in classes:\n classes[bug_id] = 0\n", "issue": "Set label as 0 in the QANeeded model when one of the qa flags are removed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugModel\n\n\nclass QANeededModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords({\"qawanted\"}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors,\n cleanup_functions,\n rollback=True,\n rollback_when=self.rollback,\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def rollback(self, change):\n return any(\n change[\"added\"].startswith(prefix)\n for prefix in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n )\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data[\"id\"])\n\n found_qa = False\n if any(\n keyword.startswith(label)\n for keyword in bug_data[\"keywords\"]\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n found_qa = True\n\n if not found_qa:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n if any(\n change[\"added\"].startswith(label)\n for label in [\"qawanted\", \"qe-verify\", \"qaurgent\"]\n ):\n classes[bug_id] = 1\n if bug_id not in classes:\n classes[bug_id] = 0\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/qaneeded.py"}]} | 1,457 | 194 |
gh_patches_debug_21022 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1983 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
localisation string for a shelf is missing
**Describe the bug**
A translation exists, but the string "Currently reading" is shown in English language
**To Reproduce**
Switch language to non-english and check book status
**Expected behavior**
Translated string used instead of English
**Screenshots**

**Instance**
https://ziurkes.group.lt
</issue>
<code>
[start of bookwyrm/templatetags/shelf_tags.py]
1 """ Filters and tags related to shelving books """
2 from django import template
3
4 from bookwyrm import models
5 from bookwyrm.utils import cache
6
7
8 register = template.Library()
9
10
11 @register.filter(name="is_book_on_shelf")
12 def get_is_book_on_shelf(book, shelf):
13 """is a book on a shelf"""
14 return cache.get_or_set(
15 f"book-on-shelf-{book.id}-{shelf.id}",
16 lambda b, s: s.books.filter(id=b.id).exists(),
17 book,
18 shelf,
19 timeout=15552000,
20 )
21
22
23 @register.filter(name="next_shelf")
24 def get_next_shelf(current_shelf):
25 """shelf you'd use to update reading progress"""
26 if current_shelf == "to-read":
27 return "reading"
28 if current_shelf == "reading":
29 return "read"
30 if current_shelf == "read":
31 return "complete"
32 return "to-read"
33
34
35 @register.simple_tag(takes_context=True)
36 def active_shelf(context, book):
37 """check what shelf a user has a book on, if any"""
38 user = context["request"].user
39 return cache.get_or_set(
40 f"active_shelf-{user.id}-{book.id}",
41 lambda u, b: (
42 models.ShelfBook.objects.filter(
43 shelf__user=u,
44 book__parent_work__editions=b,
45 ).first()
46 or False
47 ),
48 user,
49 book,
50 timeout=15552000,
51 ) or {"book": book}
52
53
54 @register.simple_tag(takes_context=False)
55 def latest_read_through(book, user):
56 """the most recent read activity"""
57 return cache.get_or_set(
58 f"latest_read_through-{user.id}-{book.id}",
59 lambda u, b: (
60 models.ReadThrough.objects.filter(user=u, book=b, is_active=True)
61 .order_by("-start_date")
62 .first()
63 or False
64 ),
65 user,
66 book,
67 timeout=15552000,
68 )
69
[end of bookwyrm/templatetags/shelf_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/templatetags/shelf_tags.py b/bookwyrm/templatetags/shelf_tags.py
--- a/bookwyrm/templatetags/shelf_tags.py
+++ b/bookwyrm/templatetags/shelf_tags.py
@@ -1,5 +1,6 @@
""" Filters and tags related to shelving books """
from django import template
+from django.utils.translation import gettext_lazy as _
from bookwyrm import models
from bookwyrm.utils import cache
@@ -32,6 +33,24 @@
return "to-read"
[email protected](name="translate_shelf_name")
+def get_translated_shelf_name(shelf):
+ """produced translated shelf nidentifierame"""
+ if not shelf:
+ return ""
+ # support obj or dict
+ identifier = shelf["identifier"] if isinstance(shelf, dict) else shelf.identifier
+ if identifier == "all":
+ return _("All books")
+ if identifier == "to-read":
+ return _("To Read")
+ if identifier == "reading":
+ return _("Currently Reading")
+ if identifier == "read":
+ return _("Read")
+ return shelf["name"] if isinstance(shelf, dict) else shelf.name
+
+
@register.simple_tag(takes_context=True)
def active_shelf(context, book):
"""check what shelf a user has a book on, if any"""
| {"golden_diff": "diff --git a/bookwyrm/templatetags/shelf_tags.py b/bookwyrm/templatetags/shelf_tags.py\n--- a/bookwyrm/templatetags/shelf_tags.py\n+++ b/bookwyrm/templatetags/shelf_tags.py\n@@ -1,5 +1,6 @@\n \"\"\" Filters and tags related to shelving books \"\"\"\n from django import template\n+from django.utils.translation import gettext_lazy as _\n \n from bookwyrm import models\n from bookwyrm.utils import cache\n@@ -32,6 +33,24 @@\n return \"to-read\"\n \n \[email protected](name=\"translate_shelf_name\")\n+def get_translated_shelf_name(shelf):\n+ \"\"\"produced translated shelf nidentifierame\"\"\"\n+ if not shelf:\n+ return \"\"\n+ # support obj or dict\n+ identifier = shelf[\"identifier\"] if isinstance(shelf, dict) else shelf.identifier\n+ if identifier == \"all\":\n+ return _(\"All books\")\n+ if identifier == \"to-read\":\n+ return _(\"To Read\")\n+ if identifier == \"reading\":\n+ return _(\"Currently Reading\")\n+ if identifier == \"read\":\n+ return _(\"Read\")\n+ return shelf[\"name\"] if isinstance(shelf, dict) else shelf.name\n+\n+\n @register.simple_tag(takes_context=True)\n def active_shelf(context, book):\n \"\"\"check what shelf a user has a book on, if any\"\"\"\n", "issue": "localisation string for a shelf is missing\n**Describe the bug**\r\nA translation exists, but the string \"Currently reading\"\u00a0is shown in English language\r\n\r\n**To Reproduce**\r\nSwitch language to non-english and check book status\r\n\r\n**Expected behavior**\r\nTranslated string used instead of English\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Instance**\r\nhttps://ziurkes.group.lt\r\n\r\n\n", "before_files": [{"content": "\"\"\" Filters and tags related to shelving books \"\"\"\nfrom django import template\n\nfrom bookwyrm import models\nfrom bookwyrm.utils import cache\n\n\nregister = template.Library()\n\n\[email protected](name=\"is_book_on_shelf\")\ndef get_is_book_on_shelf(book, shelf):\n \"\"\"is a book on a shelf\"\"\"\n return cache.get_or_set(\n f\"book-on-shelf-{book.id}-{shelf.id}\",\n lambda b, s: s.books.filter(id=b.id).exists(),\n book,\n shelf,\n timeout=15552000,\n )\n\n\[email protected](name=\"next_shelf\")\ndef get_next_shelf(current_shelf):\n \"\"\"shelf you'd use to update reading progress\"\"\"\n if current_shelf == \"to-read\":\n return \"reading\"\n if current_shelf == \"reading\":\n return \"read\"\n if current_shelf == \"read\":\n return \"complete\"\n return \"to-read\"\n\n\[email protected]_tag(takes_context=True)\ndef active_shelf(context, book):\n \"\"\"check what shelf a user has a book on, if any\"\"\"\n user = context[\"request\"].user\n return cache.get_or_set(\n f\"active_shelf-{user.id}-{book.id}\",\n lambda u, b: (\n models.ShelfBook.objects.filter(\n shelf__user=u,\n book__parent_work__editions=b,\n ).first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n ) or {\"book\": book}\n\n\[email protected]_tag(takes_context=False)\ndef latest_read_through(book, user):\n \"\"\"the most recent read activity\"\"\"\n return cache.get_or_set(\n f\"latest_read_through-{user.id}-{book.id}\",\n lambda u, b: (\n models.ReadThrough.objects.filter(user=u, book=b, is_active=True)\n .order_by(\"-start_date\")\n .first()\n or False\n ),\n user,\n book,\n timeout=15552000,\n )\n", "path": "bookwyrm/templatetags/shelf_tags.py"}]} | 1,262 | 314 |
gh_patches_debug_21542 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-202 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Too many false positives when detecting wake word
</issue>
<code>
[start of mycroft/client/speech/local_recognizer.py]
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import time
20
21 import os
22 from pocketsphinx.pocketsphinx import Decoder
23
24 __author__ = 'seanfitz, jdorleans'
25
26 BASEDIR = os.path.dirname(os.path.abspath(__file__))
27
28
29 class LocalRecognizer(object):
30 def __init__(self, sample_rate=16000, lang="en-us", key_phrase="mycroft"):
31 self.lang = lang
32 self.key_phrase = key_phrase
33 self.sample_rate = sample_rate
34 self.configure()
35
36 def configure(self):
37 config = Decoder.default_config()
38 config.set_string('-hmm', os.path.join(BASEDIR, 'model', self.lang,
39 'hmm'))
40 config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,
41 'mycroft-en-us.dict'))
42 config.set_string('-keyphrase', self.key_phrase)
43 config.set_float('-kws_threshold', float('1e-45'))
44 config.set_float('-samprate', self.sample_rate)
45 config.set_int('-nfft', 2048)
46 config.set_string('-logfn', '/dev/null')
47 self.decoder = Decoder(config)
48
49 def transcribe(self, byte_data, metrics=None):
50 start = time.time()
51 self.decoder.start_utt()
52 self.decoder.process_raw(byte_data, False, False)
53 self.decoder.end_utt()
54 if metrics:
55 metrics.timer("mycroft.stt.local.time_s", time.time() - start)
56 return self.decoder.hyp()
57
58 def is_recognized(self, byte_data, metrics):
59 hyp = self.transcribe(byte_data, metrics)
60 return hyp and self.key_phrase in hyp.hypstr.lower()
61
62 def found_wake_word(self, hypothesis):
63 return hypothesis and self.key_phrase in hypothesis.hypstr.lower()
64
[end of mycroft/client/speech/local_recognizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/client/speech/local_recognizer.py b/mycroft/client/speech/local_recognizer.py
--- a/mycroft/client/speech/local_recognizer.py
+++ b/mycroft/client/speech/local_recognizer.py
@@ -27,7 +27,8 @@
class LocalRecognizer(object):
- def __init__(self, sample_rate=16000, lang="en-us", key_phrase="mycroft"):
+ def __init__(self, sample_rate=16000, lang="en-us",
+ key_phrase="hey mycroft"):
self.lang = lang
self.key_phrase = key_phrase
self.sample_rate = sample_rate
@@ -40,7 +41,7 @@
config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,
'mycroft-en-us.dict'))
config.set_string('-keyphrase', self.key_phrase)
- config.set_float('-kws_threshold', float('1e-45'))
+ config.set_float('-kws_threshold', float('1e-90'))
config.set_float('-samprate', self.sample_rate)
config.set_int('-nfft', 2048)
config.set_string('-logfn', '/dev/null')
| {"golden_diff": "diff --git a/mycroft/client/speech/local_recognizer.py b/mycroft/client/speech/local_recognizer.py\n--- a/mycroft/client/speech/local_recognizer.py\n+++ b/mycroft/client/speech/local_recognizer.py\n@@ -27,7 +27,8 @@\n \n \n class LocalRecognizer(object):\n- def __init__(self, sample_rate=16000, lang=\"en-us\", key_phrase=\"mycroft\"):\n+ def __init__(self, sample_rate=16000, lang=\"en-us\",\n+ key_phrase=\"hey mycroft\"):\n self.lang = lang\n self.key_phrase = key_phrase\n self.sample_rate = sample_rate\n@@ -40,7 +41,7 @@\n config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,\n 'mycroft-en-us.dict'))\n config.set_string('-keyphrase', self.key_phrase)\n- config.set_float('-kws_threshold', float('1e-45'))\n+ config.set_float('-kws_threshold', float('1e-90'))\n config.set_float('-samprate', self.sample_rate)\n config.set_int('-nfft', 2048)\n config.set_string('-logfn', '/dev/null')\n", "issue": "Too many false positives when detecting wake word\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport time\n\nimport os\nfrom pocketsphinx.pocketsphinx import Decoder\n\n__author__ = 'seanfitz, jdorleans'\n\nBASEDIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass LocalRecognizer(object):\n def __init__(self, sample_rate=16000, lang=\"en-us\", key_phrase=\"mycroft\"):\n self.lang = lang\n self.key_phrase = key_phrase\n self.sample_rate = sample_rate\n self.configure()\n\n def configure(self):\n config = Decoder.default_config()\n config.set_string('-hmm', os.path.join(BASEDIR, 'model', self.lang,\n 'hmm'))\n config.set_string('-dict', os.path.join(BASEDIR, 'model', self.lang,\n 'mycroft-en-us.dict'))\n config.set_string('-keyphrase', self.key_phrase)\n config.set_float('-kws_threshold', float('1e-45'))\n config.set_float('-samprate', self.sample_rate)\n config.set_int('-nfft', 2048)\n config.set_string('-logfn', '/dev/null')\n self.decoder = Decoder(config)\n\n def transcribe(self, byte_data, metrics=None):\n start = time.time()\n self.decoder.start_utt()\n self.decoder.process_raw(byte_data, False, False)\n self.decoder.end_utt()\n if metrics:\n metrics.timer(\"mycroft.stt.local.time_s\", time.time() - start)\n return self.decoder.hyp()\n\n def is_recognized(self, byte_data, metrics):\n hyp = self.transcribe(byte_data, metrics)\n return hyp and self.key_phrase in hyp.hypstr.lower()\n\n def found_wake_word(self, hypothesis):\n return hypothesis and self.key_phrase in hypothesis.hypstr.lower()\n", "path": "mycroft/client/speech/local_recognizer.py"}]} | 1,226 | 278 |
gh_patches_debug_27826 | rasdani/github-patches | git_diff | qtile__qtile-3863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bluetooth widget displays adapter name instead of name of connected device
### The issue:
version: 0.21.0
log: no relevant log
I configured the bluetooth-widget.
When a device is connected, it shows the adapter name, instead of the device name.
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
</issue>
<code>
[start of libqtile/widget/bluetooth.py]
1 # Copyright (c) 2021 Graeme Holliday
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 from dbus_next.aio import MessageBus
22 from dbus_next.constants import BusType
23
24 from libqtile.widget import base
25
26 BLUEZ = "org.bluez"
27 BLUEZ_PATH = "/org/bluez/hci0"
28 BLUEZ_ADAPTER = "org.bluez.Adapter1"
29 BLUEZ_DEVICE = "org.bluez.Device1"
30 BLUEZ_PROPERTIES = "org.freedesktop.DBus.Properties"
31
32
33 class Bluetooth(base._TextBox):
34 """
35 Displays bluetooth status for a particular connected device.
36
37 (For example your bluetooth headphones.)
38
39 Uses dbus-next to communicate with the system bus.
40
41 Widget requirements: dbus-next_.
42
43 .. _dbus-next: https://pypi.org/project/dbus-next/
44 """
45
46 defaults = [
47 (
48 "hci",
49 "/dev_XX_XX_XX_XX_XX_XX",
50 "hci0 device path, can be found with d-feet or similar dbus explorer.",
51 )
52 ]
53
54 def __init__(self, **config):
55 base._TextBox.__init__(self, "", **config)
56 self.add_defaults(Bluetooth.defaults)
57
58 async def _config_async(self):
59 # set initial values
60 self.powered = await self._init_adapter()
61 self.connected, self.device = await self._init_device()
62
63 self.update_text()
64
65 async def _init_adapter(self):
66 # set up interface to adapter properties using high-level api
67 bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
68 introspect = await bus.introspect(BLUEZ, BLUEZ_PATH)
69 obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH, introspect)
70 iface = obj.get_interface(BLUEZ_ADAPTER)
71 props = obj.get_interface(BLUEZ_PROPERTIES)
72
73 powered = await iface.get_powered()
74 # subscribe receiver to property changed
75 props.on_properties_changed(self._signal_received)
76 return powered
77
78 async def _init_device(self):
79 # set up interface to device properties using high-level api
80 bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
81 introspect = await bus.introspect(BLUEZ, BLUEZ_PATH + self.hci)
82 obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH + self.hci, introspect)
83 iface = obj.get_interface(BLUEZ_DEVICE)
84 props = obj.get_interface(BLUEZ_PROPERTIES)
85
86 connected = await iface.get_connected()
87 name = await iface.get_name()
88 # subscribe receiver to property changed
89 props.on_properties_changed(self._signal_received)
90 return connected, name
91
92 def _signal_received(self, interface_name, changed_properties, _invalidated_properties):
93 powered = changed_properties.get("Powered", None)
94 if powered is not None:
95 self.powered = powered.value
96 self.update_text()
97
98 connected = changed_properties.get("Connected", None)
99 if connected is not None:
100 self.connected = connected.value
101 self.update_text()
102
103 device = changed_properties.get("Name", None)
104 if device is not None:
105 self.device = device.value
106 self.update_text()
107
108 def update_text(self):
109 text = ""
110 if not self.powered:
111 text = "off"
112 else:
113 if not self.connected:
114 text = "on"
115 else:
116 text = self.device
117 self.update(text)
118
[end of libqtile/widget/bluetooth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/bluetooth.py b/libqtile/widget/bluetooth.py
--- a/libqtile/widget/bluetooth.py
+++ b/libqtile/widget/bluetooth.py
@@ -72,7 +72,7 @@
powered = await iface.get_powered()
# subscribe receiver to property changed
- props.on_properties_changed(self._signal_received)
+ props.on_properties_changed(self._adapter_signal_received)
return powered
async def _init_device(self):
@@ -86,15 +86,20 @@
connected = await iface.get_connected()
name = await iface.get_name()
# subscribe receiver to property changed
- props.on_properties_changed(self._signal_received)
+ props.on_properties_changed(self._device_signal_received)
return connected, name
- def _signal_received(self, interface_name, changed_properties, _invalidated_properties):
+ def _adapter_signal_received(
+ self, interface_name, changed_properties, _invalidated_properties
+ ):
powered = changed_properties.get("Powered", None)
if powered is not None:
self.powered = powered.value
self.update_text()
+ def _device_signal_received(
+ self, interface_name, changed_properties, _invalidated_properties
+ ):
connected = changed_properties.get("Connected", None)
if connected is not None:
self.connected = connected.value
| {"golden_diff": "diff --git a/libqtile/widget/bluetooth.py b/libqtile/widget/bluetooth.py\n--- a/libqtile/widget/bluetooth.py\n+++ b/libqtile/widget/bluetooth.py\n@@ -72,7 +72,7 @@\n \n powered = await iface.get_powered()\n # subscribe receiver to property changed\n- props.on_properties_changed(self._signal_received)\n+ props.on_properties_changed(self._adapter_signal_received)\n return powered\n \n async def _init_device(self):\n@@ -86,15 +86,20 @@\n connected = await iface.get_connected()\n name = await iface.get_name()\n # subscribe receiver to property changed\n- props.on_properties_changed(self._signal_received)\n+ props.on_properties_changed(self._device_signal_received)\n return connected, name\n \n- def _signal_received(self, interface_name, changed_properties, _invalidated_properties):\n+ def _adapter_signal_received(\n+ self, interface_name, changed_properties, _invalidated_properties\n+ ):\n powered = changed_properties.get(\"Powered\", None)\n if powered is not None:\n self.powered = powered.value\n self.update_text()\n \n+ def _device_signal_received(\n+ self, interface_name, changed_properties, _invalidated_properties\n+ ):\n connected = changed_properties.get(\"Connected\", None)\n if connected is not None:\n self.connected = connected.value\n", "issue": "Bluetooth widget displays adapter name instead of name of connected device\n### The issue:\n\nversion: 0.21.0\r\nlog: no relevant log\r\n\r\nI configured the bluetooth-widget.\r\nWhen a device is connected, it shows the adapter name, instead of the device name.\n\n### Required:\n\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2021 Graeme Holliday\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom dbus_next.aio import MessageBus\nfrom dbus_next.constants import BusType\n\nfrom libqtile.widget import base\n\nBLUEZ = \"org.bluez\"\nBLUEZ_PATH = \"/org/bluez/hci0\"\nBLUEZ_ADAPTER = \"org.bluez.Adapter1\"\nBLUEZ_DEVICE = \"org.bluez.Device1\"\nBLUEZ_PROPERTIES = \"org.freedesktop.DBus.Properties\"\n\n\nclass Bluetooth(base._TextBox):\n \"\"\"\n Displays bluetooth status for a particular connected device.\n\n (For example your bluetooth headphones.)\n\n Uses dbus-next to communicate with the system bus.\n\n Widget requirements: dbus-next_.\n\n .. 
_dbus-next: https://pypi.org/project/dbus-next/\n \"\"\"\n\n defaults = [\n (\n \"hci\",\n \"/dev_XX_XX_XX_XX_XX_XX\",\n \"hci0 device path, can be found with d-feet or similar dbus explorer.\",\n )\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"\", **config)\n self.add_defaults(Bluetooth.defaults)\n\n async def _config_async(self):\n # set initial values\n self.powered = await self._init_adapter()\n self.connected, self.device = await self._init_device()\n\n self.update_text()\n\n async def _init_adapter(self):\n # set up interface to adapter properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH, introspect)\n iface = obj.get_interface(BLUEZ_ADAPTER)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n powered = await iface.get_powered()\n # subscribe receiver to property changed\n props.on_properties_changed(self._signal_received)\n return powered\n\n async def _init_device(self):\n # set up interface to device properties using high-level api\n bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n introspect = await bus.introspect(BLUEZ, BLUEZ_PATH + self.hci)\n obj = bus.get_proxy_object(BLUEZ, BLUEZ_PATH + self.hci, introspect)\n iface = obj.get_interface(BLUEZ_DEVICE)\n props = obj.get_interface(BLUEZ_PROPERTIES)\n\n connected = await iface.get_connected()\n name = await iface.get_name()\n # subscribe receiver to property changed\n props.on_properties_changed(self._signal_received)\n return connected, name\n\n def _signal_received(self, interface_name, changed_properties, _invalidated_properties):\n powered = changed_properties.get(\"Powered\", None)\n if powered is not None:\n self.powered = powered.value\n self.update_text()\n\n connected = changed_properties.get(\"Connected\", None)\n if connected is not None:\n self.connected = connected.value\n self.update_text()\n\n device = changed_properties.get(\"Name\", None)\n if device is not None:\n self.device = device.value\n self.update_text()\n\n def update_text(self):\n text = \"\"\n if not self.powered:\n text = \"off\"\n else:\n if not self.connected:\n text = \"on\"\n else:\n text = self.device\n self.update(text)\n", "path": "libqtile/widget/bluetooth.py"}]} | 1,820 | 301 |
gh_patches_debug_5575 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3047 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add OpenAPI Specification for /databases/ endpoint
## Problem
In order to ensure the accuracy of the specs generated by drf-spectacular for the /databases/ endpoint, we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.
## Proposed solution
* Implement a custom preprocessing hook function to filter out all endpoints except for the /databases/ endpoint. The hook function selectively retains only the endpoint paths that match /databases/.
* By configuring the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /databases/ endpoint is considered during the OpenAPI specification generation process.
* Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command
</issue>
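As a rough sketch of the wiring (the dotted path is an assumption based on this repository's layout), drf-spectacular picks up preprocessing hooks from its settings dict:

```
# Hypothetical settings snippet; drf-spectacular resolves the dotted path.
SPECTACULAR_SETTINGS = {
    "PREPROCESSING_HOOKS": ["config.settings.openapi.custom_preprocessing_hook"],
}
```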
<code>
[start of config/settings/openapi.py]
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/data_files/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
[end of config/settings/openapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -2,7 +2,7 @@
filtered = []
for (path, path_regex, method, callback) in endpoints:
# Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/data_files/"):
+ if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/"):
filtered.append((path, path_regex, method, callback))
return filtered
| {"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/data_files/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /databases/ endpoint \n## Problem\r\nIn order to ensure the accuracy of the specs generated by drf-spectacular for /databases/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.\r\n\r\n## Proposed solution\r\n* Implement custom preprocessing hook function to filter out all endpoints except for the /databases/ endpoint.The hook function selectively retains only the endpoint paths that match the /datafiles/ \r\n* Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /datafiles/ endpoint is considered during the OpenAPI specification generation process.\r\n* Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command\r\n\r\n\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]} | 940 | 128 |
gh_patches_debug_7158 | rasdani/github-patches | git_diff | liberapay__liberapay.com-1140 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GitLab support is broken
`{"error":"API V3 is no longer supported. Use API V4 instead."}`
</issue>
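Since the error comes straight from GitLab's retired v3 prefix, the presumed fix is a one-line bump of the base URL inside the platform class (the endpoint paths it builds, e.g. `/users?username=...`, still exist under v4):

```
# Sketch: point the API base at v4 instead of the removed v3.
api_url = 'https://gitlab.com/api/v4'
```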
<code>
[start of liberapay/elsewhere/gitlab.py]
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from liberapay.elsewhere._base import PlatformOAuth2
4 from liberapay.elsewhere._extractors import key
5 from liberapay.elsewhere._paginators import header_links_paginator
6
7
8 class GitLab(PlatformOAuth2):
9
10 # Platform attributes
11 name = 'gitlab'
12 display_name = 'GitLab'
13 account_url = 'https://gitlab.com/u/{user_name}'
14 repo_url = 'https://gitlab.com/{slug}'
15 has_teams = True
16
17 # Auth attributes
18 # GitLab uses https://github.com/doorkeeper-gem/doorkeeper
19 auth_url = 'https://gitlab.com/oauth/authorize'
20 access_token_url = 'https://gitlab.com/oauth/token'
21
22 # can_auth_with_client_credentials = True
23 # https://gitlab.com/gitlab-org/gitlab-ce/issues/13795
24
25 # API attributes
26 # http://doc.gitlab.com/ce/api/
27 api_format = 'json'
28 api_paginator = header_links_paginator(total_header='X-Total')
29 api_url = 'https://gitlab.com/api/v3'
30 api_user_info_path = '/users/{user_id}'
31 api_user_name_info_path = '/users?username={user_name}'
32 api_user_self_info_path = '/user'
33 api_team_members_path = '/groups/{user_name}/members'
34 api_repos_path = '/projects?owned=true&visibility=public&order_by=last_activity_at&per_page=100'
35 api_starred_path = '/projects?starred=true&visibility=public'
36
37 # User info extractors
38 x_user_id = key('id')
39 x_user_name = key('username')
40 x_display_name = key('name')
41 x_email = key('email')
42 x_avatar_url = key('avatar_url')
43 x_description = key('bio')
44
45 # Repo info extractors
46 x_repo_id = key('id')
47 x_repo_name = key('name')
48 x_repo_slug = key('path_with_namespace')
49 x_repo_description = key('description')
50 x_repo_last_update = key('last_activity_at')
51 x_repo_is_fork = key('forked_from_project', clean=bool)
52 x_repo_stars_count = key('star_count')
53 x_repo_owner_id = key('owner', clean=lambda d: d['id'])
54
[end of liberapay/elsewhere/gitlab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/elsewhere/gitlab.py b/liberapay/elsewhere/gitlab.py
--- a/liberapay/elsewhere/gitlab.py
+++ b/liberapay/elsewhere/gitlab.py
@@ -26,7 +26,7 @@
# http://doc.gitlab.com/ce/api/
api_format = 'json'
api_paginator = header_links_paginator(total_header='X-Total')
- api_url = 'https://gitlab.com/api/v3'
+ api_url = 'https://gitlab.com/api/v4'
api_user_info_path = '/users/{user_id}'
api_user_name_info_path = '/users?username={user_name}'
api_user_self_info_path = '/user'
| {"golden_diff": "diff --git a/liberapay/elsewhere/gitlab.py b/liberapay/elsewhere/gitlab.py\n--- a/liberapay/elsewhere/gitlab.py\n+++ b/liberapay/elsewhere/gitlab.py\n@@ -26,7 +26,7 @@\n # http://doc.gitlab.com/ce/api/\n api_format = 'json'\n api_paginator = header_links_paginator(total_header='X-Total')\n- api_url = 'https://gitlab.com/api/v3'\n+ api_url = 'https://gitlab.com/api/v4'\n api_user_info_path = '/users/{user_id}'\n api_user_name_info_path = '/users?username={user_name}'\n api_user_self_info_path = '/user'\n", "issue": "GitLab support is broken\n`{\"error\":\"API V3 is no longer supported. Use API V4 instead.\"}`\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom liberapay.elsewhere._base import PlatformOAuth2\nfrom liberapay.elsewhere._extractors import key\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitLab(PlatformOAuth2):\n\n # Platform attributes\n name = 'gitlab'\n display_name = 'GitLab'\n account_url = 'https://gitlab.com/u/{user_name}'\n repo_url = 'https://gitlab.com/{slug}'\n has_teams = True\n\n # Auth attributes\n # GitLab uses https://github.com/doorkeeper-gem/doorkeeper\n auth_url = 'https://gitlab.com/oauth/authorize'\n access_token_url = 'https://gitlab.com/oauth/token'\n\n # can_auth_with_client_credentials = True\n # https://gitlab.com/gitlab-org/gitlab-ce/issues/13795\n\n # API attributes\n # http://doc.gitlab.com/ce/api/\n api_format = 'json'\n api_paginator = header_links_paginator(total_header='X-Total')\n api_url = 'https://gitlab.com/api/v3'\n api_user_info_path = '/users/{user_id}'\n api_user_name_info_path = '/users?username={user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/groups/{user_name}/members'\n api_repos_path = '/projects?owned=true&visibility=public&order_by=last_activity_at&per_page=100'\n api_starred_path = '/projects?starred=true&visibility=public'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n x_avatar_url = key('avatar_url')\n x_description = key('bio')\n\n # Repo info extractors\n x_repo_id = key('id')\n x_repo_name = key('name')\n x_repo_slug = key('path_with_namespace')\n x_repo_description = key('description')\n x_repo_last_update = key('last_activity_at')\n x_repo_is_fork = key('forked_from_project', clean=bool)\n x_repo_stars_count = key('star_count')\n x_repo_owner_id = key('owner', clean=lambda d: d['id'])\n", "path": "liberapay/elsewhere/gitlab.py"}]} | 1,198 | 167 |
gh_patches_debug_20940 | rasdani/github-patches | git_diff | inventree__InvenTree-2984 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add unittests for auth stack
Add full coverage for https://github.com/inventree/InvenTree/pull/2976 and for the full auth stack in the middleware.
</issue>
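A minimal sketch of the kind of coverage being asked for, driving the middleware through Django's test client; the URL, username, and token fixture below are assumptions, not code from this repository:

```
# Illustrative only: exercise AuthRequiredMiddleware with and without a token.
from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework.authtoken.models import Token


class AuthStackTests(TestCase):
    def setUp(self):
        user = get_user_model().objects.create_user("tester", password="pw")
        self.token = Token.objects.create(user=user)

    def test_anonymous_user_is_redirected_to_login(self):
        response = self.client.get("/index/")  # hypothetical frontend URL
        self.assertEqual(response.status_code, 302)

    def test_token_header_authenticates_request(self):
        response = self.client.get(
            "/index/", HTTP_AUTHORIZATION="Token {}".format(self.token.key)
        )
        self.assertEqual(response.status_code, 200)
```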
<code>
[start of InvenTree/InvenTree/middleware.py]
1 # -*- coding: utf-8 -*-
2
3 from django.conf import settings
4 from django.contrib.auth.middleware import PersistentRemoteUserMiddleware
5 from django.http import HttpResponse
6 from django.shortcuts import HttpResponseRedirect
7 from django.shortcuts import redirect
8 from django.urls import reverse_lazy, Resolver404
9 from django.urls import include, re_path
10
11 import logging
12
13 from rest_framework.authtoken.models import Token
14 from allauth_2fa.middleware import BaseRequire2FAMiddleware, AllauthTwoFactorMiddleware
15
16 from InvenTree.urls import frontendpatterns
17 from common.models import InvenTreeSetting
18
19
20 logger = logging.getLogger("inventree")
21
22
23 class AuthRequiredMiddleware(object):
24 def __init__(self, get_response):
25 self.get_response = get_response
26
27 def __call__(self, request):
28 # Code to be executed for each request before
29 # the view (and later middleware) are called.
30
31 assert hasattr(request, 'user')
32
33 # API requests are handled by the DRF library
34 if request.path_info.startswith('/api/'):
35 return self.get_response(request)
36
37 if not request.user.is_authenticated:
38 """
39 Normally, a web-based session would use csrftoken based authentication.
40 However when running an external application (e.g. the InvenTree app or Python library),
41 we must validate the user token manually.
42 """
43
44 authorized = False
45
46 # Allow static files to be accessed without auth
47 # Important for e.g. login page
48 if request.path_info.startswith('/static/'):
49 authorized = True
50
51 # Unauthorized users can access the login page
52 elif request.path_info.startswith('/accounts/'):
53 authorized = True
54
55 elif 'Authorization' in request.headers.keys() or 'authorization' in request.headers.keys():
56 auth = request.headers.get('Authorization', request.headers.get('authorization')).strip()
57
58 if auth.lower().startswith('token') and len(auth.split()) == 2:
59 token_key = auth.split()[1]
60
61 # Does the provided token match a valid user?
62 try:
63 token = Token.objects.get(key=token_key)
64
65 # Provide the user information to the request
66 request.user = token.user
67 authorized = True
68
69 except Token.DoesNotExist:
70 logger.warning(f"Access denied for unknown token {token_key}")
71
72 # No authorization was found for the request
73 if not authorized:
74 # A logout request will redirect the user to the login screen
75 if request.path_info == reverse_lazy('account_logout'):
76 return HttpResponseRedirect(reverse_lazy('account_login'))
77
78 path = request.path_info
79
80 # List of URL endpoints we *do not* want to redirect to
81 urls = [
82 reverse_lazy('account_login'),
83 reverse_lazy('account_logout'),
84 reverse_lazy('admin:login'),
85 reverse_lazy('admin:logout'),
86 ]
87
88 # Do not redirect requests to any of these paths
89 paths_ignore = [
90 '/api/',
91 '/js/',
92 '/media/',
93 '/static/',
94 ]
95
96 if path not in urls and not any([path.startswith(p) for p in paths_ignore]):
97 # Save the 'next' parameter to pass through to the login view
98
99 return redirect('{}?next={}'.format(reverse_lazy('account_login'), request.path))
100
101 else:
102 # Return a 401 (Unauthorized) response code for this request
103 return HttpResponse('Unauthorized', status=401)
104
105 response = self.get_response(request)
106
107 return response
108
109
110 url_matcher = re_path('', include(frontendpatterns))
111
112
113 class Check2FAMiddleware(BaseRequire2FAMiddleware):
114 """check if user is required to have MFA enabled"""
115 def require_2fa(self, request):
116 # Superusers are require to have 2FA.
117 try:
118 if url_matcher.resolve(request.path[1:]):
119 return InvenTreeSetting.get_setting('LOGIN_ENFORCE_MFA')
120 except Resolver404:
121 pass
122 return False
123
124
125 class CustomAllauthTwoFactorMiddleware(AllauthTwoFactorMiddleware):
126 """This function ensures only frontend code triggers the MFA auth cycle"""
127 def process_request(self, request):
128 try:
129 if not url_matcher.resolve(request.path[1:]):
130 super().process_request(request)
131 except Resolver404:
132 pass
133
134
135 class InvenTreeRemoteUserMiddleware(PersistentRemoteUserMiddleware):
136 """
137 Middleware to check if HTTP-header based auth is enabled and to set it up
138 """
139 header = settings.REMOTE_LOGIN_HEADER
140
141 def process_request(self, request):
142 if not settings.REMOTE_LOGIN:
143 return
144
145 return super().process_request(request)
146
[end of InvenTree/InvenTree/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/InvenTree/InvenTree/middleware.py b/InvenTree/InvenTree/middleware.py
--- a/InvenTree/InvenTree/middleware.py
+++ b/InvenTree/InvenTree/middleware.py
@@ -3,7 +3,6 @@
from django.conf import settings
from django.contrib.auth.middleware import PersistentRemoteUserMiddleware
from django.http import HttpResponse
-from django.shortcuts import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse_lazy, Resolver404
from django.urls import include, re_path
@@ -71,10 +70,6 @@
# No authorization was found for the request
if not authorized:
- # A logout request will redirect the user to the login screen
- if request.path_info == reverse_lazy('account_logout'):
- return HttpResponseRedirect(reverse_lazy('account_login'))
-
path = request.path_info
# List of URL endpoints we *do not* want to redirect to
| {"golden_diff": "diff --git a/InvenTree/InvenTree/middleware.py b/InvenTree/InvenTree/middleware.py\n--- a/InvenTree/InvenTree/middleware.py\n+++ b/InvenTree/InvenTree/middleware.py\n@@ -3,7 +3,6 @@\n from django.conf import settings\n from django.contrib.auth.middleware import PersistentRemoteUserMiddleware\n from django.http import HttpResponse\n-from django.shortcuts import HttpResponseRedirect\n from django.shortcuts import redirect\n from django.urls import reverse_lazy, Resolver404\n from django.urls import include, re_path\n@@ -71,10 +70,6 @@\n \n # No authorization was found for the request\n if not authorized:\n- # A logout request will redirect the user to the login screen\n- if request.path_info == reverse_lazy('account_logout'):\n- return HttpResponseRedirect(reverse_lazy('account_login'))\n-\n path = request.path_info\n \n # List of URL endpoints we *do not* want to redirect to\n", "issue": "Add unittests for auth stack\nAdd full coverage for https://github.com/inventree/InvenTree/pull/2976\r\nAnd the full auth stack in the middleware\nAdd unittests for auth stack\nAdd full coverage for https://github.com/inventree/InvenTree/pull/2976\r\nAnd the full auth stack in the middleware\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.auth.middleware import PersistentRemoteUserMiddleware\nfrom django.http import HttpResponse\nfrom django.shortcuts import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy, Resolver404\nfrom django.urls import include, re_path\n\nimport logging\n\nfrom rest_framework.authtoken.models import Token\nfrom allauth_2fa.middleware import BaseRequire2FAMiddleware, AllauthTwoFactorMiddleware\n\nfrom InvenTree.urls import frontendpatterns\nfrom common.models import InvenTreeSetting\n\n\nlogger = logging.getLogger(\"inventree\")\n\n\nclass AuthRequiredMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n\n assert hasattr(request, 'user')\n\n # API requests are handled by the DRF library\n if request.path_info.startswith('/api/'):\n return self.get_response(request)\n\n if not request.user.is_authenticated:\n \"\"\"\n Normally, a web-based session would use csrftoken based authentication.\n However when running an external application (e.g. the InvenTree app or Python library),\n we must validate the user token manually.\n \"\"\"\n\n authorized = False\n\n # Allow static files to be accessed without auth\n # Important for e.g. 
login page\n if request.path_info.startswith('/static/'):\n authorized = True\n\n # Unauthorized users can access the login page\n elif request.path_info.startswith('/accounts/'):\n authorized = True\n\n elif 'Authorization' in request.headers.keys() or 'authorization' in request.headers.keys():\n auth = request.headers.get('Authorization', request.headers.get('authorization')).strip()\n\n if auth.lower().startswith('token') and len(auth.split()) == 2:\n token_key = auth.split()[1]\n\n # Does the provided token match a valid user?\n try:\n token = Token.objects.get(key=token_key)\n\n # Provide the user information to the request\n request.user = token.user\n authorized = True\n\n except Token.DoesNotExist:\n logger.warning(f\"Access denied for unknown token {token_key}\")\n\n # No authorization was found for the request\n if not authorized:\n # A logout request will redirect the user to the login screen\n if request.path_info == reverse_lazy('account_logout'):\n return HttpResponseRedirect(reverse_lazy('account_login'))\n\n path = request.path_info\n\n # List of URL endpoints we *do not* want to redirect to\n urls = [\n reverse_lazy('account_login'),\n reverse_lazy('account_logout'),\n reverse_lazy('admin:login'),\n reverse_lazy('admin:logout'),\n ]\n\n # Do not redirect requests to any of these paths\n paths_ignore = [\n '/api/',\n '/js/',\n '/media/',\n '/static/',\n ]\n\n if path not in urls and not any([path.startswith(p) for p in paths_ignore]):\n # Save the 'next' parameter to pass through to the login view\n\n return redirect('{}?next={}'.format(reverse_lazy('account_login'), request.path))\n\n else:\n # Return a 401 (Unauthorized) response code for this request\n return HttpResponse('Unauthorized', status=401)\n\n response = self.get_response(request)\n\n return response\n\n\nurl_matcher = re_path('', include(frontendpatterns))\n\n\nclass Check2FAMiddleware(BaseRequire2FAMiddleware):\n \"\"\"check if user is required to have MFA enabled\"\"\"\n def require_2fa(self, request):\n # Superusers are require to have 2FA.\n try:\n if url_matcher.resolve(request.path[1:]):\n return InvenTreeSetting.get_setting('LOGIN_ENFORCE_MFA')\n except Resolver404:\n pass\n return False\n\n\nclass CustomAllauthTwoFactorMiddleware(AllauthTwoFactorMiddleware):\n \"\"\"This function ensures only frontend code triggers the MFA auth cycle\"\"\"\n def process_request(self, request):\n try:\n if not url_matcher.resolve(request.path[1:]):\n super().process_request(request)\n except Resolver404:\n pass\n\n\nclass InvenTreeRemoteUserMiddleware(PersistentRemoteUserMiddleware):\n \"\"\"\n Middleware to check if HTTP-header based auth is enabled and to set it up\n \"\"\"\n header = settings.REMOTE_LOGIN_HEADER\n\n def process_request(self, request):\n if not settings.REMOTE_LOGIN:\n return\n\n return super().process_request(request)\n", "path": "InvenTree/InvenTree/middleware.py"}]} | 1,948 | 214 |
gh_patches_debug_1058 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-4303 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Domain missing from Holland & Barrett website URLs
In the holland_and_barrett spider results, the website values returned are missing the domain, e.g. `"website": "/stores/aylesbury-3180/"`. This is what's in the code that the scraper is reading. But presumably AllThePlaces should return a fully qualified URL, i.e. `https://www.hollandandbarrett.com/stores/aylesbury-3180/` in this case.
I don't know what the microdata etc. standards say about whether relative URLs are allowed, but perhaps the framework code could be modified to automatically complete the URL against the page's URL whenever a relative URL is harvested.
</issue>
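A sketch of the "complete the URL" option, leaning on Scrapy's `response.urljoin`; the `website` key is taken from the example above, and the guard is an assumption about how the parsed item looks:

```
# Sketch: absolutize a relative website URL inside the spider's callback.
def parse(self, response):
    item = LinkedDataParser.parse(response, "LocalBusiness")
    if item and item.get("website"):
        item["website"] = response.urljoin(item["website"])
    yield item
```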
<code>
[start of locations/spiders/holland_and_barrett.py]
1 from scrapy.spiders import SitemapSpider
2
3 from locations.linked_data_parser import LinkedDataParser
4
5
6 class HollandAndBarrettSpider(SitemapSpider):
7 name = "holland_and_barrett"
8 item_attributes = {
9 "brand": "Holland & Barrett",
10 "brand_wikidata": "Q5880870",
11 }
12 sitemap_urls = [
13 "https://www.hollandandbarrett.com/sitemap-stores.xml",
14 "https://www.hollandandbarrett.nl/sitemap-stores.xml",
15 "https://www.hollandandbarrett.be/sitemap-stores.xml",
16 "https://www.hollandandbarrett.ie/sitemap-stores.xml",
17 ]
18 sitemap_rules = [("/stores/", "parse"), ("/winkels/", "parse")]
19 download_delay = 1.0
20
21 def parse(self, response):
22 yield LinkedDataParser.parse(response, "LocalBusiness")
23
[end of locations/spiders/holland_and_barrett.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/holland_and_barrett.py b/locations/spiders/holland_and_barrett.py
--- a/locations/spiders/holland_and_barrett.py
+++ b/locations/spiders/holland_and_barrett.py
@@ -19,4 +19,6 @@
download_delay = 1.0
def parse(self, response):
- yield LinkedDataParser.parse(response, "LocalBusiness")
+ item = LinkedDataParser.parse(response, "LocalBusiness")
+ item["website"] = response.urljoin(item["website"])
+ yield item
| {"golden_diff": "diff --git a/locations/spiders/holland_and_barrett.py b/locations/spiders/holland_and_barrett.py\n--- a/locations/spiders/holland_and_barrett.py\n+++ b/locations/spiders/holland_and_barrett.py\n@@ -19,4 +19,6 @@\n download_delay = 1.0\n \n def parse(self, response):\n- yield LinkedDataParser.parse(response, \"LocalBusiness\")\n+ item = LinkedDataParser.parse(response, \"LocalBusiness\")\n+ item[\"website\"] = response.urljoin(item[\"website\"])\n+ yield item\n", "issue": "Domain missing from Holland & Barrett website URLs\nIn the holland_and_barrett spider results, the website values returned are missing the domain, e.g. `\"website\": \"/stores/aylesbury-3180/\"`. This is what's in the code that the scraper is reading. But presumably AllThePlaces should return a fully qualified url, i.e. `https://www.hollandandbarrett.com/stores/aylesbury-3180/` in this case.\r\n\r\nI don't know what the micordata etc standards say about whether relative URLs are allowed, but perhaps the framework code could be modified to automatically complete the URL of the page if a relative URL is harvested.\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.linked_data_parser import LinkedDataParser\n\n\nclass HollandAndBarrettSpider(SitemapSpider):\n name = \"holland_and_barrett\"\n item_attributes = {\n \"brand\": \"Holland & Barrett\",\n \"brand_wikidata\": \"Q5880870\",\n }\n sitemap_urls = [\n \"https://www.hollandandbarrett.com/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.nl/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.be/sitemap-stores.xml\",\n \"https://www.hollandandbarrett.ie/sitemap-stores.xml\",\n ]\n sitemap_rules = [(\"/stores/\", \"parse\"), (\"/winkels/\", \"parse\")]\n download_delay = 1.0\n\n def parse(self, response):\n yield LinkedDataParser.parse(response, \"LocalBusiness\")\n", "path": "locations/spiders/holland_and_barrett.py"}]} | 924 | 127 |
gh_patches_debug_36801 | rasdani/github-patches | git_diff | pypa__pip-7216 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add SSL CA certificate information to `pip debug`
**What's the problem this feature will solve?**
As described in [#6720 (comment)](https://github.com/pypa/pip/issues/6720#issuecomment-538791684), pip may be using several sources of information for the CA certificate bundle to use for HTTPS requests. This makes it hard to debug user issues.
**Describe the solution you'd like**
In the output of `pip debug` we should include:
* the `cert` setting from the highest-priority pip configuration file (~~and the configuration file path~~) - on second thought the location doesn't matter much
* `os.environ.get('REQUESTS_CA_BUNDLE')`
* `os.environ.get('CURL_CA_BUNDLE')`
* `pip._vendor.certifi.where()`
This will provide insight into the CA certificate bundle in use for a given request, which can then be used in instructions to the user in conjunction with curl/openssl to submit an HTTP request independent of pip and rule out pip-specific issues.
**Alternative Solutions**
Do nothing.
**Additional context**
* #4459
* #4919
* #6335
* #6720
* #6915
</issue>
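For reference, the environment-derived values can be inspected outside pip with a few lines of Python (importing `certifi` directly here rather than `pip._vendor.certifi`; surfacing pip's own `cert` setting would additionally require reading pip's configuration):

```
import os

import certifi

print("REQUESTS_CA_BUNDLE:", os.environ.get("REQUESTS_CA_BUNDLE"))
print("CURL_CA_BUNDLE:", os.environ.get("CURL_CA_BUNDLE"))
print("certifi.where():", certifi.where())
```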
<code>
[start of src/pip/_internal/commands/debug.py]
1 # The following comment should be removed at some point in the future.
2 # mypy: disallow-untyped-defs=False
3
4 from __future__ import absolute_import
5
6 import locale
7 import logging
8 import sys
9
10 from pip._internal.cli import cmdoptions
11 from pip._internal.cli.base_command import Command
12 from pip._internal.cli.cmdoptions import make_target_python
13 from pip._internal.cli.status_codes import SUCCESS
14 from pip._internal.utils.logging import indent_log
15 from pip._internal.utils.misc import get_pip_version
16 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
17 from pip._internal.wheel import format_tag
18
19 if MYPY_CHECK_RUNNING:
20 from typing import Any, List
21 from optparse import Values
22
23 logger = logging.getLogger(__name__)
24
25
26 def show_value(name, value):
27 # type: (str, str) -> None
28 logger.info('{}: {}'.format(name, value))
29
30
31 def show_sys_implementation():
32 # type: () -> None
33 logger.info('sys.implementation:')
34 if hasattr(sys, 'implementation'):
35 implementation = sys.implementation # type: ignore
36 implementation_name = implementation.name
37 else:
38 implementation_name = ''
39
40 with indent_log():
41 show_value('name', implementation_name)
42
43
44 def show_tags(options):
45 # type: (Values) -> None
46 tag_limit = 10
47
48 target_python = make_target_python(options)
49 tags = target_python.get_tags()
50
51 # Display the target options that were explicitly provided.
52 formatted_target = target_python.format_given()
53 suffix = ''
54 if formatted_target:
55 suffix = ' (target: {})'.format(formatted_target)
56
57 msg = 'Compatible tags: {}{}'.format(len(tags), suffix)
58 logger.info(msg)
59
60 if options.verbose < 1 and len(tags) > tag_limit:
61 tags_limited = True
62 tags = tags[:tag_limit]
63 else:
64 tags_limited = False
65
66 with indent_log():
67 for tag in tags:
68 logger.info(format_tag(tag))
69
70 if tags_limited:
71 msg = (
72 '...\n'
73 '[First {tag_limit} tags shown. Pass --verbose to show all.]'
74 ).format(tag_limit=tag_limit)
75 logger.info(msg)
76
77
78 class DebugCommand(Command):
79 """
80 Display debug information.
81 """
82
83 usage = """
84 %prog <options>"""
85 ignore_require_venv = True
86
87 def __init__(self, *args, **kw):
88 super(DebugCommand, self).__init__(*args, **kw)
89
90 cmd_opts = self.cmd_opts
91 cmdoptions.add_target_python_options(cmd_opts)
92 self.parser.insert_option_group(0, cmd_opts)
93
94 def run(self, options, args):
95 # type: (Values, List[Any]) -> int
96 logger.warning(
97 "This command is only meant for debugging. "
98 "Do not use this with automation for parsing and getting these "
99 "details, since the output and options of this command may "
100 "change without notice."
101 )
102 show_value('pip version', get_pip_version())
103 show_value('sys.version', sys.version)
104 show_value('sys.executable', sys.executable)
105 show_value('sys.getdefaultencoding', sys.getdefaultencoding())
106 show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())
107 show_value(
108 'locale.getpreferredencoding', locale.getpreferredencoding(),
109 )
110 show_value('sys.platform', sys.platform)
111 show_sys_implementation()
112
113 show_tags(options)
114
115 return SUCCESS
116
[end of src/pip/_internal/commands/debug.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py
--- a/src/pip/_internal/commands/debug.py
+++ b/src/pip/_internal/commands/debug.py
@@ -5,8 +5,11 @@
import locale
import logging
+import os
import sys
+from pip._vendor.certifi import where
+
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.cli.cmdoptions import make_target_python
@@ -17,14 +20,14 @@
from pip._internal.wheel import format_tag
if MYPY_CHECK_RUNNING:
- from typing import Any, List
+ from typing import Any, List, Optional
from optparse import Values
logger = logging.getLogger(__name__)
def show_value(name, value):
- # type: (str, str) -> None
+ # type: (str, Optional[str]) -> None
logger.info('{}: {}'.format(name, value))
@@ -75,6 +78,25 @@
logger.info(msg)
+def ca_bundle_info(config):
+ levels = set()
+ for key, value in config.items():
+ levels.add(key.split('.')[0])
+
+ if not levels:
+ return "Not specified"
+
+ levels_that_override_global = ['install', 'wheel', 'download']
+ global_overriding_level = [
+ level for level in levels if level in levels_that_override_global
+ ]
+ if not global_overriding_level:
+ return 'global'
+
+ levels.remove('global')
+ return ", ".join(levels)
+
+
class DebugCommand(Command):
"""
Display debug information.
@@ -90,6 +112,7 @@
cmd_opts = self.cmd_opts
cmdoptions.add_target_python_options(cmd_opts)
self.parser.insert_option_group(0, cmd_opts)
+ self.parser.config.load()
def run(self, options, args):
# type: (Values, List[Any]) -> int
@@ -110,6 +133,11 @@
show_value('sys.platform', sys.platform)
show_sys_implementation()
+ show_value("'cert' config value", ca_bundle_info(self.parser.config))
+ show_value("REQUESTS_CA_BUNDLE", os.environ.get('REQUESTS_CA_BUNDLE'))
+ show_value("CURL_CA_BUNDLE", os.environ.get('CURL_CA_BUNDLE'))
+ show_value("pip._vendor.certifi.where()", where())
+
show_tags(options)
return SUCCESS
| {"golden_diff": "diff --git a/src/pip/_internal/commands/debug.py b/src/pip/_internal/commands/debug.py\n--- a/src/pip/_internal/commands/debug.py\n+++ b/src/pip/_internal/commands/debug.py\n@@ -5,8 +5,11 @@\n \n import locale\n import logging\n+import os\n import sys\n \n+from pip._vendor.certifi import where\n+\n from pip._internal.cli import cmdoptions\n from pip._internal.cli.base_command import Command\n from pip._internal.cli.cmdoptions import make_target_python\n@@ -17,14 +20,14 @@\n from pip._internal.wheel import format_tag\n \n if MYPY_CHECK_RUNNING:\n- from typing import Any, List\n+ from typing import Any, List, Optional\n from optparse import Values\n \n logger = logging.getLogger(__name__)\n \n \n def show_value(name, value):\n- # type: (str, str) -> None\n+ # type: (str, Optional[str]) -> None\n logger.info('{}: {}'.format(name, value))\n \n \n@@ -75,6 +78,25 @@\n logger.info(msg)\n \n \n+def ca_bundle_info(config):\n+ levels = set()\n+ for key, value in config.items():\n+ levels.add(key.split('.')[0])\n+\n+ if not levels:\n+ return \"Not specified\"\n+\n+ levels_that_override_global = ['install', 'wheel', 'download']\n+ global_overriding_level = [\n+ level for level in levels if level in levels_that_override_global\n+ ]\n+ if not global_overriding_level:\n+ return 'global'\n+\n+ levels.remove('global')\n+ return \", \".join(levels)\n+\n+\n class DebugCommand(Command):\n \"\"\"\n Display debug information.\n@@ -90,6 +112,7 @@\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n+ self.parser.config.load()\n \n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n@@ -110,6 +133,11 @@\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n \n+ show_value(\"'cert' config value\", ca_bundle_info(self.parser.config))\n+ show_value(\"REQUESTS_CA_BUNDLE\", os.environ.get('REQUESTS_CA_BUNDLE'))\n+ show_value(\"CURL_CA_BUNDLE\", os.environ.get('CURL_CA_BUNDLE'))\n+ show_value(\"pip._vendor.certifi.where()\", where())\n+\n show_tags(options)\n \n return SUCCESS\n", "issue": "Add SSL CA certificate information to `pip debug`\n**What's the problem this feature will solve?**\r\n\r\nAs described in [#6720 (comment)](https://github.com/pypa/pip/issues/6720#issuecomment-538791684), pip may be using several sources of information for the CA certificate bundle to use for HTTPS requests. 
This makes it hard to debug user issues.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIn the output of `pip debug` we should include:\r\n\r\n* the `cert` setting from the highest-priority pip configuration file (~~and the configuration file path~~) - on second thought the location doesn't matter much\r\n* `os.environ.get('REQUESTS_CA_BUNDLE')`\r\n* `os.environ.get('CURL_CA_BUNDLE')`\r\n* `pip._vendor.certifi.where()`\r\n\r\nThis will provide insight into the CA certificate bundle in use for a given request, which can then be used in instructions to the user in conjunction with curl/openssl to submit an HTTP request independent of pip and rule out pip-specific issues.\r\n\r\n**Alternative Solutions**\r\n\r\nDo nothing.\r\n\r\n**Additional context**\r\n\r\n* #4459\r\n* #4919\r\n* #6335\r\n* #6720\r\n* #6915\n", "before_files": [{"content": "# The following comment should be removed at some point in the future.\n# mypy: disallow-untyped-defs=False\n\nfrom __future__ import absolute_import\n\nimport locale\nimport logging\nimport sys\n\nfrom pip._internal.cli import cmdoptions\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.cmdoptions import make_target_python\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import get_pip_version\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\nfrom pip._internal.wheel import format_tag\n\nif MYPY_CHECK_RUNNING:\n from typing import Any, List\n from optparse import Values\n\nlogger = logging.getLogger(__name__)\n\n\ndef show_value(name, value):\n # type: (str, str) -> None\n logger.info('{}: {}'.format(name, value))\n\n\ndef show_sys_implementation():\n # type: () -> None\n logger.info('sys.implementation:')\n if hasattr(sys, 'implementation'):\n implementation = sys.implementation # type: ignore\n implementation_name = implementation.name\n else:\n implementation_name = ''\n\n with indent_log():\n show_value('name', implementation_name)\n\n\ndef show_tags(options):\n # type: (Values) -> None\n tag_limit = 10\n\n target_python = make_target_python(options)\n tags = target_python.get_tags()\n\n # Display the target options that were explicitly provided.\n formatted_target = target_python.format_given()\n suffix = ''\n if formatted_target:\n suffix = ' (target: {})'.format(formatted_target)\n\n msg = 'Compatible tags: {}{}'.format(len(tags), suffix)\n logger.info(msg)\n\n if options.verbose < 1 and len(tags) > tag_limit:\n tags_limited = True\n tags = tags[:tag_limit]\n else:\n tags_limited = False\n\n with indent_log():\n for tag in tags:\n logger.info(format_tag(tag))\n\n if tags_limited:\n msg = (\n '...\\n'\n '[First {tag_limit} tags shown. Pass --verbose to show all.]'\n ).format(tag_limit=tag_limit)\n logger.info(msg)\n\n\nclass DebugCommand(Command):\n \"\"\"\n Display debug information.\n \"\"\"\n\n usage = \"\"\"\n %prog <options>\"\"\"\n ignore_require_venv = True\n\n def __init__(self, *args, **kw):\n super(DebugCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n cmdoptions.add_target_python_options(cmd_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n logger.warning(\n \"This command is only meant for debugging. 
\"\n \"Do not use this with automation for parsing and getting these \"\n \"details, since the output and options of this command may \"\n \"change without notice.\"\n )\n show_value('pip version', get_pip_version())\n show_value('sys.version', sys.version)\n show_value('sys.executable', sys.executable)\n show_value('sys.getdefaultencoding', sys.getdefaultencoding())\n show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())\n show_value(\n 'locale.getpreferredencoding', locale.getpreferredencoding(),\n )\n show_value('sys.platform', sys.platform)\n show_sys_implementation()\n\n show_tags(options)\n\n return SUCCESS\n", "path": "src/pip/_internal/commands/debug.py"}]} | 1,824 | 570 |
gh_patches_debug_19632 | rasdani/github-patches | git_diff | networkx__networkx-3628 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jit json import/export
I would consider the functions `jit_data` and `jit_graph` to be inverses of each other, so that
```
import networkx as nx
nx.jit_graph(nx.jit_data(nx.Graph()))
```
works.
Instead, it produces a TypeError (nx 2.2), because jit_data is a function `nx graph -> json string`, while jit_graph is a function `json object -> nx graph`, so that the correct program would be
```
import networkx as nx
import json
nx.jit_graph(json.loads(nx.jit_data(nx.Graph())))
```
This is documented, but in my view unexpected and incoherent behavior. I'm pretty new to networkx and am not familiar with your design philosophy, but I see the options
* to add a clarifying note in the documentation OR
* return the json object in `jit_data` OR
* make use of the json.loads function in `jit_graph`.
What are your opinions on this?
I am willing to submit a PR (but probably it is just easier for you to make that one-line commit, so that's also fine :))
</issue>
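A sketch of the third option, which keeps `jit_data`'s current return type while letting `jit_graph` accept either form:

```
# Sketch: tolerate both a JSON string and an already-parsed object.
import json

def jit_graph(data, create_using=None):
    if isinstance(data, str):
        data = json.loads(data)
    ...  # existing node/edge handling unchanged
```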
<code>
[start of networkx/readwrite/json_graph/jit.py]
1 # Copyright (C) 2011-2019 by
2 # Aric Hagberg <[email protected]>
3 # Dan Schult <[email protected]>
4 # Pieter Swart <[email protected]>
5 # All rights reserved.
6 # BSD license.
7
8 """
9 Read and write NetworkX graphs as JavaScript InfoVis Toolkit (JIT) format JSON.
10
11 See the `JIT documentation`_ for more examples.
12
13 Format
14 ------
15 var json = [
16 {
17 "id": "aUniqueIdentifier",
18 "name": "usually a nodes name",
19 "data": {
20 "some key": "some value",
21 "some other key": "some other value"
22 },
23 "adjacencies": [
24 {
25 nodeTo:"aNodeId",
26 data: {} //put whatever you want here
27 },
28 'other adjacencies go here...'
29 },
30
31 'other nodes go here...'
32 ];
33 .. _JIT documentation: http://thejit.org
34 """
35
36 import json
37 import networkx as nx
38 from networkx.utils.decorators import not_implemented_for
39
40 __all__ = ['jit_graph', 'jit_data']
41
42
43 def jit_graph(data, create_using=None):
44 """Read a graph from JIT JSON.
45
46 Parameters
47 ----------
48 data : JSON Graph Object
49
50 create_using : Networkx Graph, optional (default: Graph())
51 Return graph of this type. The provided instance will be cleared.
52
53 Returns
54 -------
55 G : NetworkX Graph built from create_using if provided.
56 """
57 if create_using is None:
58 G = nx.Graph()
59 else:
60 G = create_using
61 G.clear()
62
63 for node in data:
64 G.add_node(node['id'], **node['data'])
65 if node.get('adjacencies') is not None:
66 for adj in node['adjacencies']:
67 G.add_edge(node['id'], adj['nodeTo'], **adj['data'])
68 return G
69
70
71 @not_implemented_for('multigraph')
72 def jit_data(G, indent=None):
73 """Returns data in JIT JSON format.
74
75 Parameters
76 ----------
77 G : NetworkX Graph
78
79 indent: optional, default=None
80 If indent is a non-negative integer, then JSON array elements and object
81 members will be pretty-printed with that indent level. An indent level
82 of 0, or negative, will only insert newlines. None (the default) selects
83 the most compact representation.
84
85 Returns
86 -------
87 data: JIT JSON string
88 """
89 json_graph = []
90 for node in G.nodes():
91 json_node = {
92 "id": node,
93 "name": node
94 }
95 # node data
96 json_node["data"] = G.nodes[node]
97 # adjacencies
98 if G[node]:
99 json_node["adjacencies"] = []
100 for neighbour in G[node]:
101 adjacency = {
102 "nodeTo": neighbour,
103 }
104 # adjacency data
105 adjacency["data"] = G.edges[node, neighbour]
106 json_node["adjacencies"].append(adjacency)
107 json_graph.append(json_node)
108 return json.dumps(json_graph, indent=indent)
109
[end of networkx/readwrite/json_graph/jit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/readwrite/json_graph/jit.py b/networkx/readwrite/json_graph/jit.py
--- a/networkx/readwrite/json_graph/jit.py
+++ b/networkx/readwrite/json_graph/jit.py
@@ -60,6 +60,9 @@
G = create_using
G.clear()
+ if nx.utils.is_string_like(data):
+ data = json.loads(data)
+
for node in data:
G.add_node(node['id'], **node['data'])
if node.get('adjacencies') is not None:
@@ -77,10 +80,10 @@
G : NetworkX Graph
indent: optional, default=None
- If indent is a non-negative integer, then JSON array elements and object
- members will be pretty-printed with that indent level. An indent level
- of 0, or negative, will only insert newlines. None (the default) selects
- the most compact representation.
+ If indent is a non-negative integer, then JSON array elements and
+ object members will be pretty-printed with that indent level.
+ An indent level of 0, or negative, will only insert newlines.
+ None (the default) selects the most compact representation.
Returns
-------
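A quick sanity check of the patched behaviour (a sketch, assuming the diff above is applied and `jit_data`/`jit_graph` are as in the listing): the round trip from the issue now works with either a JSON string or an already-parsed object.

```python
import json
import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=3)

s = nx.jit_data(G)                 # JSON string
H1 = nx.jit_graph(s)               # patched path: string input is json.loads()-ed internally
H2 = nx.jit_graph(json.loads(s))   # parsed input keeps working as before

assert sorted(H1.edges()) == sorted(H2.edges()) == sorted(G.edges())
```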
| {"golden_diff": "diff --git a/networkx/readwrite/json_graph/jit.py b/networkx/readwrite/json_graph/jit.py\n--- a/networkx/readwrite/json_graph/jit.py\n+++ b/networkx/readwrite/json_graph/jit.py\n@@ -60,6 +60,9 @@\n G = create_using\n G.clear()\n \n+ if nx.utils.is_string_like(data):\n+ data = json.loads(data)\n+\n for node in data:\n G.add_node(node['id'], **node['data'])\n if node.get('adjacencies') is not None:\n@@ -77,10 +80,10 @@\n G : NetworkX Graph\n \n indent: optional, default=None\n- If indent is a non-negative integer, then JSON array elements and object\n- members will be pretty-printed with that indent level. An indent level\n- of 0, or negative, will only insert newlines. None (the default) selects\n- the most compact representation.\n+ If indent is a non-negative integer, then JSON array elements and\n+ object members will be pretty-printed with that indent level.\n+ An indent level of 0, or negative, will only insert newlines.\n+ None (the default) selects the most compact representation.\n \n Returns\n -------\n", "issue": "jit json import/export\nI would consider the functions `jit_data` and `jit_graph` to be their inverses, so that\r\n```\r\nimport networkx as nx\r\nnx.jit_graph(nx.jit_data(nx.Graph()))\r\n```\r\nworks.\r\n\r\nInstead, it produces a TypeError (nx 2.2), because jit_data is a function `nx graph -> json string`, while jit_graph is a function `json object -> nx graph`, so that the correct program would be\r\n```\r\nimport networkx as nx\r\nimport json\r\nnx.jit_graph(json.loads(nx.jit_data(nx.Graph())))\r\n```\r\n\r\nThis is documented, but in my view unexpected and incoherent behavior. I'm pretty new to networkx and are not familiar with your design philosophy, but see the options\r\n* to add a clarifying note in the documentation OR\r\n* return the json object in `jit_data` OR\r\n* make use of the json.loads function in `jit_graph`.\r\n\r\nWhat are your opinions on this?\r\nI am willing to submit a PR (but probably it is just easier for you to make that oneline-commit, so that's also fine :))\n", "before_files": [{"content": "# Copyright (C) 2011-2019 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\n\"\"\"\nRead and write NetworkX graphs as JavaScript InfoVis Toolkit (JIT) format JSON.\n\nSee the `JIT documentation`_ for more examples.\n\nFormat\n------\nvar json = [\n {\n \"id\": \"aUniqueIdentifier\",\n \"name\": \"usually a nodes name\",\n \"data\": {\n \"some key\": \"some value\",\n \"some other key\": \"some other value\"\n },\n \"adjacencies\": [\n {\n nodeTo:\"aNodeId\",\n data: {} //put whatever you want here\n },\n 'other adjacencies go here...'\n },\n\n 'other nodes go here...'\n];\n.. _JIT documentation: http://thejit.org\n\"\"\"\n\nimport json\nimport networkx as nx\nfrom networkx.utils.decorators import not_implemented_for\n\n__all__ = ['jit_graph', 'jit_data']\n\n\ndef jit_graph(data, create_using=None):\n \"\"\"Read a graph from JIT JSON.\n\n Parameters\n ----------\n data : JSON Graph Object\n\n create_using : Networkx Graph, optional (default: Graph())\n Return graph of this type. 
The provided instance will be cleared.\n\n Returns\n -------\n G : NetworkX Graph built from create_using if provided.\n \"\"\"\n if create_using is None:\n G = nx.Graph()\n else:\n G = create_using\n G.clear()\n\n for node in data:\n G.add_node(node['id'], **node['data'])\n if node.get('adjacencies') is not None:\n for adj in node['adjacencies']:\n G.add_edge(node['id'], adj['nodeTo'], **adj['data'])\n return G\n\n\n@not_implemented_for('multigraph')\ndef jit_data(G, indent=None):\n \"\"\"Returns data in JIT JSON format.\n\n Parameters\n ----------\n G : NetworkX Graph\n\n indent: optional, default=None\n If indent is a non-negative integer, then JSON array elements and object\n members will be pretty-printed with that indent level. An indent level\n of 0, or negative, will only insert newlines. None (the default) selects\n the most compact representation.\n\n Returns\n -------\n data: JIT JSON string\n \"\"\"\n json_graph = []\n for node in G.nodes():\n json_node = {\n \"id\": node,\n \"name\": node\n }\n # node data\n json_node[\"data\"] = G.nodes[node]\n # adjacencies\n if G[node]:\n json_node[\"adjacencies\"] = []\n for neighbour in G[node]:\n adjacency = {\n \"nodeTo\": neighbour,\n }\n # adjacency data\n adjacency[\"data\"] = G.edges[node, neighbour]\n json_node[\"adjacencies\"].append(adjacency)\n json_graph.append(json_node)\n return json.dumps(json_graph, indent=indent)\n", "path": "networkx/readwrite/json_graph/jit.py"}]} | 1,690 | 282 |
gh_patches_debug_102 | rasdani/github-patches | git_diff | scipy__scipy-17210 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: Build failure due to problems with shebang line in cythoner.py
I ran into a problem running `dev.py` that appears to be caused by the shebang line `#!python3` in the file `scipy/_build_utils/cythoner.py`. If I change it to `#!/usr/bin/env python` then the build works fine.
Most files in scipy with a shebang line use `#!/usr/bin/env python`. Only files in the `_build_utils` use `#!python3`.
Error message when running `python dev.py build`:
```shell
Meson build setup OK
💻 ninja -C /mnt/c/Users/Jozsef/OSS/scipy-test/build
ninja: Entering directory `/mnt/c/Users/Jozsef/OSS/scipy-test/build'
[3/1562] Generating 'scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c'.
FAILED: scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c
/mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py ../scipy/_lib/_ccallback_c.pyx scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c
/bin/sh: 1: /mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py: not found
[12/1562] Compiling C++ object scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so.p/_uarray_dispatch.cxx.o
ninja: build stopped: subcommand failed.
Build failed!
```
If I try running `cythoner.py` directly:
```shell
-bash: /mnt/c/Users/Jozsef/OSS/scipy-conda/scipy/_build_utils/cythoner.py: python3: bad interpreter: No such file or directory
```
I'm using conda with WSL (Ubuntu).
</issue>
<code>
[start of scipy/_build_utils/cythoner.py]
1 #!python3
2 """ Scipy variant of Cython command
3
4 Cython, as applied to single pyx file.
5
6 Expects two arguments, infile and outfile.
7
8 Other options passed through to cython command line parser.
9 """
10
11 import os
12 import os.path as op
13 import sys
14 import subprocess as sbp
15
16
17 def main():
18 in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3])
19
20 sbp.run(['cython', '-3', '--fast-fail',
21 '--output-file', out_fname,
22 '--include-dir', os.getcwd()] +
23 sys.argv[3:] + [in_fname],
24 check=True)
25
26
27 if __name__ == '__main__':
28 main()
29
[end of scipy/_build_utils/cythoner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scipy/_build_utils/cythoner.py b/scipy/_build_utils/cythoner.py
--- a/scipy/_build_utils/cythoner.py
+++ b/scipy/_build_utils/cythoner.py
@@ -1,4 +1,4 @@
-#!python3
+#!/usr/bin/env python3
""" Scipy variant of Cython command
Cython, as applied to single pyx file.
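For context on why the one-line change works (an illustration, not code from the repo): a shebang without a directory component is passed to the kernel as a literal path, so `#!python3` only succeeds if an executable named `python3` sits in the working directory. Routing through `env` defers the lookup to `PATH`:

```python
#!/usr/bin/env python3
# 'env' resolves 'python3' via PATH at run time, so the script runs under
# whichever interpreter the active environment provides (apt, conda, WSL).
# A bare '#!python3' is taken as a relative path and fails with
# "bad interpreter: No such file or directory", as seen in the report above.
```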
| {"golden_diff": "diff --git a/scipy/_build_utils/cythoner.py b/scipy/_build_utils/cythoner.py\n--- a/scipy/_build_utils/cythoner.py\n+++ b/scipy/_build_utils/cythoner.py\n@@ -1,4 +1,4 @@\n-#!python3\n+#!/usr/bin/env python3\n \"\"\" Scipy variant of Cython command\n \n Cython, as applied to single pyx file.\n", "issue": "BUG: Build failure due to problems with shebang line in cythoner.py\nI ran into a problem running `dev.py` that appears to be caused by the shebang line `#!python3` in the file `scipy/_build_utils/cythoner.py`. If I change it to `#!/usr/bin/env python` then the build works fine.\r\n\r\nMost files in scipy with a shebang line use `#!/usr/bin/env python`. Only files in the `_build_utils` use `#!python3`.\r\n\r\nError message when running `python dev.py build`:\r\n\r\n```shell\r\nMeson build setup OK\r\n\ud83d\udcbb ninja -C /mnt/c/Users/Jozsef/OSS/scipy-test/build\r\nninja: Entering directory `/mnt/c/Users/Jozsef/OSS/scipy-test/build'\r\n[3/1562] Generating 'scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c'.\r\nFAILED: scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c\r\n/mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py ../scipy/_lib/_ccallback_c.pyx scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so.p/_ccallback_c.c\r\n/bin/sh: 1: /mnt/c/Users/Jozsef/OSS/scipy-test/scipy/_build_utils/cythoner.py: not found\r\n[12/1562] Compiling C++ object scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so.p/_uarray_dispatch.cxx.o\r\nninja: build stopped: subcommand failed.\r\nBuild failed!\r\n```\r\n\r\nIf I try running `cythoner.py` directly:\r\n\r\n```shell\r\n-bash: /mnt/c/Users/Jozsef/OSS/scipy-conda/scipy/_build_utils/cythoner.py: python3: bad interpreter: No such file or directory\r\n```\r\n\r\nI'm using conda with WSL (Ubuntu).\n", "before_files": [{"content": "#!python3\n\"\"\" Scipy variant of Cython command\n\nCython, as applied to single pyx file.\n\nExpects two arguments, infile and outfile.\n\nOther options passed through to cython command line parser.\n\"\"\"\n\nimport os\nimport os.path as op\nimport sys\nimport subprocess as sbp\n\n\ndef main():\n in_fname, out_fname = (op.abspath(p) for p in sys.argv[1:3])\n\n sbp.run(['cython', '-3', '--fast-fail',\n '--output-file', out_fname,\n '--include-dir', os.getcwd()] +\n sys.argv[3:] + [in_fname],\n check=True)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scipy/_build_utils/cythoner.py"}]} | 1,211 | 92 |
gh_patches_debug_28238 | rasdani/github-patches | git_diff | falconry__falcon-1785 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom media handlers: Unexpected issue when providing custom json handler
This is in falcon-2.0
Look at the documentation [here][1] for using rapidjson for encoding/decoding json. By providing:
`extra_handlers={'application/json': json_handler}` we are still left with the default handler for content-type `application-json; charset=UTF-8`. This results in an unexpected behaviour when some client library (e.g. Retrofit for Android) includes the charset in the header.
While the documentation should be updated, the expected behaviour is that if the handler for `application/json` is updated - it should also update the handler for variant with charset (or at least throw a warning) otherwise there is a possibility of hidden bugs.
[1]: https://falcon.readthedocs.io/en/stable/api/media.html
</issue>
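A minimal reproduction sketch of the behaviour described above (falcon 2.0 API; the stock `JSONHandler` stands in for a custom rapidjson-backed handler):

```python
import falcon
from falcon import media

json_handler = media.JSONHandler()  # imagine a rapidjson-backed handler here

app = falcon.API()
app.req_options.media_handlers.update({'application/json': json_handler})

# The Handlers dict built in falcon/media/handlers.py (see listing below)
# still holds its own entry for 'application/json; charset=UTF-8', so a
# request carrying that Content-Type resolves to the stock handler and the
# override above is silently bypassed.
```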
<code>
[start of falcon/media/handlers.py]
1 from collections import UserDict
2
3 from falcon import errors
4 from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED
5 from falcon.media.json import JSONHandler
6 from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions
7 from falcon.media.urlencoded import URLEncodedFormHandler
8 from falcon.vendor import mimeparse
9
10
11 class Handlers(UserDict):
12 """A :class:`dict`-like object that manages Internet media type handlers."""
13 def __init__(self, initial=None):
14 handlers = initial or {
15 'application/json': JSONHandler(),
16 'application/json; charset=UTF-8': JSONHandler(),
17 MEDIA_MULTIPART: MultipartFormHandler(),
18 MEDIA_URLENCODED: URLEncodedFormHandler(),
19 }
20
21 # NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable.
22 # Also, this results in self.update(...) being called.
23 UserDict.__init__(self, handlers)
24
25 def _resolve_media_type(self, media_type, all_media_types):
26 resolved = None
27
28 try:
29 # NOTE(jmvrbanac): Mimeparse will return an empty string if it can
30 # parse the media type, but cannot find a suitable type.
31 resolved = mimeparse.best_match(
32 all_media_types,
33 media_type
34 )
35 except ValueError:
36 pass
37
38 return resolved
39
40 def find_by_media_type(self, media_type, default):
41 # PERF(jmvrbanac): Check via a quick methods first for performance
42 if media_type == '*/*' or not media_type:
43 media_type = default
44
45 try:
46 return self.data[media_type]
47 except KeyError:
48 pass
49
50 # PERF(jmvrbanac): Fallback to the slower method
51 resolved = self._resolve_media_type(media_type, self.data.keys())
52
53 if not resolved:
54 raise errors.HTTPUnsupportedMediaType(
55 description='{0} is an unsupported media type.'.format(media_type)
56 )
57
58 return self.data[resolved]
59
60
61 # NOTE(vytas): An ugly way to work around circular imports.
62 MultipartParseOptions._DEFAULT_HANDLERS = Handlers({
63 'application/json': JSONHandler(),
64 'application/json; charset=UTF-8': JSONHandler(),
65 MEDIA_URLENCODED: URLEncodedFormHandler(),
66 }) # type: ignore
67
[end of falcon/media/handlers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/falcon/media/handlers.py b/falcon/media/handlers.py
--- a/falcon/media/handlers.py
+++ b/falcon/media/handlers.py
@@ -1,7 +1,7 @@
from collections import UserDict
from falcon import errors
-from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED
+from falcon.constants import MEDIA_JSON, MEDIA_MULTIPART, MEDIA_URLENCODED
from falcon.media.json import JSONHandler
from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions
from falcon.media.urlencoded import URLEncodedFormHandler
@@ -12,8 +12,7 @@
"""A :class:`dict`-like object that manages Internet media type handlers."""
def __init__(self, initial=None):
handlers = initial or {
- 'application/json': JSONHandler(),
- 'application/json; charset=UTF-8': JSONHandler(),
+ MEDIA_JSON: JSONHandler(),
MEDIA_MULTIPART: MultipartFormHandler(),
MEDIA_URLENCODED: URLEncodedFormHandler(),
}
@@ -60,7 +59,6 @@
# NOTE(vytas): An ugly way to work around circular imports.
MultipartParseOptions._DEFAULT_HANDLERS = Handlers({
- 'application/json': JSONHandler(),
- 'application/json; charset=UTF-8': JSONHandler(),
+ MEDIA_JSON: JSONHandler(),
MEDIA_URLENCODED: URLEncodedFormHandler(),
}) # type: ignore
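With the explicit charset key gone, a `Content-Type` such as `application/json; charset=UTF-8` no longer finds an exact entry and instead falls through to the `mimeparse.best_match` fallback in `find_by_media_type`, which ignores the parameter. A sketch of that resolution step, assuming python-mimeparse semantics:

```python
from falcon.vendor import mimeparse

best = mimeparse.best_match(['application/json'], 'application/json; charset=UTF-8')
assert best == 'application/json'   # the single MEDIA_JSON entry now covers both variants
```

In other words, after this change overriding the `MEDIA_JSON` entry alone is sufficient for charset-qualified requests as well.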
| {"golden_diff": "diff --git a/falcon/media/handlers.py b/falcon/media/handlers.py\n--- a/falcon/media/handlers.py\n+++ b/falcon/media/handlers.py\n@@ -1,7 +1,7 @@\n from collections import UserDict\n \n from falcon import errors\n-from falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED\n+from falcon.constants import MEDIA_JSON, MEDIA_MULTIPART, MEDIA_URLENCODED\n from falcon.media.json import JSONHandler\n from falcon.media.multipart import MultipartFormHandler, MultipartParseOptions\n from falcon.media.urlencoded import URLEncodedFormHandler\n@@ -12,8 +12,7 @@\n \"\"\"A :class:`dict`-like object that manages Internet media type handlers.\"\"\"\n def __init__(self, initial=None):\n handlers = initial or {\n- 'application/json': JSONHandler(),\n- 'application/json; charset=UTF-8': JSONHandler(),\n+ MEDIA_JSON: JSONHandler(),\n MEDIA_MULTIPART: MultipartFormHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }\n@@ -60,7 +59,6 @@\n \n # NOTE(vytas): An ugly way to work around circular imports.\n MultipartParseOptions._DEFAULT_HANDLERS = Handlers({\n- 'application/json': JSONHandler(),\n- 'application/json; charset=UTF-8': JSONHandler(),\n+ MEDIA_JSON: JSONHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }) # type: ignore\n", "issue": "Custom media handlers: Unexpected issue when providing custom json handler\nThis is in falcon-2.0\r\n\r\nLook at the documentation [here][1] for using rapidjson for encoding/decoding json. By providing:\r\n\r\n`extra_handlers={'application/json': json_handler}` we are still left with the default handler for content-type `application-json; charset=UTF-8`. This results in an unexpected behaviour when some client library (e.g. Retrofit for Android) includes the charset in the header. \r\n\r\nWhile the documentation should be updated, the expected behaviour is that if the handler for `application/json` is updated - it should also update the handler for variant with charset (or at least throw a warning) otherwise there is a possibility of hidden bugs. \r\n\r\n[1]: https://falcon.readthedocs.io/en/stable/api/media.html\n", "before_files": [{"content": "from collections import UserDict\n\nfrom falcon import errors\nfrom falcon.constants import MEDIA_MULTIPART, MEDIA_URLENCODED\nfrom falcon.media.json import JSONHandler\nfrom falcon.media.multipart import MultipartFormHandler, MultipartParseOptions\nfrom falcon.media.urlencoded import URLEncodedFormHandler\nfrom falcon.vendor import mimeparse\n\n\nclass Handlers(UserDict):\n \"\"\"A :class:`dict`-like object that manages Internet media type handlers.\"\"\"\n def __init__(self, initial=None):\n handlers = initial or {\n 'application/json': JSONHandler(),\n 'application/json; charset=UTF-8': JSONHandler(),\n MEDIA_MULTIPART: MultipartFormHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n }\n\n # NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable.\n # Also, this results in self.update(...) 
being called.\n UserDict.__init__(self, handlers)\n\n def _resolve_media_type(self, media_type, all_media_types):\n resolved = None\n\n try:\n # NOTE(jmvrbanac): Mimeparse will return an empty string if it can\n # parse the media type, but cannot find a suitable type.\n resolved = mimeparse.best_match(\n all_media_types,\n media_type\n )\n except ValueError:\n pass\n\n return resolved\n\n def find_by_media_type(self, media_type, default):\n # PERF(jmvrbanac): Check via a quick methods first for performance\n if media_type == '*/*' or not media_type:\n media_type = default\n\n try:\n return self.data[media_type]\n except KeyError:\n pass\n\n # PERF(jmvrbanac): Fallback to the slower method\n resolved = self._resolve_media_type(media_type, self.data.keys())\n\n if not resolved:\n raise errors.HTTPUnsupportedMediaType(\n description='{0} is an unsupported media type.'.format(media_type)\n )\n\n return self.data[resolved]\n\n\n# NOTE(vytas): An ugly way to work around circular imports.\nMultipartParseOptions._DEFAULT_HANDLERS = Handlers({\n 'application/json': JSONHandler(),\n 'application/json; charset=UTF-8': JSONHandler(),\n MEDIA_URLENCODED: URLEncodedFormHandler(),\n}) # type: ignore\n", "path": "falcon/media/handlers.py"}]} | 1,351 | 341 |
gh_patches_debug_29595 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7414 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log level `DEPRECATION` is documented but not working
## Description of the issue
Log level `DEPRECATION` is documented but not working.
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```5.7.0```
* Version of Python: 3.10.6
* Platform: Ubuntu 22.04
* How you installed Python: apt
* Did you also try this on another platform? Does it work there? yes, same thing
* try the latest development version, using the following command: yes, same thing
### A minimal example program which shows the error
```
$ pyinstaller --help | grep -U1 DEPREC
--log-level LEVEL Amount of detail in build-time console messages. LEVEL
may be one of TRACE, DEBUG, INFO, WARN, DEPRECATION,
ERROR, CRITICAL (default: INFO). Also settable via and
$ pyinstaller --log-level DEPRECATION .
[...]
pyinstaller: error: Unknown log level `DEPRECATION`
```
</issue>
<code>
[start of PyInstaller/log.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2023, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11 """
12 Logging module for PyInstaller.
13 """
14
15 __all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL', 'DEPRECATION']
16
17 import os
18 import logging
19 from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger
20
21 TRACE = logging.TRACE = DEBUG - 5
22 logging.addLevelName(TRACE, 'TRACE')
23 DEPRECATION = WARN + 5
24 logging.addLevelName(DEPRECATION, 'DEPRECATION')
25 LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')
26
27 FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'
28 _env_level = os.environ.get("PYI_LOG_LEVEL", "INFO")
29 try:
30 level = getattr(logging, _env_level.upper())
31 except AttributeError:
32 raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.")
33 logging.basicConfig(format=FORMAT, level=level)
34 logger = getLogger('PyInstaller')
35
36
37 def __add_options(parser):
38 parser.add_argument(
39 '--log-level',
40 choices=LEVELS,
41 metavar="LEVEL",
42 dest='loglevel',
43 help='Amount of detail in build-time console messages. LEVEL may be one of %s (default: INFO). '
44 'Also settable via and overrides the PYI_LOG_LEVEL environment variable.' % ', '.join(LEVELS),
45 )
46
47
48 def __process_options(parser, opts):
49 if opts.loglevel:
50 try:
51 level = opts.loglevel.upper()
52 _level = getattr(logging, level)
53 except AttributeError:
54 parser.error('Unknown log level `%s`' % opts.loglevel)
55 logger.setLevel(_level)
56 os.environ["PYI_LOG_LEVEL"] = level
57
[end of PyInstaller/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/log.py b/PyInstaller/log.py
--- a/PyInstaller/log.py
+++ b/PyInstaller/log.py
@@ -18,18 +18,26 @@
import logging
from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger
-TRACE = logging.TRACE = DEBUG - 5
+TRACE = DEBUG - 5
logging.addLevelName(TRACE, 'TRACE')
DEPRECATION = WARN + 5
logging.addLevelName(DEPRECATION, 'DEPRECATION')
-LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')
+LEVELS = {
+ 'TRACE': TRACE,
+ 'DEBUG': DEBUG,
+ 'INFO': INFO,
+ 'WARN': WARN,
+ 'DEPRECATION': DEPRECATION,
+ 'ERROR': ERROR,
+ 'FATAL': FATAL,
+}
FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'
_env_level = os.environ.get("PYI_LOG_LEVEL", "INFO")
try:
- level = getattr(logging, _env_level.upper())
-except AttributeError:
- raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.")
+ level = LEVELS[_env_level.upper()]
+except KeyError:
+ raise SystemExit(f"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {list(LEVELS)}.")
logging.basicConfig(format=FORMAT, level=level)
logger = getLogger('PyInstaller')
@@ -49,8 +57,8 @@
if opts.loglevel:
try:
level = opts.loglevel.upper()
- _level = getattr(logging, level)
- except AttributeError:
+ _level = LEVELS[level]
+ except KeyError:
parser.error('Unknown log level `%s`' % opts.loglevel)
logger.setLevel(_level)
os.environ["PYI_LOG_LEVEL"] = level
| {"golden_diff": "diff --git a/PyInstaller/log.py b/PyInstaller/log.py\n--- a/PyInstaller/log.py\n+++ b/PyInstaller/log.py\n@@ -18,18 +18,26 @@\n import logging\n from logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger\n \n-TRACE = logging.TRACE = DEBUG - 5\n+TRACE = DEBUG - 5\n logging.addLevelName(TRACE, 'TRACE')\n DEPRECATION = WARN + 5\n logging.addLevelName(DEPRECATION, 'DEPRECATION')\n-LEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')\n+LEVELS = {\n+ 'TRACE': TRACE,\n+ 'DEBUG': DEBUG,\n+ 'INFO': INFO,\n+ 'WARN': WARN,\n+ 'DEPRECATION': DEPRECATION,\n+ 'ERROR': ERROR,\n+ 'FATAL': FATAL,\n+}\n \n FORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'\n _env_level = os.environ.get(\"PYI_LOG_LEVEL\", \"INFO\")\n try:\n- level = getattr(logging, _env_level.upper())\n-except AttributeError:\n- raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.\")\n+ level = LEVELS[_env_level.upper()]\n+except KeyError:\n+ raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {list(LEVELS)}.\")\n logging.basicConfig(format=FORMAT, level=level)\n logger = getLogger('PyInstaller')\n \n@@ -49,8 +57,8 @@\n if opts.loglevel:\n try:\n level = opts.loglevel.upper()\n- _level = getattr(logging, level)\n- except AttributeError:\n+ _level = LEVELS[level]\n+ except KeyError:\n parser.error('Unknown log level `%s`' % opts.loglevel)\n logger.setLevel(_level)\n os.environ[\"PYI_LOG_LEVEL\"] = level\n", "issue": "Log level `DEPRECATION` is documented but not working\n## Description of the issue\r\n\r\nLog level `DEPRECATION` is documented but not working.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.7.0```\r\n* Version of Python: 3.10.6\r\n* Platform: Ubuntu 22.04\r\n* How you installed Python: apt\r\n* Did you also try this on another platform? Does it work there? yes, same thing\r\n* try the latest development version, using the following command: yes, same thing\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\n$ pyinstaller --help | grep -U1 DEPREC\r\n --log-level LEVEL Amount of detail in build-time console messages. LEVEL\r\n may be one of TRACE, DEBUG, INFO, WARN, DEPRECATION,\r\n ERROR, CRITICAL (default: INFO). 
Also settable via and\r\n$ pyinstaller --log-level DEPRECATION .\r\n[...]\r\npyinstaller: error: Unknown log level `DEPRECATION`\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nLogging module for PyInstaller.\n\"\"\"\n\n__all__ = ['getLogger', 'INFO', 'WARN', 'DEBUG', 'TRACE', 'ERROR', 'FATAL', 'DEPRECATION']\n\nimport os\nimport logging\nfrom logging import DEBUG, ERROR, FATAL, INFO, WARN, getLogger\n\nTRACE = logging.TRACE = DEBUG - 5\nlogging.addLevelName(TRACE, 'TRACE')\nDEPRECATION = WARN + 5\nlogging.addLevelName(DEPRECATION, 'DEPRECATION')\nLEVELS = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'DEPRECATION', 'ERROR', 'CRITICAL')\n\nFORMAT = '%(relativeCreated)d %(levelname)s: %(message)s'\n_env_level = os.environ.get(\"PYI_LOG_LEVEL\", \"INFO\")\ntry:\n level = getattr(logging, _env_level.upper())\nexcept AttributeError:\n raise SystemExit(f\"Invalid PYI_LOG_LEVEL value '{_env_level}'. Should be one of {LEVELS}.\")\nlogging.basicConfig(format=FORMAT, level=level)\nlogger = getLogger('PyInstaller')\n\n\ndef __add_options(parser):\n parser.add_argument(\n '--log-level',\n choices=LEVELS,\n metavar=\"LEVEL\",\n dest='loglevel',\n help='Amount of detail in build-time console messages. LEVEL may be one of %s (default: INFO). '\n 'Also settable via and overrides the PYI_LOG_LEVEL environment variable.' % ', '.join(LEVELS),\n )\n\n\ndef __process_options(parser, opts):\n if opts.loglevel:\n try:\n level = opts.loglevel.upper()\n _level = getattr(logging, level)\n except AttributeError:\n parser.error('Unknown log level `%s`' % opts.loglevel)\n logger.setLevel(_level)\n os.environ[\"PYI_LOG_LEVEL\"] = level\n", "path": "PyInstaller/log.py"}]} | 1,348 | 439 |
gh_patches_debug_9449 | rasdani/github-patches | git_diff | mirumee__ariadne-523 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove superfluous schema validation
It turns out that `validate_schema` called in https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L26 is not needed here.
In the other hand, `assert_validate_schema` is called here: https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L40 which is sufficient.
Fixes #523
</issue>
<code>
[start of ariadne/executable_schema.py]
1 from typing import Dict, List, Type, Union
2
3 from graphql import (
4 GraphQLSchema,
5 assert_valid_schema,
6 build_ast_schema,
7 parse,
8 validate_schema,
9 )
10
11 from .enums import set_default_enum_values_on_schema
12 from .schema_visitor import SchemaDirectiveVisitor
13 from .types import SchemaBindable
14
15
16 def make_executable_schema(
17 type_defs: Union[str, List[str]],
18 *bindables: Union[SchemaBindable, List[SchemaBindable]],
19 directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,
20 ) -> GraphQLSchema:
21 if isinstance(type_defs, list):
22 type_defs = join_type_defs(type_defs)
23
24 ast_document = parse(type_defs)
25 schema = build_ast_schema(ast_document)
26 validate_schema(schema)
27
28 for bindable in bindables:
29 if isinstance(bindable, list):
30 for obj in bindable:
31 obj.bind_to_schema(schema)
32 else:
33 bindable.bind_to_schema(schema)
34
35 set_default_enum_values_on_schema(schema)
36
37 if directives:
38 SchemaDirectiveVisitor.visit_schema_directives(schema, directives)
39
40 assert_valid_schema(schema)
41
42 return schema
43
44
45 def join_type_defs(type_defs: List[str]) -> str:
46 return "\n\n".join(t.strip() for t in type_defs)
47
[end of ariadne/executable_schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py
--- a/ariadne/executable_schema.py
+++ b/ariadne/executable_schema.py
@@ -5,7 +5,6 @@
assert_valid_schema,
build_ast_schema,
parse,
- validate_schema,
)
from .enums import set_default_enum_values_on_schema
@@ -23,7 +22,6 @@
ast_document = parse(type_defs)
schema = build_ast_schema(ast_document)
- validate_schema(schema)
for bindable in bindables:
if isinstance(bindable, list):
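Why the dropped call was superfluous (a sketch of the graphql-core semantics): `validate_schema` returns a list of errors and never raises, so calling it without inspecting the result is a no-op, while `assert_valid_schema` — which `make_executable_schema` already calls after binding — is the variant that actually raises:

```python
from graphql import assert_valid_schema, build_schema, validate_schema

schema = build_schema("type Query { hello: String }")

assert validate_schema(schema) == []   # returns a list of errors, never raises
assert_valid_schema(schema)            # raises GraphQLError on an invalid schema
```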
| {"golden_diff": "diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -5,7 +5,6 @@\n assert_valid_schema,\n build_ast_schema,\n parse,\n- validate_schema,\n )\n \n from .enums import set_default_enum_values_on_schema\n@@ -23,7 +22,6 @@\n \n ast_document = parse(type_defs)\n schema = build_ast_schema(ast_document)\n- validate_schema(schema)\n \n for bindable in bindables:\n if isinstance(bindable, list):\n", "issue": "Remove superfluous schema validation\nIt turns out that `validate_schema` called in https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L26 is not needed here. \r\nIn the other hand, `assert_validate_schema` is called here: https://github.com/mirumee/ariadne/blob/master/ariadne/executable_schema.py#L40 which is sufficient. \r\n\r\nFixes #523 \n", "before_files": [{"content": "from typing import Dict, List, Type, Union\n\nfrom graphql import (\n GraphQLSchema,\n assert_valid_schema,\n build_ast_schema,\n parse,\n validate_schema,\n)\n\nfrom .enums import set_default_enum_values_on_schema\nfrom .schema_visitor import SchemaDirectiveVisitor\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n *bindables: Union[SchemaBindable, List[SchemaBindable]],\n directives: Dict[str, Type[SchemaDirectiveVisitor]] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_ast_schema(ast_document)\n validate_schema(schema)\n\n for bindable in bindables:\n if isinstance(bindable, list):\n for obj in bindable:\n obj.bind_to_schema(schema)\n else:\n bindable.bind_to_schema(schema)\n\n set_default_enum_values_on_schema(schema)\n\n if directives:\n SchemaDirectiveVisitor.visit_schema_directives(schema, directives)\n\n assert_valid_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n", "path": "ariadne/executable_schema.py"}]} | 999 | 139 |
gh_patches_debug_51325 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6307 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Undefined names in Python code found with flake8
## Description
## Way to reproduce
[flake8](http://flake8.pycqa.org) testing of https://github.com/scikit-image/scikit-image on Python 3.7.1
$ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__
```
./skimage/measure/mc_meta/createluts.py:139:18: F821 undefined name 'luts'
for a in dir(luts):
^
./doc/ext/notebook_doc.py:1:1: F822 undefined name 'python_to_notebook' in __all__
__all__ = ['python_to_notebook', 'Notebook']
^
1 F821 undefined name 'luts'
1 F822 undefined name 'python_to_notebook' in __all__
2
```
__E901,E999,F821,F822,F823__ are the "_showstopper_" [flake8](http://flake8.pycqa.org) issues that can halt the runtime with a SyntaxError, NameError, etc. These 5 are different from most other flake8 issues which are merely "style violations" -- useful for readability but they do not effect runtime safety.
* F821: undefined name `name`
* F822: undefined name `name` in `__all__`
* F823: local variable name referenced before assignment
* E901: SyntaxError or IndentationError
* E999: SyntaxError -- failed to compile a file into an Abstract Syntax Tree
</issue>
<code>
[start of doc/ext/notebook_doc.py]
1 __all__ = ['python_to_notebook', 'Notebook']
2
3 import json
4 import copy
5 import warnings
6
7
8 # Skeleton notebook in JSON format
9 skeleton_nb = """{
10 "metadata": {
11 "name":""
12 },
13 "nbformat": 3,
14 "nbformat_minor": 0,
15 "worksheets": [
16 {
17 "cells": [
18 {
19 "cell_type": "code",
20 "collapsed": false,
21 "input": [
22 "%matplotlib inline"
23 ],
24 "language": "python",
25 "metadata": {},
26 "outputs": []
27 }
28 ],
29 "metadata": {}
30 }
31 ]
32 }"""
33
34
35 class Notebook(object):
36 """
37 Notebook object for building an IPython notebook cell-by-cell.
38 """
39
40 def __init__(self):
41 # cell type code
42 self.cell_code = {
43 'cell_type': 'code',
44 'collapsed': False,
45 'input': [
46 '# Code Goes Here'
47 ],
48 'language': 'python',
49 'metadata': {},
50 'outputs': []
51 }
52
53 # cell type markdown
54 self.cell_md = {
55 'cell_type': 'markdown',
56 'metadata': {},
57 'source': [
58 'Markdown Goes Here'
59 ]
60 }
61
62 self.template = json.loads(skeleton_nb)
63 self.cell_type = {'input': self.cell_code, 'source': self.cell_md}
64 self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}
65
66 def add_cell(self, value, cell_type='code'):
67 """Add a notebook cell.
68
69 Parameters
70 ----------
71 value : str
72 Cell content.
73 cell_type : {'code', 'markdown'}
74 Type of content (default is 'code').
75
76 """
77 if cell_type in ['markdown', 'code']:
78 key = self.valuetype_to_celltype[cell_type]
79 cells = self.template['worksheets'][0]['cells']
80 cells.append(copy.deepcopy(self.cell_type[key]))
81 # assign value to the last cell
82 cells[-1][key] = value
83 else:
84 warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)
85
86 def json(self):
87 """Return a JSON representation of the notebook.
88
89 Returns
90 -------
91 str
92 JSON notebook.
93
94 """
95 return json.dumps(self.template, indent=2)
96
97
98
[end of doc/ext/notebook_doc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/ext/notebook_doc.py b/doc/ext/notebook_doc.py
--- a/doc/ext/notebook_doc.py
+++ b/doc/ext/notebook_doc.py
@@ -1,4 +1,4 @@
-__all__ = ['python_to_notebook', 'Notebook']
+__all__ = ['Notebook']
import json
import copy
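For readers unfamiliar with F822 (a hypothetical module, not from the repo): the check fires whenever `__all__` advertises a name the module never defines, which is exactly the situation the removed `'python_to_notebook'` entry created:

```python
# mod.py
__all__ = ['present', 'missing']   # flake8 mod.py -> F822 undefined name 'missing' in __all__

def present():
    return True
```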
| {"golden_diff": "diff --git a/doc/ext/notebook_doc.py b/doc/ext/notebook_doc.py\n--- a/doc/ext/notebook_doc.py\n+++ b/doc/ext/notebook_doc.py\n@@ -1,4 +1,4 @@\n-__all__ = ['python_to_notebook', 'Notebook']\n+__all__ = ['Notebook']\n \n import json\n import copy\n", "issue": "Undefined names in Python code found with flake8\n## Description\r\n\r\n\r\n## Way to reproduce\r\n[flake8](http://flake8.pycqa.org) testing of https://github.com/scikit-image/scikit-image on Python 3.7.1\r\n\r\n$ __flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics__\r\n```\r\n./skimage/measure/mc_meta/createluts.py:139:18: F821 undefined name 'luts'\r\n for a in dir(luts):\r\n ^\r\n./doc/ext/notebook_doc.py:1:1: F822 undefined name 'python_to_notebook' in __all__\r\n__all__ = ['python_to_notebook', 'Notebook']\r\n^\r\n1 F821 undefined name 'luts'\r\n1 F822 undefined name 'python_to_notebook' in __all__\r\n2\r\n```\r\n__E901,E999,F821,F822,F823__ are the \"_showstopper_\" [flake8](http://flake8.pycqa.org) issues that can halt the runtime with a SyntaxError, NameError, etc. These 5 are different from most other flake8 issues which are merely \"style violations\" -- useful for readability but they do not effect runtime safety.\r\n* F821: undefined name `name`\r\n* F822: undefined name `name` in `__all__`\r\n* F823: local variable name referenced before assignment\r\n* E901: SyntaxError or IndentationError\r\n* E999: SyntaxError -- failed to compile a file into an Abstract Syntax Tree\r\n\n", "before_files": [{"content": "__all__ = ['python_to_notebook', 'Notebook']\n\nimport json\nimport copy\nimport warnings\n\n\n# Skeleton notebook in JSON format\nskeleton_nb = \"\"\"{\n \"metadata\": {\n \"name\":\"\"\n },\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [\n \"%matplotlib inline\"\n ],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\"\"\"\n\n\nclass Notebook(object):\n \"\"\"\n Notebook object for building an IPython notebook cell-by-cell.\n \"\"\"\n\n def __init__(self):\n # cell type code\n self.cell_code = {\n 'cell_type': 'code',\n 'collapsed': False,\n 'input': [\n '# Code Goes Here'\n ],\n 'language': 'python',\n 'metadata': {},\n 'outputs': []\n }\n\n # cell type markdown\n self.cell_md = {\n 'cell_type': 'markdown',\n 'metadata': {},\n 'source': [\n 'Markdown Goes Here'\n ]\n }\n\n self.template = json.loads(skeleton_nb)\n self.cell_type = {'input': self.cell_code, 'source': self.cell_md}\n self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}\n\n def add_cell(self, value, cell_type='code'):\n \"\"\"Add a notebook cell.\n\n Parameters\n ----------\n value : str\n Cell content.\n cell_type : {'code', 'markdown'}\n Type of content (default is 'code').\n\n \"\"\"\n if cell_type in ['markdown', 'code']:\n key = self.valuetype_to_celltype[cell_type]\n cells = self.template['worksheets'][0]['cells']\n cells.append(copy.deepcopy(self.cell_type[key]))\n # assign value to the last cell\n cells[-1][key] = value\n else:\n warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)\n\n def json(self):\n \"\"\"Return a JSON representation of the notebook.\n\n Returns\n -------\n str\n JSON notebook.\n\n \"\"\"\n return json.dumps(self.template, indent=2)\n\n\n", "path": "doc/ext/notebook_doc.py"}]} | 1,600 | 82 |
gh_patches_debug_22583 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Avoid quadratic scaling of template integration tests
#### Issue description
Currently, in ``test_templates.py`` a compatibility test is performed by having every template in the library (of the same device) applied before every other one - a quadratic growth of test cases in the number of templates. This becomes prohibitive, and we should find another solution that tests templates' compatibility.
#### Additional information
The issue could be easily fixed by defining small dummy templates that are called before and after the tested template. We could also try to make the tested templates pass parameters to each other.
</issue>
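One way to realize the dummy-template idea from the issue (a sketch under the assumption that linear coverage suffices; the helper names are made up): wrap each template under test between two fixed layers instead of pairing every template with every other, turning O(n²) cases into O(n):

```python
import pennylane as qml

def dummy_pre(wires):
    for w in wires:
        qml.Hadamard(wires=w)

def dummy_post(wires):
    for w in wires:
        qml.PauliZ(wires=w)

# In each integration test:
#     dummy_pre(wires); template_under_test(...); dummy_post(wires)
```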
<code>
[start of pennylane/templates/embeddings/basis.py]
1 # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 Contains the ``BasisEmbedding`` template.
16 """
17 # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
18 import numpy as np
19
20 from pennylane.templates.decorator import template
21 from pennylane.ops import BasisState
22 from pennylane.templates.utils import check_shape, check_wires, get_shape
23
24
25 @template
26 def BasisEmbedding(features, wires):
27 r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits.
28
29 For example, for ``features=np.array([0, 1, 0])``, the quantum system will be
30 prepared in state :math:`|010 \rangle`.
31
32 .. warning::
33
34 ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.
35 The ``features`` argument is therefore not differentiable when using the template, and
36 gradients with respect to the argument cannot be computed by PennyLane.
37
38 Args:
39 features (array): binary input array of shape ``(n, )``
40 wires (Sequence[int] or int): qubit indices that the template acts on
41
42 Raises:
43 ValueError: if inputs do not have the correct format
44 """
45
46 #############
47 # Input checks
48
49 wires = check_wires(wires)
50
51 expected_shape = (len(wires),)
52 check_shape(
53 features,
54 expected_shape,
55 msg="'features' must be of shape {}; got {}" "".format(expected_shape, get_shape(features)),
56 )
57
58 if any([b not in [0, 1] for b in features]):
59 raise ValueError("'basis_state' must only consist of 0s and 1s; got {}".format(features))
60
61 ###############
62
63 features = np.array(features)
64 BasisState(features, wires=wires)
65
[end of pennylane/templates/embeddings/basis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py
--- a/pennylane/templates/embeddings/basis.py
+++ b/pennylane/templates/embeddings/basis.py
@@ -15,11 +15,11 @@
Contains the ``BasisEmbedding`` template.
"""
# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
-import numpy as np
+from collections import Iterable
from pennylane.templates.decorator import template
-from pennylane.ops import BasisState
-from pennylane.templates.utils import check_shape, check_wires, get_shape
+from pennylane.templates.utils import check_shape, check_wires, get_shape, check_type
+import pennylane as qml
@template
@@ -48,6 +48,10 @@
wires = check_wires(wires)
+ check_type(
+ features, [Iterable], msg="'features' must be iterable; got type {}".format(type(features))
+ )
+
expected_shape = (len(wires),)
check_shape(
features,
@@ -60,5 +64,6 @@
###############
- features = np.array(features)
- BasisState(features, wires=wires)
+ for wire, bit in zip(wires, features):
+ if bit == 1:
+ qml.PauliX(wire)
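A quick usage sketch of the patched template (PennyLane API of that era; assumes the diff above is applied): because the embedding now decomposes into plain `PauliX` gates rather than a `BasisState` preparation, it can follow other operations in a circuit:

```python
import pennylane as qml

dev = qml.device('default.qubit', wires=3)

@qml.qnode(dev)
def circuit():
    qml.Hadamard(wires=0)  # a preceding operation is fine: no state prep is involved
    qml.templates.BasisEmbedding(features=[1, 0, 1], wires=[0, 1, 2])
    return qml.probs(wires=[0, 1, 2])
```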
| {"golden_diff": "diff --git a/pennylane/templates/embeddings/basis.py b/pennylane/templates/embeddings/basis.py\n--- a/pennylane/templates/embeddings/basis.py\n+++ b/pennylane/templates/embeddings/basis.py\n@@ -15,11 +15,11 @@\n Contains the ``BasisEmbedding`` template.\n \"\"\"\n # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\n-import numpy as np\n+from collections import Iterable\n \n from pennylane.templates.decorator import template\n-from pennylane.ops import BasisState\n-from pennylane.templates.utils import check_shape, check_wires, get_shape\n+from pennylane.templates.utils import check_shape, check_wires, get_shape, check_type\n+import pennylane as qml\n \n \n @template\n@@ -48,6 +48,10 @@\n \n wires = check_wires(wires)\n \n+ check_type(\n+ features, [Iterable], msg=\"'features' must be iterable; got type {}\".format(type(features))\n+ )\n+\n expected_shape = (len(wires),)\n check_shape(\n features,\n@@ -60,5 +64,6 @@\n \n ###############\n \n- features = np.array(features)\n- BasisState(features, wires=wires)\n+ for wire, bit in zip(wires, features):\n+ if bit == 1:\n+ qml.PauliX(wire)\n", "issue": "Avoid quadratic scaling of template integration tests\n#### Issue description\r\n\r\nCurrently, in ``test_templates.py`` a compatibility test is performed by having every template in the library (of the same device) applied before every other one - a quadratic growth of test cases in the number of templates. This becomes prohibitive, and we should find another solution that tests templates' compatibility.\r\n\r\n#### Additional information\r\n\r\nThe issue could be easily fixed by defining small dummy templates that are called before and after the tested template. We could also try to make the tested templates pass parameters to each other.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the ``BasisEmbedding`` template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport numpy as np\n\nfrom pennylane.templates.decorator import template\nfrom pennylane.ops import BasisState\nfrom pennylane.templates.utils import check_shape, check_wires, get_shape\n\n\n@template\ndef BasisEmbedding(features, wires):\n r\"\"\"Encodes :math:`n` binary features into a basis state of :math:`n` qubits.\n\n For example, for ``features=np.array([0, 1, 0])``, the quantum system will be\n prepared in state :math:`|010 \\rangle`.\n\n .. 
warning::\n\n ``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.\n The ``features`` argument is therefore not differentiable when using the template, and\n gradients with respect to the argument cannot be computed by PennyLane.\n\n Args:\n features (array): binary input array of shape ``(n, )``\n wires (Sequence[int] or int): qubit indices that the template acts on\n\n Raises:\n ValueError: if inputs do not have the correct format\n \"\"\"\n\n #############\n # Input checks\n\n wires = check_wires(wires)\n\n expected_shape = (len(wires),)\n check_shape(\n features,\n expected_shape,\n msg=\"'features' must be of shape {}; got {}\" \"\".format(expected_shape, get_shape(features)),\n )\n\n if any([b not in [0, 1] for b in features]):\n raise ValueError(\"'basis_state' must only consist of 0s and 1s; got {}\".format(features))\n\n ###############\n\n features = np.array(features)\n BasisState(features, wires=wires)\n", "path": "pennylane/templates/embeddings/basis.py"}]} | 1,312 | 317 |
gh_patches_debug_11080 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-6581 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation on Armv6l creates a Linux-32bit-unknown bootloader.
## Description of the issue
When installing on an RPi 1B, an incorrectly named bootloader directory is created: it is called Linux-32bit-unknown instead of Linux-32bit-arm.
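For illustration (not part of the original report), here is a minimal sketch of the expected mapping, mirroring the arm branch of `_pyi_machine()` shown in the code section below; on a Pi 1B, `platform.machine()` reports `armv6l`:

```python
import platform

def simplified_arch(machine):
    # Mirrors the relevant branch of _pyi_machine(): any arm/aarch
    # machine string should collapse to the "arm" tag.
    if machine.startswith(("arm", "aarch")):
        return "arm"
    return "unknown"

print(platform.machine())         # "armv6l" on a Raspberry Pi 1B
print(simplified_arch("armv6l"))  # "arm" -- yet the directory name says "unknown"
```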
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```4.8```
* Version of Python: <!-- e.g. 3.7 --> 3.7
* Platform: <!-- e.g GNU/Linux (distribution), Windows (language settings), OS X, FreeBSD --> Raspbian GNU/Linux 10 (buster)
* How you installed Python: <!-- e.g. python.org/downloads, conda, brew, pyenv, apt, Windows store --> pip3 install pyinstaller
* Did you also try this on another platform? Does it work there? yes.
* [x] start with clean installation
* [x] use the latest development version
</issue>
<code>
[start of PyInstaller/_shared_with_waf.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11 """
12 Code to be shared by PyInstaller and the bootloader/wscript file.
13
14 This code must not assume that either PyInstaller or any of its dependencies installed. I.e., the only imports allowed
15 in here are standard library ones. Within reason, it is preferable that this file should still run under Python 2.7 as
16 many compiler docker images still have only Python 2 installed.
17 """
18
19 import platform
20 import re
21
22
23 def _pyi_machine(machine, system):
24 # type: (str, str) -> str
25 """
26 Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.
27
28 Args:
29 machine:
30 The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a
31 C compiler.
32 system:
33 The output of ``platform.system()`` on the target machine.
34 Returns:
35 Either a string tag or, on platforms that don't need an architecture tag, ``None``.
36
37 Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost
38 impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based
39 only on the machine name alias or shorthand reported by the C compiler at the build time. Rather, use a loose
40 differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.
41 """
42 # See the corresponding tests in tests/unit/test_compat.py for examples.
43
44 if platform.machine() == "sw_64" or platform.machine() == "loongarch64":
45 # This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.
46 return platform.machine()
47
48 if system != "Linux":
49 # No architecture specifier for anything par Linux.
50 # - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless
51 # and painful to give Windows an architecture specifier.
52 # - macOS is on two 64 bit architectures, but they are merged into one "universal2" bootloader.
53 # - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our
54 # BSD users are on x86_64. This may change in the distant future.
55 return
56
57 if machine.startswith(("arm", "aarch")):
58 # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.
59 return "arm"
60 if machine in ("x86_64", "x64", "x86"):
61 return "intel"
62 if re.fullmatch("i[1-6]86", machine):
63 return "intel"
64 if machine.startswith(("ppc", "powerpc")):
65 # PowerPC comes in 64 vs 32 bit and little vs big endian variants.
66 return "ppc"
67 if machine in ("mips64", "mips"):
68 return "mips"
69 # Machines with no known aliases :)
70 if machine in ("s390x",):
71 return machine
72
73 # Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to
74 # have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently
75 # unlikely to ever happen.
76 return "unknown"
77
[end of PyInstaller/_shared_with_waf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/_shared_with_waf.py b/PyInstaller/_shared_with_waf.py
--- a/PyInstaller/_shared_with_waf.py
+++ b/PyInstaller/_shared_with_waf.py
@@ -57,6 +57,10 @@
if machine.startswith(("arm", "aarch")):
# ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.
return "arm"
+ if machine in ("thumb"):
+ # Reported by waf/gcc when Thumb instruction set is enabled on 32-bit ARM. The platform.machine() returns "arm"
+ # regardless of the instruction set.
+ return "arm"
if machine in ("x86_64", "x64", "x86"):
return "intel"
if re.fullmatch("i[1-6]86", machine):
| {"golden_diff": "diff --git a/PyInstaller/_shared_with_waf.py b/PyInstaller/_shared_with_waf.py\n--- a/PyInstaller/_shared_with_waf.py\n+++ b/PyInstaller/_shared_with_waf.py\n@@ -57,6 +57,10 @@\n if machine.startswith((\"arm\", \"aarch\")):\n # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.\n return \"arm\"\n+ if machine in (\"thumb\"):\n+ # Reported by waf/gcc when Thumb instruction set is enabled on 32-bit ARM. The platform.machine() returns \"arm\"\n+ # regardless of the instruction set.\n+ return \"arm\"\n if machine in (\"x86_64\", \"x64\", \"x86\"):\n return \"intel\"\n if re.fullmatch(\"i[1-6]86\", machine):\n", "issue": "Installation on Armv6l creates a Linux-32bit-unknown bootloader.\n## Description of the issue\r\nWhen installing on a RPi 1b you get an incorrectly named bootloader created. The directory is called Linux-32bit-unknown and not Linux-32bit-arm.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.8```\r\n* Version of Python: <!-- e.g. 3.7 --> 3.7\r\n* Platform: <!-- e.g GNU/Linux (distribution), Windows (language settings), OS X, FreeBSD --> Raspbian GNU/Linux 10 (buster)* How you installed Python: <!-- e.g. python.org/downloads, conda, brew, pyenv, apt, Windows store -->pip3 install pyinstaller\r\n* Did you also try this on another platform? Does it work there? yes.\r\n\r\n\r\n\r\n * [x] start with clean installation\r\n * [x] use the latest development version\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\nCode to be shared by PyInstaller and the bootloader/wscript file.\n\nThis code must not assume that either PyInstaller or any of its dependencies installed. I.e., the only imports allowed\nin here are standard library ones. Within reason, it is preferable that this file should still run under Python 2.7 as\nmany compiler docker images still have only Python 2 installed.\n\"\"\"\n\nimport platform\nimport re\n\n\ndef _pyi_machine(machine, system):\n # type: (str, str) -> str\n \"\"\"\n Choose an intentionally simplified architecture identifier to be used in the bootloader's directory name.\n\n Args:\n machine:\n The output of ``platform.machine()`` or any known architecture alias or shorthand that may be used by a\n C compiler.\n system:\n The output of ``platform.system()`` on the target machine.\n Returns:\n Either a string tag or, on platforms that don't need an architecture tag, ``None``.\n\n Ideally, we would just use ``platform.machine()`` directly, but that makes cross-compiling the bootloader almost\n impossible, because you need to know at compile time exactly what ``platform.machine()`` will be at run time, based\n only on the machine name alias or shorthand reported by the C compiler at the build time. 
Rather, use a loose\n differentiation, and trust that anyone mixing armv6l with armv6h knows what they are doing.\n \"\"\"\n # See the corresponding tests in tests/unit/test_compat.py for examples.\n\n if platform.machine() == \"sw_64\" or platform.machine() == \"loongarch64\":\n # This explicitly inhibits cross compiling the bootloader for or on SunWay and LoongArch machine.\n return platform.machine()\n\n if system != \"Linux\":\n # No architecture specifier for anything par Linux.\n # - Windows only has one 32 and one 64 bit architecture, but lots of aliases for each so it is both pointless\n # and painful to give Windows an architecture specifier.\n # - macOS is on two 64 bit architectures, but they are merged into one \"universal2\" bootloader.\n # - BSD supports a wide range of architectures, but according to PyPI's download statistics, every one of our\n # BSD users are on x86_64. This may change in the distant future.\n return\n\n if machine.startswith((\"arm\", \"aarch\")):\n # ARM has a huge number of similar and aliased sub-versions, such as armv5, armv6l armv8h, aarch64.\n return \"arm\"\n if machine in (\"x86_64\", \"x64\", \"x86\"):\n return \"intel\"\n if re.fullmatch(\"i[1-6]86\", machine):\n return \"intel\"\n if machine.startswith((\"ppc\", \"powerpc\")):\n # PowerPC comes in 64 vs 32 bit and little vs big endian variants.\n return \"ppc\"\n if machine in (\"mips64\", \"mips\"):\n return \"mips\"\n # Machines with no known aliases :)\n if machine in (\"s390x\",):\n return machine\n\n # Unknown architectures are allowed by default, but will all be placed under one directory. In theory, trying to\n # have multiple unknown architectures in one copy of PyInstaller will not work, but that should be sufficiently\n # unlikely to ever happen.\n return \"unknown\"\n", "path": "PyInstaller/_shared_with_waf.py"}]} | 1,738 | 211 |
gh_patches_debug_11874 | rasdani/github-patches | git_diff | kubeflow__pipelines-2213 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Component] GCP dataproc create_cluster component cannot correctly specify image_version.
Issue:
When specifying a non-null image version, the create_cluster component raises:
`<HttpError 400 when requesting
https://dataproc.googleapis.com/v1/projects/ml-pipeline-dogfood/regions/us-central1/clusters?alt=json&requestId=7c933fdacb068cd6811fb40b8334a3d4
returned "Invalid JSON payload received. Unknown name "softwareConfig" at 'cluster': Cannot find field.">`
Initial investigation shows that in [here](https://github.com/kubeflow/pipelines/blob/7dab30085e2edda6fb4ecb61a61c9f37664009a1/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py#L70), `softwareConfig` was specified as a top-layer member of cluster payload, but actually it should be a member of cluster['config']. See [this reference](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster).
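A minimal sketch of the corrected nesting, following the Dataproc REST reference above (values are placeholders):

```python
# imageVersion belongs under cluster['config']['softwareConfig'],
# not under a top-level cluster['softwareConfig'].
cluster = {
    "projectId": "my-project",      # placeholder
    "clusterName": "my-cluster",    # placeholder
    "config": {
        "softwareConfig": {
            "imageVersion": "1.4",  # placeholder
        },
    },
}
```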
Will work out a fix shortly.
</issue>
<code>
[start of components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import json
15
16 from fire import decorators
17 from ._client import DataprocClient
18 from kfp_component.core import KfpExecutionContext, display
19 from .. import common as gcp_common
20
21 @decorators.SetParseFns(image_version=str)
22 def create_cluster(project_id, region, name=None, name_prefix=None,
23 initialization_actions=None, config_bucket=None, image_version=None,
24 cluster=None, wait_interval=30):
25 """Creates a DataProc cluster under a project.
26
27 Args:
28 project_id (str): Required. The ID of the Google Cloud Platform project
29 that the cluster belongs to.
30 region (str): Required. The Cloud Dataproc region in which to handle the
31 request.
32 name (str): Optional. The cluster name. Cluster names within a project
33 must be unique. Names of deleted clusters can be reused.
34 name_prefix (str): Optional. The prefix of the cluster name.
35 initialization_actions (list): Optional. List of GCS URIs of executables
36 to execute on each node after config is completed. By default,
37 executables are run on master and all worker nodes.
38 config_bucket (str): Optional. A Google Cloud Storage bucket used to
39 stage job dependencies, config files, and job driver console output.
40 image_version (str): Optional. The version of software inside the cluster.
41 cluster (dict): Optional. The full cluster config. See [full details](
42 https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster)
43 wait_interval (int): The wait seconds between polling the operation.
44 Defaults to 30s.
45
46 Returns:
47 The created cluster object.
48
49 Output Files:
50 $KFP_OUTPUT_PATH/dataproc/cluster_name.txt: The cluster name of the
51 created cluster.
52 """
53 if not cluster:
54 cluster = {}
55 cluster['projectId'] = project_id
56 if 'config' not in cluster:
57 cluster['config'] = {}
58 if name:
59 cluster['clusterName'] = name
60 if initialization_actions:
61 cluster['config']['initializationActions'] = list(
62 map(lambda file: {
63 'executableFile': file
64 }, initialization_actions)
65 )
66 if config_bucket:
67 cluster['config']['configBucket'] = config_bucket
68 if image_version:
69 if 'softwareConfig' not in cluster:
70 cluster['softwareConfig'] = {}
71 cluster['softwareConfig']['imageVersion'] = image_version
72
73 return _create_cluster_internal(project_id, region, cluster, name_prefix,
74 wait_interval)
75
76 def _create_cluster_internal(project_id, region, cluster, name_prefix,
77 wait_interval):
78 client = DataprocClient()
79 operation_name = None
80 with KfpExecutionContext(
81 on_cancel=lambda: client.cancel_operation(operation_name)) as ctx:
82 _set_cluster_name(cluster, ctx.context_id(), name_prefix)
83 _dump_metadata(cluster, region)
84 operation = client.create_cluster(project_id, region, cluster,
85 request_id=ctx.context_id())
86 operation_name = operation.get('name')
87 operation = client.wait_for_operation_done(operation_name,
88 wait_interval)
89 return _dump_cluster(operation.get('response'))
90
91 def _set_cluster_name(cluster, context_id, name_prefix):
92 if 'clusterName' in cluster:
93 return
94 if not name_prefix:
95 name_prefix = 'cluster'
96 cluster['clusterName'] = name_prefix + '-' + context_id
97
98 def _dump_metadata(cluster, region):
99 display.display(display.Link(
100 'https://console.cloud.google.com/dataproc/clusters/{}?project={}®ion={}'.format(
101 cluster.get('clusterName'), cluster.get('projectId'), region),
102 'Cluster Details'
103 ))
104
105 def _dump_cluster(cluster):
106 gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster.json',
107 json.dumps(cluster))
108 gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster_name.txt',
109 cluster.get('clusterName'))
110 return cluster
111
[end of components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py
--- a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py
+++ b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py
@@ -66,9 +66,9 @@
if config_bucket:
cluster['config']['configBucket'] = config_bucket
if image_version:
- if 'softwareConfig' not in cluster:
- cluster['softwareConfig'] = {}
- cluster['softwareConfig']['imageVersion'] = image_version
+ if 'softwareConfig' not in cluster['config']:
+ cluster['config']['softwareConfig'] = {}
+ cluster['config']['softwareConfig']['imageVersion'] = image_version
return _create_cluster_internal(project_id, region, cluster, name_prefix,
wait_interval)
| {"golden_diff": "diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n--- a/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n+++ b/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py\n@@ -66,9 +66,9 @@\n if config_bucket:\n cluster['config']['configBucket'] = config_bucket\n if image_version:\n- if 'softwareConfig' not in cluster:\n- cluster['softwareConfig'] = {}\n- cluster['softwareConfig']['imageVersion'] = image_version\n+ if 'softwareConfig' not in cluster['config']:\n+ cluster['config']['softwareConfig'] = {}\n+ cluster['config']['softwareConfig']['imageVersion'] = image_version\n \n return _create_cluster_internal(project_id, region, cluster, name_prefix,\n wait_interval)\n", "issue": "[Component] GCP dataproc create_cluster component cannot correctly specify image_version.\nIssue:\r\nWhen specifying not-null image version, create_cluster component raises:\r\n`<HttpError 400 when requesting \r\nhttps://dataproc.googleapis.com/v1/projects/ml-pipeline-dogfood/regions/us-central1/clusters?alt=json&requestId=7c933fdacb068cd6811fb40b8334a3d4\r\n returned \"Invalid JSON payload received. Unknown name \"softwareConfig\" at 'cluster': Cannot find field.\">`\r\n\r\nInitial investigation shows that in [here](https://github.com/kubeflow/pipelines/blob/7dab30085e2edda6fb4ecb61a61c9f37664009a1/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py#L70), `softwareConfig` was specified as a top-layer member of cluster payload, but actually it should be a member of cluster['config']. See [this reference](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster).\r\n\r\nWill work out a fix shortly.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\n\nfrom fire import decorators\nfrom ._client import DataprocClient\nfrom kfp_component.core import KfpExecutionContext, display\nfrom .. import common as gcp_common\n\[email protected](image_version=str)\ndef create_cluster(project_id, region, name=None, name_prefix=None,\n initialization_actions=None, config_bucket=None, image_version=None,\n cluster=None, wait_interval=30):\n \"\"\"Creates a DataProc cluster under a project.\n\n Args:\n project_id (str): Required. The ID of the Google Cloud Platform project \n that the cluster belongs to.\n region (str): Required. The Cloud Dataproc region in which to handle the \n request.\n name (str): Optional. The cluster name. Cluster names within a project\n must be unique. Names of deleted clusters can be reused.\n name_prefix (str): Optional. The prefix of the cluster name.\n initialization_actions (list): Optional. List of GCS URIs of executables \n to execute on each node after config is completed. 
By default,\n executables are run on master and all worker nodes. \n config_bucket (str): Optional. A Google Cloud Storage bucket used to \n stage job dependencies, config files, and job driver console output.\n image_version (str): Optional. The version of software inside the cluster.\n cluster (dict): Optional. The full cluster config. See [full details](\n https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster)\n wait_interval (int): The wait seconds between polling the operation. \n Defaults to 30s.\n\n Returns:\n The created cluster object.\n\n Output Files:\n $KFP_OUTPUT_PATH/dataproc/cluster_name.txt: The cluster name of the \n created cluster.\n \"\"\"\n if not cluster:\n cluster = {}\n cluster['projectId'] = project_id\n if 'config' not in cluster:\n cluster['config'] = {}\n if name:\n cluster['clusterName'] = name\n if initialization_actions:\n cluster['config']['initializationActions'] = list(\n map(lambda file: {\n 'executableFile': file\n }, initialization_actions)\n )\n if config_bucket:\n cluster['config']['configBucket'] = config_bucket\n if image_version:\n if 'softwareConfig' not in cluster:\n cluster['softwareConfig'] = {}\n cluster['softwareConfig']['imageVersion'] = image_version\n\n return _create_cluster_internal(project_id, region, cluster, name_prefix,\n wait_interval)\n\ndef _create_cluster_internal(project_id, region, cluster, name_prefix, \n wait_interval):\n client = DataprocClient()\n operation_name = None\n with KfpExecutionContext(\n on_cancel=lambda: client.cancel_operation(operation_name)) as ctx:\n _set_cluster_name(cluster, ctx.context_id(), name_prefix)\n _dump_metadata(cluster, region)\n operation = client.create_cluster(project_id, region, cluster, \n request_id=ctx.context_id())\n operation_name = operation.get('name')\n operation = client.wait_for_operation_done(operation_name, \n wait_interval)\n return _dump_cluster(operation.get('response'))\n\ndef _set_cluster_name(cluster, context_id, name_prefix):\n if 'clusterName' in cluster:\n return\n if not name_prefix:\n name_prefix = 'cluster'\n cluster['clusterName'] = name_prefix + '-' + context_id\n\ndef _dump_metadata(cluster, region):\n display.display(display.Link(\n 'https://console.cloud.google.com/dataproc/clusters/{}?project={}®ion={}'.format(\n cluster.get('clusterName'), cluster.get('projectId'), region),\n 'Cluster Details'\n ))\n\ndef _dump_cluster(cluster):\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster.json', \n json.dumps(cluster))\n gcp_common.dump_file('/tmp/kfp/output/dataproc/cluster_name.txt',\n cluster.get('clusterName'))\n return cluster\n", "path": "components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py"}]} | 2,033 | 213 |
gh_patches_debug_3201 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-337 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC] Remove example.py in examples directory
The code in the example.py file currently reads a data frame from an external file called 'dirty_data.xlsx'.
We can change this to include a concrete example.
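One possible concrete example (illustrative; the frame is built inline instead of being read from 'dirty_data.xlsx'):

```python
import pandas as pd
import janitor  # noqa: F401 -- importing registers the DataFrame methods

df = pd.DataFrame({"First Name": ["Anna", None], "Last Name": ["Smith", None]})
df = df.clean_names().remove_empty()  # drops the all-empty second row
print(df.columns)  # Index(['first_name', 'last_name'], dtype='object')
```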
</issue>
<code>
[start of examples/example.py]
1 import pandas as pd
2
3 import janitor as jn
4
5 df = (
6 pd.read_excel("dirty_data.xlsx")
7 .clean_names()
8 .remove_empty()
9 .rename_column("%_allocated", "percent_allocated")
10 .rename_column("full_time_", "full_time")
11 .coalesce(["certification", "certification_1"], "certification")
12 .encode_categorical(["subject", "employee_status", "full_time"])
13 .convert_excel_date("hire_date")
14 )
15
16 print(df)
17 print(df.original_names)
18
[end of examples/example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/example.py b/examples/example.py
deleted file mode 100644
--- a/examples/example.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import pandas as pd
-
-import janitor as jn
-
-df = (
- pd.read_excel("dirty_data.xlsx")
- .clean_names()
- .remove_empty()
- .rename_column("%_allocated", "percent_allocated")
- .rename_column("full_time_", "full_time")
- .coalesce(["certification", "certification_1"], "certification")
- .encode_categorical(["subject", "employee_status", "full_time"])
- .convert_excel_date("hire_date")
-)
-
-print(df)
-print(df.original_names)
| {"golden_diff": "diff --git a/examples/example.py b/examples/example.py\ndeleted file mode 100644\n--- a/examples/example.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-import pandas as pd\n-\n-import janitor as jn\n-\n-df = (\n- pd.read_excel(\"dirty_data.xlsx\")\n- .clean_names()\n- .remove_empty()\n- .rename_column(\"%_allocated\", \"percent_allocated\")\n- .rename_column(\"full_time_\", \"full_time\")\n- .coalesce([\"certification\", \"certification_1\"], \"certification\")\n- .encode_categorical([\"subject\", \"employee_status\", \"full_time\"])\n- .convert_excel_date(\"hire_date\")\n-)\n-\n-print(df)\n-print(df.original_names)\n", "issue": "[DOC] Remove example.py in examples directory\nThe code in the example.py file currently reads some data frame from a file called 'dirty_data.xls'.\r\nWe can change this to include a concrete example.\n", "before_files": [{"content": "import pandas as pd\n\nimport janitor as jn\n\ndf = (\n pd.read_excel(\"dirty_data.xlsx\")\n .clean_names()\n .remove_empty()\n .rename_column(\"%_allocated\", \"percent_allocated\")\n .rename_column(\"full_time_\", \"full_time\")\n .coalesce([\"certification\", \"certification_1\"], \"certification\")\n .encode_categorical([\"subject\", \"employee_status\", \"full_time\"])\n .convert_excel_date(\"hire_date\")\n)\n\nprint(df)\nprint(df.original_names)\n", "path": "examples/example.py"}]} | 713 | 168 |
gh_patches_debug_24396 | rasdani/github-patches | git_diff | graspologic-org__graspologic-438 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove ari return value from AutoGMM.fit_predict
This doesn't match the rest of the API well; we should just get rid of it.
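A minimal sketch of the intended sklearn-style usage after the change (using `GaussianMixture` as a stand-in for the wrapped model):

```python
import numpy as np
from sklearn.metrics import adjusted_rand_score
from sklearn.mixture import GaussianMixture  # stand-in for the wrapped model

X = np.random.rand(20, 2)
y_true = np.array([0] * 10 + [1] * 10)

# fit_predict returns only the component labels ...
labels = GaussianMixture(n_components=2).fit_predict(X)
# ... and the caller computes ARI separately if true labels are available.
ari = adjusted_rand_score(y_true, labels)
```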
</issue>
<code>
[start of graspy/cluster/base.py]
1 # Copyright 2019 NeuroData (http://neurodata.io)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from abc import ABC, abstractmethod
16
17 from sklearn.base import BaseEstimator, ClusterMixin
18 from sklearn.metrics import adjusted_rand_score
19 from sklearn.utils.validation import check_is_fitted
20
21
22 class BaseCluster(ABC, BaseEstimator, ClusterMixin):
23 """
24 Base clustering class.
25 """
26
27 @abstractmethod
28 def fit(self, X, y=None):
29 """
30 Compute clusters based on given method.
31
32 Parameters
33 ----------
34 X : array-like, shape (n_samples, n_features)
35 List of n_features-dimensional data points. Each row
36 corresponds to a single data point.
37
38 y : array-like, shape (n_samples,), optional (default=None)
39 List of labels for X if available. Used to compute
40 ARI scores.
41
42 Returns
43 -------
44 self
45 """
46
47 def predict(self, X, y=None): # pragma: no cover
48 """
49 Predict clusters based on best model.
50
51 Parameters
52 ----------
53 X : array-like, shape (n_samples, n_features)
54 List of n_features-dimensional data points. Each row
55 corresponds to a single data point.
56 y : array-like, shape (n_samples, ), optional (default=None)
57 List of labels for X if available. Used to compute
58 ARI scores.
59
60 Returns
61 -------
62 labels : array, shape (n_samples,)
63 Component labels.
64
65 ari : float
66 Adjusted Rand index. Only returned if y is given.
67 """
68 # Check if fit is already called
69 check_is_fitted(self, ["model_"], all_or_any=all)
70 labels = self.model_.predict(X)
71
72 if y is None:
73 return labels
74 else:
75 ari = adjusted_rand_score(y, labels)
76 return labels, ari
77
78 def fit_predict(self, X, y=None): # pragma: no cover
79 """
80 Fit the models and predict clusters based on best model.
81
82 Parameters
83 ----------
84 X : array-like, shape (n_samples, n_features)
85 List of n_features-dimensional data points. Each row
86 corresponds to a single data point.
87
88 y : array-like, shape (n_samples,), optional (default=None)
89 List of labels for X if available. Used to compute
90 ARI scores.
91
92 Returns
93 -------
94 labels : array, shape (n_samples,)
95 Component labels.
96
97 ari : float
98 Adjusted Rand index. Only returned if y is given.
99 """
100 self.fit(X, y)
101
102 if y is None:
103 labels = self.predict(X, y)
104 return labels
105 else:
106 labels, ari = self.predict(X, y)
107 return labels, ari
108
[end of graspy/cluster/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/graspy/cluster/base.py b/graspy/cluster/base.py
--- a/graspy/cluster/base.py
+++ b/graspy/cluster/base.py
@@ -61,19 +61,12 @@
-------
labels : array, shape (n_samples,)
Component labels.
-
- ari : float
- Adjusted Rand index. Only returned if y is given.
"""
# Check if fit is already called
check_is_fitted(self, ["model_"], all_or_any=all)
labels = self.model_.predict(X)
- if y is None:
- return labels
- else:
- ari = adjusted_rand_score(y, labels)
- return labels, ari
+ return labels
def fit_predict(self, X, y=None): # pragma: no cover
"""
@@ -93,15 +86,8 @@
-------
labels : array, shape (n_samples,)
Component labels.
-
- ari : float
- Adjusted Rand index. Only returned if y is given.
"""
self.fit(X, y)
- if y is None:
- labels = self.predict(X, y)
- return labels
- else:
- labels, ari = self.predict(X, y)
- return labels, ari
+ labels = self.predict(X, y)
+ return labels
| {"golden_diff": "diff --git a/graspy/cluster/base.py b/graspy/cluster/base.py\n--- a/graspy/cluster/base.py\n+++ b/graspy/cluster/base.py\n@@ -61,19 +61,12 @@\n -------\n labels : array, shape (n_samples,)\n Component labels.\n-\n- ari : float\n- Adjusted Rand index. Only returned if y is given.\n \"\"\"\n # Check if fit is already called\n check_is_fitted(self, [\"model_\"], all_or_any=all)\n labels = self.model_.predict(X)\n \n- if y is None:\n- return labels\n- else:\n- ari = adjusted_rand_score(y, labels)\n- return labels, ari\n+ return labels\n \n def fit_predict(self, X, y=None): # pragma: no cover\n \"\"\"\n@@ -93,15 +86,8 @@\n -------\n labels : array, shape (n_samples,)\n Component labels.\n-\n- ari : float\n- Adjusted Rand index. Only returned if y is given.\n \"\"\"\n self.fit(X, y)\n \n- if y is None:\n- labels = self.predict(X, y)\n- return labels\n- else:\n- labels, ari = self.predict(X, y)\n- return labels, ari\n+ labels = self.predict(X, y)\n+ return labels\n", "issue": "remove ari return value from AutoGMM.fit_predict\nDoesn't match with API well, should just get rid of this\n", "before_files": [{"content": "# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\n\nfrom sklearn.base import BaseEstimator, ClusterMixin\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.utils.validation import check_is_fitted\n\n\nclass BaseCluster(ABC, BaseEstimator, ClusterMixin):\n \"\"\"\n Base clustering class.\n \"\"\"\n\n @abstractmethod\n def fit(self, X, y=None):\n \"\"\"\n Compute clusters based on given method.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n\n def predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n y : array-like, shape (n_samples, ), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n\n ari : float\n Adjusted Rand index. Only returned if y is given.\n \"\"\"\n # Check if fit is already called\n check_is_fitted(self, [\"model_\"], all_or_any=all)\n labels = self.model_.predict(X)\n\n if y is None:\n return labels\n else:\n ari = adjusted_rand_score(y, labels)\n return labels, ari\n\n def fit_predict(self, X, y=None): # pragma: no cover\n \"\"\"\n Fit the models and predict clusters based on best model.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. 
Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for X if available. Used to compute\n ARI scores.\n\n Returns\n -------\n labels : array, shape (n_samples,)\n Component labels.\n\n ari : float\n Adjusted Rand index. Only returned if y is given.\n \"\"\"\n self.fit(X, y)\n\n if y is None:\n labels = self.predict(X, y)\n return labels\n else:\n labels, ari = self.predict(X, y)\n return labels, ari\n", "path": "graspy/cluster/base.py"}]} | 1,502 | 314 |
gh_patches_debug_15455 | rasdani/github-patches | git_diff | kubeflow__pipelines-9088 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
v2 - support resource requests and limits
* [x] https://github.com/kubeflow/pipelines/pull/7045
* [x] #7043
* [x] #7047
</issue>
<code>
[start of samples/core/resource_spec/resource_spec_v2.py]
1 # Copyright 2020-2021 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from kfp import dsl
17
18 # In tests, we install a KFP package from the PR under test. Users should not
19 # normally need to specify `kfp_package_path` in their component definitions.
20 _KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH')
21
22
23 @dsl.component(kfp_package_path=_KFP_PACKAGE_PATH)
24 def training_op(n: int) -> int:
25 # quickly allocate a lot of memory to verify memory is enough
26 a = [i for i in range(n)]
27 return len(a)
28
29
30 @dsl.pipeline(
31 name='pipeline-with-resource-spec',
32 description='A pipeline with resource specification.')
33 def my_pipeline(n: int = 11234567):
34 # For units of these resource limits,
35 # refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes
36 # 11234567 roughly needs 400Mi+ memory.
37 #
38 # Note, with v2 python components, there's a larger memory overhead caused
39 # by installing KFP SDK in the component, so we had to increase memory limit to 650M.
40 training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')
41
42 # TODO(Bobgy): other resource specs like cpu requests, memory requests and
43 # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.
44 # There are other resource spec you can set.
45 # For example, to use TPU, add the following:
46 # .add_node_selector_constraint('cloud.google.com/gke-accelerator', 'tpu-v3')
47 # .set_gpu_limit(1)
48
[end of samples/core/resource_spec/resource_spec_v2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/samples/core/resource_spec/resource_spec_v2.py b/samples/core/resource_spec/resource_spec_v2.py
--- a/samples/core/resource_spec/resource_spec_v2.py
+++ b/samples/core/resource_spec/resource_spec_v2.py
@@ -38,6 +38,9 @@
# Note, with v2 python components, there's a larger memory overhead caused
# by installing KFP SDK in the component, so we had to increase memory limit to 650M.
training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')
+
+ # TODO(gkcalat): enable requests once SDK implements the feature
+ # training_task = training_task.set_cpu_request('1').set_memory_request('650M')
# TODO(Bobgy): other resource specs like cpu requests, memory requests and
# GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.
| {"golden_diff": "diff --git a/samples/core/resource_spec/resource_spec_v2.py b/samples/core/resource_spec/resource_spec_v2.py\n--- a/samples/core/resource_spec/resource_spec_v2.py\n+++ b/samples/core/resource_spec/resource_spec_v2.py\n@@ -38,6 +38,9 @@\n # Note, with v2 python components, there's a larger memory overhead caused\n # by installing KFP SDK in the component, so we had to increase memory limit to 650M.\n training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')\n+ \n+ # TODO(gkcalat): enable requests once SDK implements the feature\n+ # training_task = training_task.set_cpu_request('1').set_memory_request('650M')\n \n # TODO(Bobgy): other resource specs like cpu requests, memory requests and\n # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.\n", "issue": "v2 - support resource requests and limits\n* [x] https://github.com/kubeflow/pipelines/pull/7045\r\n* [x] #7043\r\n* [x] #7047\r\n\n", "before_files": [{"content": "# Copyright 2020-2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom kfp import dsl\n\n# In tests, we install a KFP package from the PR under test. Users should not\n# normally need to specify `kfp_package_path` in their component definitions.\n_KFP_PACKAGE_PATH = os.getenv('KFP_PACKAGE_PATH')\n\n\[email protected](kfp_package_path=_KFP_PACKAGE_PATH)\ndef training_op(n: int) -> int:\n # quickly allocate a lot of memory to verify memory is enough\n a = [i for i in range(n)]\n return len(a)\n\n\[email protected](\n name='pipeline-with-resource-spec',\n description='A pipeline with resource specification.')\ndef my_pipeline(n: int = 11234567):\n # For units of these resource limits,\n # refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes\n # 11234567 roughly needs 400Mi+ memory.\n #\n # Note, with v2 python components, there's a larger memory overhead caused\n # by installing KFP SDK in the component, so we had to increase memory limit to 650M.\n training_task = training_op(n=n).set_cpu_limit('1').set_memory_limit('650M')\n\n # TODO(Bobgy): other resource specs like cpu requests, memory requests and\n # GPU limits are not available yet: https://github.com/kubeflow/pipelines/issues/6354.\n # There are other resource spec you can set.\n # For example, to use TPU, add the following:\n # .add_node_selector_constraint('cloud.google.com/gke-accelerator', 'tpu-v3')\n # .set_gpu_limit(1)\n", "path": "samples/core/resource_spec/resource_spec_v2.py"}]} | 1,204 | 219 |
gh_patches_debug_33968 | rasdani/github-patches | git_diff | pypa__pip-2281 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip 6.0.3 weird symbols shown with download progress bar
with pip 6.0.3 on Windows with cmd.exe
```
py -mpip install --upgrade setuptools
Collecting setuptools from https://pypi.python.org/packages/3.4/s/setuptools/set
uptools-8.3-py2.py3-none-any.whl#md5=a6c2914e2ae62227a5dfb6e908475b02
Downloading setuptools-8.3-py2.py3-none-any.whl (552kB)
←[K 100% |################################| 552kB 835kB/s ta 0:00:01
←[?25hInstalling collected packages: setuptools
Found existing installation: setuptools 7.0
Uninstalling setuptools-7.0:
Successfully uninstalled setuptools-7.0
Successfully installed setuptools-8.3
```
There's weird stuff with the progress bar, possibly control characters that the cmd.exe terminal can't handle.
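For reference, the stray sequences decode as ANSI escape codes: `←[K` is `ESC[K` (erase to end of line) and `←[?25h` is `ESC[?25h` (show cursor), which the classic cmd.exe console prints literally instead of interpreting. A tiny reproduction (illustrative):

```python
import sys

ERASE_LINE = "\x1b[K"      # shows up literally as "←[K" on plain cmd.exe
SHOW_CURSOR = "\x1b[?25h"  # shows up literally as "←[?25h"
sys.stdout.write(ERASE_LINE + " 100% |####| 552kB\n" + SHOW_CURSOR)
```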
</issue>
<code>
[start of pip/utils/ui.py]
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import itertools
5 import sys
6
7 from pip.utils import format_size
8 from pip.utils.logging import get_indentation
9 from pip._vendor.progress.bar import Bar
10 from pip._vendor.progress.helpers import WritelnMixin
11 from pip._vendor.progress.spinner import Spinner
12
13
14 class DownloadProgressMixin(object):
15
16 def __init__(self, *args, **kwargs):
17 super(DownloadProgressMixin, self).__init__(*args, **kwargs)
18 self.message = (" " * (get_indentation() + 2)) + self.message
19
20 @property
21 def downloaded(self):
22 return format_size(self.index)
23
24 @property
25 def download_speed(self):
26 # Avoid zero division errors...
27 if self.avg == 0.0:
28 return "..."
29 return format_size(1 / self.avg) + "/s"
30
31 @property
32 def pretty_eta(self):
33 if self.eta:
34 return "eta %s" % self.eta_td
35 return ""
36
37 def iter(self, it, n=1):
38 for x in it:
39 yield x
40 self.next(n)
41 self.finish()
42
43
44 class DownloadProgressBar(DownloadProgressMixin, Bar):
45
46 file = sys.stdout
47 message = "%(percent)d%%"
48 suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
49
50
51 class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):
52
53 file = sys.stdout
54 suffix = "%(downloaded)s %(download_speed)s"
55
56 def next_phase(self):
57 if not hasattr(self, "_phaser"):
58 self._phaser = itertools.cycle(self.phases)
59 return next(self._phaser)
60
61 def update(self):
62 message = self.message % self
63 phase = self.next_phase()
64 suffix = self.suffix % self
65 line = ''.join([
66 message,
67 " " if message else "",
68 phase,
69 " " if suffix else "",
70 suffix,
71 ])
72
73 self.writeln(line)
74
[end of pip/utils/ui.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pip/utils/ui.py b/pip/utils/ui.py
--- a/pip/utils/ui.py
+++ b/pip/utils/ui.py
@@ -4,12 +4,20 @@
import itertools
import sys
+from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor.progress.bar import Bar
from pip._vendor.progress.helpers import WritelnMixin
from pip._vendor.progress.spinner import Spinner
+try:
+ from pip._vendor import colorama
+# Lots of different errors can come from this, including SystemError and
+# ImportError.
+except Exception:
+ colorama = None
+
class DownloadProgressMixin(object):
@@ -41,14 +49,40 @@
self.finish()
-class DownloadProgressBar(DownloadProgressMixin, Bar):
+class WindowsMixin(object):
+
+ def __init__(self, *args, **kwargs):
+ super(WindowsMixin, self).__init__(*args, **kwargs)
+
+ # Check if we are running on Windows and we have the colorama module,
+ # if we do then wrap our file with it.
+ if WINDOWS and colorama:
+ self.file = colorama.AnsiToWin32(self.file)
+ # The progress code expects to be able to call self.file.isatty()
+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+ # add it.
+ self.file.isatty = lambda: self.file.wrapped.isatty()
+ # The progress code expects to be able to call self.file.flush()
+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+ # add it.
+ self.file.flush = lambda: self.file.wrapped.flush()
+
+ # The Windows terminal does not support the hide/show cursor ANSI codes
+ # even with colorama. So we'll ensure that hide_cursor is False on
+ # Windows.
+ if WINDOWS and self.hide_cursor:
+ self.hide_cursor = False
+
+
+class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, Bar):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
-class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):
+class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,
+ WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
| {"golden_diff": "diff --git a/pip/utils/ui.py b/pip/utils/ui.py\n--- a/pip/utils/ui.py\n+++ b/pip/utils/ui.py\n@@ -4,12 +4,20 @@\n import itertools\n import sys\n \n+from pip.compat import WINDOWS\n from pip.utils import format_size\n from pip.utils.logging import get_indentation\n from pip._vendor.progress.bar import Bar\n from pip._vendor.progress.helpers import WritelnMixin\n from pip._vendor.progress.spinner import Spinner\n \n+try:\n+ from pip._vendor import colorama\n+# Lots of different errors can come from this, including SystemError and\n+# ImportError.\n+except Exception:\n+ colorama = None\n+\n \n class DownloadProgressMixin(object):\n \n@@ -41,14 +49,40 @@\n self.finish()\n \n \n-class DownloadProgressBar(DownloadProgressMixin, Bar):\n+class WindowsMixin(object):\n+\n+ def __init__(self, *args, **kwargs):\n+ super(WindowsMixin, self).__init__(*args, **kwargs)\n+\n+ # Check if we are running on Windows and we have the colorama module,\n+ # if we do then wrap our file with it.\n+ if WINDOWS and colorama:\n+ self.file = colorama.AnsiToWin32(self.file)\n+ # The progress code expects to be able to call self.file.isatty()\n+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n+ # add it.\n+ self.file.isatty = lambda: self.file.wrapped.isatty()\n+ # The progress code expects to be able to call self.file.flush()\n+ # but the colorama.AnsiToWin32() object doesn't have that, so we'll\n+ # add it.\n+ self.file.flush = lambda: self.file.wrapped.flush()\n+\n+ # The Windows terminal does not support the hide/show cursor ANSI codes\n+ # even with colorama. So we'll ensure that hide_cursor is False on\n+ # Windows.\n+ if WINDOWS and self.hide_cursor:\n+ self.hide_cursor = False\n+\n+\n+class DownloadProgressBar(WindowsMixin, DownloadProgressMixin, Bar):\n \n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n \n \n-class DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):\n+class DownloadProgressSpinner(WindowsMixin, DownloadProgressMixin,\n+ WritelnMixin, Spinner):\n \n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n", "issue": "pip 6.0.3 weird symbols shown with download progress bar\nwith pip 6.0.3 on Windows with cmd.exe\n\n```\npy -mpip install --upgrade setuptools\n\nCollecting setuptools from https://pypi.python.org/packages/3.4/s/setuptools/set\nuptools-8.3-py2.py3-none-any.whl#md5=a6c2914e2ae62227a5dfb6e908475b02\n Downloading setuptools-8.3-py2.py3-none-any.whl (552kB)\n\u2190[K 100% |################################| 552kB 835kB/s ta 0:00:01\n\u2190[?25hInstalling collected packages: setuptools\n Found existing installation: setuptools 7.0\n Uninstalling setuptools-7.0:\n Successfully uninstalled setuptools-7.0\n\nSuccessfully installed setuptools-8.3\n```\n\nThere's weird stuff with the progress bar, possibly control characers that cmd.exe terminal can't handle\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport itertools\nimport sys\n\nfrom pip.utils import format_size\nfrom pip.utils.logging import get_indentation\nfrom pip._vendor.progress.bar import Bar\nfrom pip._vendor.progress.helpers import WritelnMixin\nfrom pip._vendor.progress.spinner import Spinner\n\n\nclass DownloadProgressMixin(object):\n\n def __init__(self, *args, **kwargs):\n super(DownloadProgressMixin, self).__init__(*args, **kwargs)\n self.message = (\" \" * (get_indentation() + 2)) + self.message\n\n @property\n def 
downloaded(self):\n return format_size(self.index)\n\n @property\n def download_speed(self):\n # Avoid zero division errors...\n if self.avg == 0.0:\n return \"...\"\n return format_size(1 / self.avg) + \"/s\"\n\n @property\n def pretty_eta(self):\n if self.eta:\n return \"eta %s\" % self.eta_td\n return \"\"\n\n def iter(self, it, n=1):\n for x in it:\n yield x\n self.next(n)\n self.finish()\n\n\nclass DownloadProgressBar(DownloadProgressMixin, Bar):\n\n file = sys.stdout\n message = \"%(percent)d%%\"\n suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"\n\n\nclass DownloadProgressSpinner(DownloadProgressMixin, WritelnMixin, Spinner):\n\n file = sys.stdout\n suffix = \"%(downloaded)s %(download_speed)s\"\n\n def next_phase(self):\n if not hasattr(self, \"_phaser\"):\n self._phaser = itertools.cycle(self.phases)\n return next(self._phaser)\n\n def update(self):\n message = self.message % self\n phase = self.next_phase()\n suffix = self.suffix % self\n line = ''.join([\n message,\n \" \" if message else \"\",\n phase,\n \" \" if suffix else \"\",\n suffix,\n ])\n\n self.writeln(line)\n", "path": "pip/utils/ui.py"}]} | 1,350 | 566 |
gh_patches_debug_17125 | rasdani/github-patches | git_diff | spack__spack-18458 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation issue: r-boot
<!-- Thanks for taking the time to report this build failure. To proceed with the report please:
1. Title the issue "Installation issue: <name-of-the-package>".
2. Provide the information required below.
We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->
### Steps to reproduce the issue
<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->
```console
$ spack install r-boot%fj
==> Error: ChecksumError: sha256 checksum failed for /home/users/ea01/ea0114/spack-stage/spack-stage-r-boot-1.3-23-mm6cmoaof62r5y527kz24snjifgwpir6/boot_1.3-23.tar.gz
Expected 30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f but got 79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f
```
A build of `r-boot%fj` in August 2020 fails with a checksum error.
This version was added to Spack on 31 Aug 2019; see https://github.com/spack/spack/commit/661a894c85f451a4ef868abcc9871653914361bd
According to our logs, the same build succeeded in October 2019.
https://cloud.r-project.org/src/contrib/boot_1.3-23.tar.gz appears to have changed between these attempts.
We found the old (30c89e19) boot_1.3-23.tar.gz at http://in.archive.ubuntu.com/pool/universe/b/boot/boot_1.3-23.orig.tar.gz
and compared it with the new (79236a5a) one.
The difference is trivial ("Date/Publication" in boot/DESCRIPTION, and the MD5 of that file in boot/MD5),
so I would like to update the checksum value.
We have another question.
In this case we found the "old" archive and could prove the difference is trivial.
If we hit a checksum mismatch but could not find an "old" archive to verify against,
which of the following is better from a security standpoint?
1. create issue and discuss
2. directly make PR
<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->
### General information
<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->
- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [ ] I have uploaded the build log and environment files
- [x] I have searched the issues of this repo and believe this is not a duplicate
</issue>
<code>
[start of var/spack/repos/builtin/packages/r-boot/package.py]
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class RBoot(RPackage):
10 """Functions and datasets for bootstrapping from the book "Bootstrap
11 Methods and Their Application" by A. C. Davison and D. V. Hinkley (1997,
12 CUP), originally written by Angelo Canty for S."""
13
14 homepage = "https://cloud.r-project.org/package=boot"
15 url = "https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz"
16 list_url = "https://cloud.r-project.org/src/contrib/Archive/boot"
17
18 version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')
19 version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')
20 version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')
21 version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')
22
23 depends_on('[email protected]:', type=('build', 'run'))
24
[end of var/spack/repos/builtin/packages/r-boot/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/r-boot/package.py b/var/spack/repos/builtin/packages/r-boot/package.py
--- a/var/spack/repos/builtin/packages/r-boot/package.py
+++ b/var/spack/repos/builtin/packages/r-boot/package.py
@@ -15,7 +15,7 @@
url = "https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/boot"
- version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')
+ version('1.3-23', sha256='79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f')
version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')
version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')
version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/r-boot/package.py b/var/spack/repos/builtin/packages/r-boot/package.py\n--- a/var/spack/repos/builtin/packages/r-boot/package.py\n+++ b/var/spack/repos/builtin/packages/r-boot/package.py\n@@ -15,7 +15,7 @@\n url = \"https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz\"\n list_url = \"https://cloud.r-project.org/src/contrib/Archive/boot\"\n \n- version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')\n+ version('1.3-23', sha256='79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f')\n version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')\n version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')\n version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')\n", "issue": "Installation issue: r-boot\n<!-- Thanks for taking the time to report this build failure. To proceed with the report please:\r\n\r\n1. Title the issue \"Installation issue: <name-of-the-package>\".\r\n2. Provide the information required below.\r\n\r\nWe encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->\r\n\r\n### Steps to reproduce the issue\r\n\r\n<!-- Fill in the exact spec you are trying to build and the relevant part of the error message -->\r\n```console\r\n$ spack install r-boot%fj\r\n==> Error: ChecksumError: sha256 checksum failed for /home/users/ea01/ea0114/spack-stage/spack-stage-r-boot-1.3-23-mm6cmoaof62r5y527kz24snjifgwpir6/boot_1.3-23.tar.gz\r\n Expected 30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f but got 79236a5a770dc8bf5ce25d9aa303c5dc0574d94aa043fd00b8b4c8ccc877357f\r\n```\r\n\r\nBuild of `r-boot%fj` on 2020 Aug has a checksum error.\r\nThis version added to spack at 31 Aug 2019. Please see https://github.com/spack/spack/commit/661a894c85f451a4ef868abcc9871653914361bd\r\nAccording to our log, same build succeeded on 2019 Oct. \r\n\r\nhttps://cloud.r-project.org/src/contrib/boot_1.3-23.tar.gz seems to be changed between these attempts.\r\nWe found old(30c89e19) boot_1.3-23.tar.gz from http://in.archive.ubuntu.com/pool/universe/b/boot/boot_1.3-23.orig.tar.gz\r\nand compared with new(79236a5a) one.\r\nDifference was tribial. (\"Date/Publication\" in boot/DESCRIPTION, and MD5 of the file in boot/MD5)\r\nSo I would like to update checksum value.\r\n\r\nWe have another question.\r\nIn this case, we found \"old\" archive and proof the differnce is trivial.\r\nIf we found checksum mismatch and could not find \"old\" archive to verify, \r\nwhich is better in view of security?\r\n1. create issue and discuss\r\n2. directly make PR\r\n\r\n<!-- Some packages have maintainers who have volunteered to debug build failures. Run `spack maintainers <name-of-the-package>` and @mention them here if they exist. -->\r\n\r\n### General information\r\n\r\n<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. 
-->\r\n- [ ] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [ ] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers\r\n- [ ] I have uploaded the build log and environment files\r\n- [x] I have searched the issues of this repo and believe this is not a duplicate\r\n\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass RBoot(RPackage):\n \"\"\"Functions and datasets for bootstrapping from the book \"Bootstrap\n Methods and Their Application\" by A. C. Davison and D. V. Hinkley (1997,\n CUP), originally written by Angelo Canty for S.\"\"\"\n\n homepage = \"https://cloud.r-project.org/package=boot\"\n url = \"https://cloud.r-project.org/src/contrib/boot_1.3-18.tar.gz\"\n list_url = \"https://cloud.r-project.org/src/contrib/Archive/boot\"\n\n version('1.3-23', sha256='30c89e19dd6490b943233e87dfe422bfef92cfbb7a7dfb5c17dfd9b2d63fa02f')\n version('1.3-22', sha256='cf1f0cb1e0a7a36dcb6ae038f5d0211a0e7a009c149bc9d21acb9c58c38b4dfc')\n version('1.3-20', sha256='adcb90b72409705e3f9c69ea6c15673dcb649b464fed06723fe0930beac5212a')\n version('1.3-18', sha256='12fd237f810a69cc8d0a51a67c57eaf9506bf0341c764f8ab7c1feb73722235e')\n\n depends_on('[email protected]:', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/r-boot/package.py"}]} | 1,834 | 472 |
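The patch above only re-pins the hash; the comparison described in the issue can be reproduced by hashing both tarballs locally (a sketch; the file paths are placeholders for wherever the two archives were downloaded):

```python
import hashlib


def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


# CRAN's current tarball should now hash to 79236a5a..., while the copy
# preserved as Ubuntu's .orig tarball still hashes to 30c89e19...
print(sha256_of("boot_1.3-23.tar.gz"))
print(sha256_of("boot_1.3-23.orig.tar.gz"))
```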
gh_patches_debug_19842 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't crash when a nomcom is partially set up.
Right now, if a nomcom group is created, and the associated NomCom object is not, anyone with a role in the nomcom group cannot use the datatracker - the construction of the menu crashes.
The places that crash need to be protected against this misconfiguration (especially while the configuration of the Group and NomCom objects is done manually).
See also https://github.com/ietf-tools/datatracker/issues/3289
</issue>
<code>
[start of ietf/group/templatetags/group_filters.py]
1 from django import template
2
3 import debug # pyflakes:ignore
4
5 from ietf.group.models import Group
6
7 register = template.Library()
8
9 @register.filter
10 def has_sessions(group,num):
11 return group.session_set.filter(meeting__number=num).exists()
12
13 @register.filter
14 def active_roles(queryset):
15 return queryset.filter(group__state_id__in=['active', 'bof']).exclude(group__acronym='secretariat')
16
17 @register.filter
18 def active_nomcoms(user):
19 if not (user and hasattr(user, "is_authenticated") and user.is_authenticated):
20 return []
21
22 groups = []
23
24 groups.extend(Group.objects.filter(
25 role__person__user=user,
26 type_id='nomcom',
27 state__slug='active').distinct().select_related("type"))
28
29 return groups
30
31 @register.inclusion_tag('person/person_link.html')
32 def role_person_link(role, **kwargs):
33 title = kwargs.get('title', '')
34 cls = kwargs.get('class', '')
35 name = role.person.name
36 plain_name = role.person.plain_name()
37 email = role.email.address
38 return {'name': name, 'plain_name': plain_name, 'email': email, 'title': title, 'class': cls}
39
[end of ietf/group/templatetags/group_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/group/templatetags/group_filters.py b/ietf/group/templatetags/group_filters.py
--- a/ietf/group/templatetags/group_filters.py
+++ b/ietf/group/templatetags/group_filters.py
@@ -2,7 +2,7 @@
import debug # pyflakes:ignore
-from ietf.group.models import Group
+from ietf.nomcom.models import NomCom
register = template.Library()
@@ -19,14 +19,15 @@
if not (user and hasattr(user, "is_authenticated") and user.is_authenticated):
return []
- groups = []
-
- groups.extend(Group.objects.filter(
- role__person__user=user,
- type_id='nomcom',
- state__slug='active').distinct().select_related("type"))
-
- return groups
+ return list(
+ NomCom.objects.filter(
+ group__role__person__user=user,
+ group__type_id='nomcom', # just in case...
+ group__state__slug='active',
+ )
+ .distinct()
+ .order_by("group__acronym")
+ )
@register.inclusion_tag('person/person_link.html')
def role_person_link(role, **kwargs):
| {"golden_diff": "diff --git a/ietf/group/templatetags/group_filters.py b/ietf/group/templatetags/group_filters.py\n--- a/ietf/group/templatetags/group_filters.py\n+++ b/ietf/group/templatetags/group_filters.py\n@@ -2,7 +2,7 @@\n \n import debug # pyflakes:ignore\n \n-from ietf.group.models import Group\n+from ietf.nomcom.models import NomCom\n \n register = template.Library()\n \n@@ -19,14 +19,15 @@\n if not (user and hasattr(user, \"is_authenticated\") and user.is_authenticated):\n return []\n \n- groups = []\n-\n- groups.extend(Group.objects.filter(\n- role__person__user=user,\n- type_id='nomcom',\n- state__slug='active').distinct().select_related(\"type\"))\n-\n- return groups\n+ return list(\n+ NomCom.objects.filter(\n+ group__role__person__user=user,\n+ group__type_id='nomcom', # just in case...\n+ group__state__slug='active',\n+ )\n+ .distinct()\n+ .order_by(\"group__acronym\")\n+ )\n \n @register.inclusion_tag('person/person_link.html')\n def role_person_link(role, **kwargs):\n", "issue": "Don't crash when a nomcom is partially set up.\nRight now, if a nomcom group is created, and the associated NomCom object is not, anyone with a role in the nomcom group cannot use the datatracker - the construction of the menu crashes.\r\n\r\nThe places that crash need to be protected against this misconfiguration (especially while the configuration of the Group and NomCom objects are done manually).\r\n\r\nSee also https://github.com/ietf-tools/datatracker/issues/3289\n", "before_files": [{"content": "from django import template\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\n\nregister = template.Library()\n\[email protected]\ndef has_sessions(group,num):\n return group.session_set.filter(meeting__number=num).exists()\n\[email protected]\ndef active_roles(queryset):\n return queryset.filter(group__state_id__in=['active', 'bof']).exclude(group__acronym='secretariat')\n \[email protected]\ndef active_nomcoms(user):\n if not (user and hasattr(user, \"is_authenticated\") and user.is_authenticated):\n return []\n\n groups = []\n\n groups.extend(Group.objects.filter(\n role__person__user=user,\n type_id='nomcom',\n state__slug='active').distinct().select_related(\"type\"))\n\n return groups\n\[email protected]_tag('person/person_link.html')\ndef role_person_link(role, **kwargs):\n title = kwargs.get('title', '')\n cls = kwargs.get('class', '')\n name = role.person.name\n plain_name = role.person.plain_name()\n email = role.email.address\n return {'name': name, 'plain_name': plain_name, 'email': email, 'title': title, 'class': cls}\n", "path": "ietf/group/templatetags/group_filters.py"}]} | 988 | 285 |
gh_patches_debug_13573 | rasdani/github-patches | git_diff | vyperlang__vyper-891 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disallow int128->int128 conversion.
### What's your issue about?
Disallow `int128` to be converted to `int128`; this follows https://github.com/ethereum/vyper/pull/882.
### How can it be fixed?
Fill this in if you know how to fix it.
#### Cute Animal Picture

</issue>
<code>
[start of vyper/types/convert.py]
1 from vyper.functions.signature import (
2 signature
3 )
4 from vyper.parser.parser_utils import (
5 LLLnode,
6 getpos,
7 byte_array_to_num
8 )
9 from vyper.exceptions import (
10 InvalidLiteralException,
11 TypeMismatchException,
12 )
13 from vyper.types import (
14 BaseType,
15 )
16 from vyper.types import (
17 get_type,
18 )
19 from vyper.utils import (
20 DECIMAL_DIVISOR,
21 MemoryPositions,
22 SizeLimits
23 )
24
25
26 @signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')
27 def to_int128(expr, args, kwargs, context):
28 in_node = args[0]
29 typ, len = get_type(in_node)
30 if typ in ('int128', 'uint256', 'bytes32'):
31 if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):
32 raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr)
33 return LLLnode.from_list(
34 ['clamp', ['mload', MemoryPositions.MINNUM], in_node,
35 ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)
36 )
37 else:
38 return byte_array_to_num(in_node, expr, 'int128')
39
40
41 @signature(('num_literal', 'int128', 'bytes32'), 'str_literal')
42 def to_uint256(expr, args, kwargs, context):
43 in_node = args[0]
44 typ, len = get_type(in_node)
45 if isinstance(in_node, int):
46
47 if not SizeLimits.in_bounds('uint256', in_node):
48 raise InvalidLiteralException("Number out of range: {}".format(in_node))
49 _unit = in_node.typ.unit if typ == 'int128' else None
50 return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))
51 elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):
52 _unit = in_node.typ.unit if typ == 'int128' else None
53 return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))
54 elif isinstance(in_node, LLLnode) and typ in ('bytes32'):
55 return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))
56 else:
57 raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
58
59
60 @signature(('int128', 'uint256'), 'str_literal')
61 def to_decimal(expr, args, kwargs, context):
62 input = args[0]
63 if input.typ.typ == 'uint256':
64 return LLLnode.from_list(
65 ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],
66 typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)
67 )
68 else:
69 return LLLnode.from_list(
70 ['mul', input, DECIMAL_DIVISOR],
71 typ=BaseType('decimal', input.typ.unit, input.typ.positional),
72 pos=getpos(expr)
73 )
74
75
76 @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
77 def to_bytes32(expr, args, kwargs, context):
78 input = args[0]
79 typ, len = get_type(input)
80 if typ == 'bytes':
81 if len != 32:
82 raise TypeMismatchException("Unable to convert bytes[{}] to bytes32".format(len))
83 if input.location == "memory":
84 return LLLnode.from_list(
85 ['mload', ['add', input, 32]], typ=BaseType('bytes32')
86 )
87 elif input.location == "storage":
88 return LLLnode.from_list(
89 ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')
90 )
91 else:
92 return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))
93
94
95 def convert(expr, context):
96 output_type = expr.args[1].s
97 if output_type in conversion_table:
98 return conversion_table[output_type](expr, context)
99 else:
100 raise Exception("Conversion to {} is invalid.".format(output_type))
101
102
103 conversion_table = {
104 'int128': to_int128,
105 'uint256': to_uint256,
106 'decimal': to_decimal,
107 'bytes32': to_bytes32,
108 }
109
[end of vyper/types/convert.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/types/convert.py b/vyper/types/convert.py
--- a/vyper/types/convert.py
+++ b/vyper/types/convert.py
@@ -23,11 +23,11 @@
)
-@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')
+@signature(('uint256', 'bytes32', 'bytes'), 'str_literal')
def to_int128(expr, args, kwargs, context):
in_node = args[0]
typ, len = get_type(in_node)
- if typ in ('int128', 'uint256', 'bytes32'):
+ if typ in ('uint256', 'bytes32'):
if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):
raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr)
return LLLnode.from_list(
| {"golden_diff": "diff --git a/vyper/types/convert.py b/vyper/types/convert.py\n--- a/vyper/types/convert.py\n+++ b/vyper/types/convert.py\n@@ -23,11 +23,11 @@\n )\n \n \n-@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\n+@signature(('uint256', 'bytes32', 'bytes'), 'str_literal')\n def to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n- if typ in ('int128', 'uint256', 'bytes32'):\n+ if typ in ('uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n", "issue": "Disallow int128->int128 conversion.\n### What's your issue about?\r\n\r\nDisallow`int128` to be converted to `int128`, follows https://github.com/ethereum/vyper/pull/882.\r\n\r\n### How can it be fixed?\r\n\r\nFill this in if you know how to fix it.\r\n\r\n#### Cute Animal Picture\r\n\r\n\n", "before_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('int128', 'uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature(('int128', 'uint256'), 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n if input.typ.typ == 'uint256':\n return LLLnode.from_list(\n ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n )\n else:\n return LLLnode.from_list(\n ['mul', input, DECIMAL_DIVISOR],\n typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n pos=getpos(expr)\n )\n\n\n@signature(('int128', 'uint256', 
'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}]} | 1,926 | 221 |
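The net effect of the diff is that `int128` is no longer an accepted input type for `to_int128`, so a same-type conversion is rejected during signature matching; a toy illustration of that rule (the table and helper are illustrative, not Vyper internals):

```python
# Accepted input types per output type, mirroring the tightened @signature.
ACCEPTED_INPUTS = {
    "int128": ("uint256", "bytes32", "bytes"),  # 'int128' removed by the patch
}


def check_conversion(in_typ, out_typ):
    if in_typ not in ACCEPTED_INPUTS.get(out_typ, ()):
        raise TypeError(f"cannot convert {in_typ} to {out_typ}")


check_conversion("uint256", "int128")    # accepted
# check_conversion("int128", "int128")   # now raises TypeError
```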
gh_patches_debug_9417 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3804 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
boots.py spider doesn't correctly pick up all opticians
The current test in boots.py to switch the brand tags for opticians is `properties["name"].startswith("Opticians - ")`:
https://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/boots.py#L73
But this is not general enough to catch all of them. The displayed names of some opticians branches start with just "Opticians " or "Opticians-". For example https://www.boots.com/stores/3730-tewkesbury-high-street-opticians-gl20-5jz and https://www.boots.com/stores/3947-camden-high-street-opticians-nw1-0lu
I think you could safely change the test to `properties["name"].startswith("Opticians")` but the code a few lines below to strip out the "Opticians" prefix would need to be more complicated.
</issue>
<code>
[start of locations/spiders/boots.py]
1 import scrapy
2
3 from locations.items import GeojsonPointItem
4
5
6 class BootsSpider(scrapy.Spider):
7 name = "boots"
8 item_attributes = {"brand": "Boots", "brand_wikidata": "Q6123139"}
9 allowed_domains = ["www.boots.com", "www.boots.ie"]
10 download_delay = 0.5
11 start_urls = ["http://www.boots.com/store-a-z", "http://www.boots.ie/store-a-z"]
12
13 def parse_hours(self, lis):
14 hours = []
15 for li in lis:
16 day = li.xpath(
17 'normalize-space(./td[@class="store_hours_day"]/text())'
18 ).extract_first()
19 times = (
20 li.xpath('normalize-space(./td[@class="store_hours_time"]/text())')
21 .extract_first()
22 .replace(" ", "")
23 .replace("Closed-Closed", "off")
24 )
25 if times and day:
26 hours.append(day[:2] + " " + times)
27
28 return "; ".join(hours)
29
30 def parse_stores(self, response):
31 addr_full = response.xpath(
32 '//section[@class="store_details_content rowContainer"]/dl[@class="store_info_list"][1]/dd[@class="store_info_list_item"]/text()'
33 ).extract()
34 address = ", ".join(map(str.strip, addr_full))
35 # Handle blank store pages e.g. https://www.boots.com/stores/2250-alnwick-paikes-street-ne66-1hx
36 if len(address) == 0:
37 return
38
39 properties = {
40 "ref": response.xpath(
41 'normalize-space(//input[@id="bootsStoreId"]/@value)'
42 ).extract_first(),
43 "name": response.xpath(
44 'normalize-space(//input[@id="inputLocation"][@name="inputLocation"]/@value)'
45 ).extract_first(),
46 "postcode": response.xpath(
47 'normalize-space(//input[@id="storePostcode"]/@value)'
48 ).extract_first(),
49 "addr_full": address,
50 "phone": response.xpath(
51 '//section[@class="store_details_content rowContainer"]/dl[@class="store_info_list"][3]/dd[@class="store_info_list_item"]/a/text()'
52 ).extract_first(),
53 "country": response.xpath(
54 'normalize-space(//input[@id="countryCode"][@name="countryCode"]/@value)'
55 ).extract_first(),
56 "website": response.url,
57 "lat": response.xpath(
58 'normalize-space(//input[@id="lat"]/@value)'
59 ).extract_first(),
60 "lon": response.xpath(
61 'normalize-space(//input[@id="lon"]/@value)'
62 ).extract_first(),
63 }
64
65 hours = self.parse_hours(
66 response.xpath(
67 '//div[@class="row store_all_opening_hours"]/div[1]/table[@class="store_opening_hours "]/tbody/tr'
68 )
69 )
70 if hours:
71 properties["opening_hours"] = hours
72
73 if properties["name"].startswith("Opticians - "):
74 properties["brand"] = "Boots Opticians"
75 properties["brand_wikidata"] = "Q4944037"
76 properties["name"] = properties["name"][12:]
77
78 yield GeojsonPointItem(**properties)
79
80 def parse(self, response):
81 urls = response.xpath(
82 '//div[@class="brand_list_viewer"]/div[@class="column"]/ul/li/a/@href'
83 ).extract()
84 for path in urls:
85 yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)
86
[end of locations/spiders/boots.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/boots.py b/locations/spiders/boots.py
--- a/locations/spiders/boots.py
+++ b/locations/spiders/boots.py
@@ -70,10 +70,10 @@
if hours:
properties["opening_hours"] = hours
- if properties["name"].startswith("Opticians - "):
+ if properties["name"].startswith("Opticians"):
properties["brand"] = "Boots Opticians"
properties["brand_wikidata"] = "Q4944037"
- properties["name"] = properties["name"][12:]
+ properties["name"] = properties["name"].replace("Opticians", "").strip("- ")
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/boots.py b/locations/spiders/boots.py\n--- a/locations/spiders/boots.py\n+++ b/locations/spiders/boots.py\n@@ -70,10 +70,10 @@\n if hours:\n properties[\"opening_hours\"] = hours\n \n- if properties[\"name\"].startswith(\"Opticians - \"):\n+ if properties[\"name\"].startswith(\"Opticians\"):\n properties[\"brand\"] = \"Boots Opticians\"\n properties[\"brand_wikidata\"] = \"Q4944037\"\n- properties[\"name\"] = properties[\"name\"][12:]\n+ properties[\"name\"] = properties[\"name\"].replace(\"Opticians\", \"\").strip(\"- \")\n \n yield GeojsonPointItem(**properties)\n", "issue": "boots.py spider doesn't correctly pick up all opticians\nThe current test in boots.py to switch the brand tags for opticians is `properties[\"name\"].startswith(\"Opticians - \")`:\r\nhttps://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/boots.py#L73\r\n\r\nBut this is not general enough to catch all of them. The displayed name of some opticians branches only start with \"Opticians \" or \"Opticians-\". For example https://www.boots.com/stores/3730-tewkesbury-high-street-opticians-gl20-5jz and https://www.boots.com/stores/3947-camden-high-street-opticians-nw1-0lu\r\n\r\nI think you could safely change the test to `properties[\"name\"].startswith(\"Opticians\")` but the code a few lines below to strip out the \"Opticians\" prefix would need to be more complicated.\n", "before_files": [{"content": "import scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass BootsSpider(scrapy.Spider):\n name = \"boots\"\n item_attributes = {\"brand\": \"Boots\", \"brand_wikidata\": \"Q6123139\"}\n allowed_domains = [\"www.boots.com\", \"www.boots.ie\"]\n download_delay = 0.5\n start_urls = [\"http://www.boots.com/store-a-z\", \"http://www.boots.ie/store-a-z\"]\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath(\n 'normalize-space(./td[@class=\"store_hours_day\"]/text())'\n ).extract_first()\n times = (\n li.xpath('normalize-space(./td[@class=\"store_hours_time\"]/text())')\n .extract_first()\n .replace(\" \", \"\")\n .replace(\"Closed-Closed\", \"off\")\n )\n if times and day:\n hours.append(day[:2] + \" \" + times)\n\n return \"; \".join(hours)\n\n def parse_stores(self, response):\n addr_full = response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][1]/dd[@class=\"store_info_list_item\"]/text()'\n ).extract()\n address = \", \".join(map(str.strip, addr_full))\n # Handle blank store pages e.g. 
https://www.boots.com/stores/2250-alnwick-paikes-street-ne66-1hx\n if len(address) == 0:\n return\n\n properties = {\n \"ref\": response.xpath(\n 'normalize-space(//input[@id=\"bootsStoreId\"]/@value)'\n ).extract_first(),\n \"name\": response.xpath(\n 'normalize-space(//input[@id=\"inputLocation\"][@name=\"inputLocation\"]/@value)'\n ).extract_first(),\n \"postcode\": response.xpath(\n 'normalize-space(//input[@id=\"storePostcode\"]/@value)'\n ).extract_first(),\n \"addr_full\": address,\n \"phone\": response.xpath(\n '//section[@class=\"store_details_content rowContainer\"]/dl[@class=\"store_info_list\"][3]/dd[@class=\"store_info_list_item\"]/a/text()'\n ).extract_first(),\n \"country\": response.xpath(\n 'normalize-space(//input[@id=\"countryCode\"][@name=\"countryCode\"]/@value)'\n ).extract_first(),\n \"website\": response.url,\n \"lat\": response.xpath(\n 'normalize-space(//input[@id=\"lat\"]/@value)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//input[@id=\"lon\"]/@value)'\n ).extract_first(),\n }\n\n hours = self.parse_hours(\n response.xpath(\n '//div[@class=\"row store_all_opening_hours\"]/div[1]/table[@class=\"store_opening_hours \"]/tbody/tr'\n )\n )\n if hours:\n properties[\"opening_hours\"] = hours\n\n if properties[\"name\"].startswith(\"Opticians - \"):\n properties[\"brand\"] = \"Boots Opticians\"\n properties[\"brand_wikidata\"] = \"Q4944037\"\n properties[\"name\"] = properties[\"name\"][12:]\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath(\n '//div[@class=\"brand_list_viewer\"]/div[@class=\"column\"]/ul/li/a/@href'\n ).extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_stores)\n", "path": "locations/spiders/boots.py"}]} | 1,688 | 173 |
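The replacement line in the diff handles all three observed spellings by dropping the word and trimming leftover dashes and spaces; in isolation (store names here are made up for illustration):

```python
def strip_opticians_prefix(name):
    # Matches the patched line: remove the brand word, then trim any
    # remaining "-" or " " characters from both ends.
    return name.replace("Opticians", "").strip("- ")


assert strip_opticians_prefix("Opticians - Tewkesbury High Street") == "Tewkesbury High Street"
assert strip_opticians_prefix("Opticians Camden High Street") == "Camden High Street"
assert strip_opticians_prefix("Opticians-Camden High Street") == "Camden High Street"
```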
gh_patches_debug_384 | rasdani/github-patches | git_diff | Gallopsled__pwntools-1811 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
List comprehension in __all__ prevents Pylance from working
Thanks for contributing to Pwntools! Ideas from the community help make Pwntools an amazing tool for everybody.
If you've got an idea for a new feature, please provide information about:
* What the feature does
According to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules).
The approach in https://github.com/compas-dev/compas/issues/621 may be a solution that avoids listing all attributes manually.
* Why the feature should exist
To make Pylance happy :smile:
* What tests should be included
Test in VS Code to ensure it works.
If you think you can write the feature yourself, please submit a Pull Request and we can review your changes!
</issue>
<code>
[start of pwn/toplevel.py]
1 # Get all the modules from pwnlib
2 import collections
3 import logging
4 import math
5 import operator
6 import os
7 import platform
8 import re
9 import requests
10 import socks
11 import signal
12 import string
13 import struct
14 import subprocess
15 import sys
16 import tempfile
17 import threading
18 import time
19
20 from pprint import pprint
21
22 import pwnlib
23 from pwnlib import *
24 from pwnlib.asm import *
25 from pwnlib.context import Thread
26 from pwnlib.context import context, LocalContext
27 from pwnlib.dynelf import DynELF
28 from pwnlib.encoders import *
29 from pwnlib.elf.corefile import Core, Corefile, Coredump
30 from pwnlib.elf.elf import ELF, load
31 from pwnlib.encoders import *
32 from pwnlib.exception import PwnlibException
33 from pwnlib.gdb import attach, debug_assembly, debug_shellcode
34 from pwnlib.filepointer import *
35 from pwnlib.flag import *
36 from pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split
37 from pwnlib.log import getLogger
38 from pwnlib.memleak import MemLeak, RelativeMemLeak
39 from pwnlib.regsort import *
40 from pwnlib.replacements import *
41 from pwnlib.rop import ROP
42 from pwnlib.rop.call import AppendedArgument
43 from pwnlib.rop.srop import SigreturnFrame
44 from pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload
45 from pwnlib.runner import *
46 from pwnlib.term.readline import str_input
47 from pwnlib.timeout import Timeout
48 from pwnlib.tubes.listen import listen
49 from pwnlib.tubes.process import process, PTY, PIPE, STDOUT
50 from pwnlib.tubes.remote import remote, tcp, udp, connect
51 from pwnlib.tubes.serialtube import serialtube
52 from pwnlib.tubes.server import server
53 from pwnlib.tubes.ssh import ssh
54 from pwnlib.tubes.tube import tube
55 from pwnlib.ui import *
56 from pwnlib.util import crc
57 from pwnlib.util import iters
58 from pwnlib.util import net
59 from pwnlib.util import proc
60 from pwnlib.util import safeeval
61 from pwnlib.util.crc import BitPolynom
62 from pwnlib.util.cyclic import *
63 from pwnlib.util.fiddling import *
64 from pwnlib.util.getdents import *
65 from pwnlib.util.hashes import *
66 from pwnlib.util.lists import *
67 from pwnlib.util.misc import *
68 from pwnlib.util.packing import *
69 from pwnlib.util.proc import pidof
70 from pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with
71 from pwnlib.util.splash import *
72 from pwnlib.util.web import *
73
74 # Promote these modules, so that "from pwn import *" will let you access them
75
76 from six.moves import cPickle as pickle, cStringIO as StringIO
77 from six import BytesIO
78
79 error = log.error
80 warning = log.warning
81 warn = log.warning
82 info = log.info
83 debug = log.debug
84 success = log.success
85
86 __all__ = [x for x in tuple(globals()) if x != '__name__']
87
[end of pwn/toplevel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwn/toplevel.py b/pwn/toplevel.py
--- a/pwn/toplevel.py
+++ b/pwn/toplevel.py
@@ -83,4 +83,5 @@
debug = log.debug
success = log.success
-__all__ = [x for x in tuple(globals()) if x != '__name__']
+# Equivalence with the default behavior of "from import *"
+# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
| {"golden_diff": "diff --git a/pwn/toplevel.py b/pwn/toplevel.py\n--- a/pwn/toplevel.py\n+++ b/pwn/toplevel.py\n@@ -83,4 +83,5 @@\n debug = log.debug\n success = log.success\n \n-__all__ = [x for x in tuple(globals()) if x != '__name__']\n+# Equivalence with the default behavior of \"from import *\"\n+# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n", "issue": "List comprehension in __all__ prevents Pylance from working\nThanks for contributing to Pwntools! Ideas from the community help make Pwntools an amazing tool for everybody.\r\n\r\nIf you've got an idea for a new feature, please provide information about:\r\n\r\n* What the feature does\r\nAccording to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules).\r\nhttps://github.com/compas-dev/compas/issues/621 may be a solution instead of listing all attributes manually to fix that.\r\n* Why the feature should exist\r\nTo make Pylance happy :smile: \r\n* What tests should be included\r\nTest in VS Code to ensure it works.\r\n\r\nIf you think you can write the feature yourself, please submit a Pull Request and we can review your changes!\r\n\n", "before_files": [{"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport requests\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom 
pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\n__all__ = [x for x in tuple(globals()) if x != '__name__']\n", "path": "pwn/toplevel.py"}]} | 1,680 | 110 |
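The merged change drops the computed `__all__` entirely and records the equivalent comprehension only in a comment, which lets static analyzers follow the star-imports themselves. If an explicit list were ever wanted instead, it could be generated once and pasted in rather than built at import time (a sketch, not shipped code):

```python
# Run inside pwn/toplevel.py's namespace to emit a static __all__ literal.
names = sorted(name for name in tuple(globals()) if not name.startswith("_"))
print("__all__ = [")
for name in names:
    print(f"    {name!r},")
print("]")
```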
gh_patches_debug_14937 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-4918 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nv-sd CI test failure
The Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7434747598 failed.
</issue>
<code>
[start of deepspeed/module_inject/containers/vae.py]
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 from ..policy import DSPolicy
7 from ...model_implementations.diffusers.vae import DSVAE
8
9
10 class VAEPolicy(DSPolicy):
11
12 def __init__(self):
13 super().__init__()
14 try:
15 import diffusers
16 if hasattr(diffusers.models.vae, "AutoencoderKL"):
17 self._orig_layer_class = diffusers.models.vae.AutoencoderKL
18 else:
19 # Diffusers >= 0.12.0 changes location of AutoencoderKL
20 self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL
21 except ImportError:
22 self._orig_layer_class = None
23
24 def match(self, module):
25 return isinstance(module, self._orig_layer_class)
26
27 def match_replaced(self, module):
28 return isinstance(module, DSVAE)
29
30 def apply(self, module, enable_cuda_graph=True):
31 # TODO(cmikeh2): Enable cuda graph should be an inference configuration
32 return DSVAE(module, enable_cuda_graph=enable_cuda_graph)
33
34 # NOTE (lekurile): Should we have a diffusers policy class?
35 def attention(self, client_module):
36 pass
37
[end of deepspeed/module_inject/containers/vae.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepspeed/module_inject/containers/vae.py b/deepspeed/module_inject/containers/vae.py
--- a/deepspeed/module_inject/containers/vae.py
+++ b/deepspeed/module_inject/containers/vae.py
@@ -13,11 +13,11 @@
super().__init__()
try:
import diffusers
- if hasattr(diffusers.models.vae, "AutoencoderKL"):
- self._orig_layer_class = diffusers.models.vae.AutoencoderKL
+ if hasattr(diffusers.models.autoencoders.vae, "AutoencoderKL"):
+ self._orig_layer_class = diffusers.models.autoencoders.vae.AutoencoderKL
else:
# Diffusers >= 0.12.0 changes location of AutoencoderKL
- self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL
+ self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL
except ImportError:
self._orig_layer_class = None
| {"golden_diff": "diff --git a/deepspeed/module_inject/containers/vae.py b/deepspeed/module_inject/containers/vae.py\n--- a/deepspeed/module_inject/containers/vae.py\n+++ b/deepspeed/module_inject/containers/vae.py\n@@ -13,11 +13,11 @@\n super().__init__()\n try:\n import diffusers\n- if hasattr(diffusers.models.vae, \"AutoencoderKL\"):\n- self._orig_layer_class = diffusers.models.vae.AutoencoderKL\n+ if hasattr(diffusers.models.autoencoders.vae, \"AutoencoderKL\"):\n+ self._orig_layer_class = diffusers.models.autoencoders.vae.AutoencoderKL\n else:\n # Diffusers >= 0.12.0 changes location of AutoencoderKL\n- self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL\n+ self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL\n except ImportError:\n self._orig_layer_class = None\n", "issue": "nv-sd CI test failure\nThe Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7434747598 failed.\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom ..policy import DSPolicy\nfrom ...model_implementations.diffusers.vae import DSVAE\n\n\nclass VAEPolicy(DSPolicy):\n\n def __init__(self):\n super().__init__()\n try:\n import diffusers\n if hasattr(diffusers.models.vae, \"AutoencoderKL\"):\n self._orig_layer_class = diffusers.models.vae.AutoencoderKL\n else:\n # Diffusers >= 0.12.0 changes location of AutoencoderKL\n self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL\n except ImportError:\n self._orig_layer_class = None\n\n def match(self, module):\n return isinstance(module, self._orig_layer_class)\n\n def match_replaced(self, module):\n return isinstance(module, DSVAE)\n\n def apply(self, module, enable_cuda_graph=True):\n # TODO(cmikeh2): Enable cuda graph should be an inference configuration\n return DSVAE(module, enable_cuda_graph=enable_cuda_graph)\n\n # NOTE (lekurile): Should we have a diffusers policy class?\n def attention(self, client_module):\n pass\n", "path": "deepspeed/module_inject/containers/vae.py"}]} | 922 | 230 |
gh_patches_debug_37365 | rasdani/github-patches | git_diff | pantsbuild__pants-13583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scala import extraction for inference
Inference for Scala will require (at a minimum) import extraction from Scala sources. In v1 this was accomplished with https://scalameta.org/, which still seems to be active.
https://scalameta.org/docs/trees/guide.html#parse-trees
</issue>
<code>
[start of src/python/pants/backend/scala/dependency_inference/rules.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 from __future__ import annotations
4
5 import logging
6
7 from pants.backend.scala.dependency_inference import scala_parser, symbol_mapper
8 from pants.backend.scala.dependency_inference.scala_parser import ScalaSourceDependencyAnalysis
9 from pants.backend.scala.subsystems.scala_infer import ScalaInferSubsystem
10 from pants.backend.scala.target_types import ScalaSourceField
11 from pants.build_graph.address import Address
12 from pants.core.util_rules.source_files import SourceFilesRequest
13 from pants.engine.internals.selectors import Get, MultiGet
14 from pants.engine.rules import collect_rules, rule
15 from pants.engine.target import (
16 Dependencies,
17 DependenciesRequest,
18 ExplicitlyProvidedDependencies,
19 InferDependenciesRequest,
20 InferredDependencies,
21 WrappedTarget,
22 )
23 from pants.engine.unions import UnionRule
24 from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping
25 from pants.util.ordered_set import OrderedSet
26
27 logger = logging.getLogger(__name__)
28
29
30 class InferScalaSourceDependencies(InferDependenciesRequest):
31 infer_from = ScalaSourceField
32
33
34 @rule(desc="Inferring Scala dependencies by analyzing sources")
35 async def infer_scala_dependencies_via_source_analysis(
36 request: InferScalaSourceDependencies,
37 scala_infer_subsystem: ScalaInferSubsystem,
38 first_party_symbol_map: FirstPartySymbolMapping,
39 ) -> InferredDependencies:
40 if not scala_infer_subsystem.imports:
41 return InferredDependencies([])
42
43 address = request.sources_field.address
44 wrapped_tgt = await Get(WrappedTarget, Address, address)
45 explicitly_provided_deps, analysis = await MultiGet(
46 Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),
47 Get(ScalaSourceDependencyAnalysis, SourceFilesRequest([request.sources_field])),
48 )
49
50 symbols: OrderedSet[str] = OrderedSet()
51 if scala_infer_subsystem.imports:
52 symbols.update(analysis.all_imports())
53
54 dependencies: OrderedSet[Address] = OrderedSet()
55 for symbol in symbols:
56 matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)
57 if not matches:
58 continue
59
60 explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
61 matches,
62 address,
63 import_reference="type",
64 context=f"The target {address} imports `{symbol}`",
65 )
66
67 maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)
68 if maybe_disambiguated:
69 dependencies.add(maybe_disambiguated)
70
71 return InferredDependencies(dependencies)
72
73
74 def rules():
75 return [
76 *collect_rules(),
77 *scala_parser.rules(),
78 *symbol_mapper.rules(),
79 UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),
80 ]
81
[end of src/python/pants/backend/scala/dependency_inference/rules.py]
[start of src/python/pants/backend/java/subsystems/java_infer.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 from typing import cast
4
5 from pants.option.subsystem import Subsystem
6 from pants.util.docutil import git_url
7
8
9 class JavaInferSubsystem(Subsystem):
10 options_scope = "java-infer"
11 help = "Options controlling which dependencies will be inferred for Java targets."
12
13 @classmethod
14 def register_options(cls, register):
15 super().register_options(register)
16 register(
17 "--imports",
18 default=True,
19 type=bool,
20 help=("Infer a target's dependencies by parsing import statements from sources."),
21 )
22 register(
23 "--consumed-types",
24 default=True,
25 type=bool,
26 help=("Infer a target's dependencies by parsing consumed types from sources."),
27 )
28 register(
29 "--third-party-imports",
30 default=True,
31 type=bool,
32 help="Infer a target's third-party dependencies using Java import statements.",
33 )
34 _default_package_mapping_url = git_url(
35 "src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py"
36 )
37 register(
38 "--third-party-import-mapping",
39 type=dict,
40 help=(
41 "A dictionary mapping a Java package path to a JVM artifact coordinate (GROUP:ARTIFACT) "
42 "without the version. The package path may be made recursive to match symbols in subpackages "
43 "by adding `.**` to the end of the package path. For example, specify `{'org.junit.**': 'junit:junit'} `"
44 "to infer a dependency on junit:junit for any file importing a symbol from org.junit or its "
45 f"subpackages. Pants also supplies a default package mapping ({_default_package_mapping_url})."
46 ),
47 )
48
49 @property
50 def imports(self) -> bool:
51 return cast(bool, self.options.imports)
52
53 @property
54 def consumed_types(self) -> bool:
55 return cast(bool, self.options.consumed_types)
56
57 @property
58 def third_party_imports(self) -> bool:
59 return cast(bool, self.options.third_party_imports)
60
61 @property
62 def third_party_import_mapping(self) -> dict:
63 return cast(dict, self.options.third_party_import_mapping)
64
[end of src/python/pants/backend/java/subsystems/java_infer.py]
</code>
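As an aside, to make the `--third-party-import-mapping` help text in the listing above concrete, here is a hypothetical mapping; the coordinates are illustrative and not taken from this repository:
```
# Hypothetical --third-party-import-mapping value: package paths
# (recursive when suffixed with `.**`) mapped to GROUP:ARTIFACT coordinates.
third_party_import_mapping = {
    "org.junit.**": "junit:junit",  # org.junit and all subpackages
    "com.google.common.collect": "com.google.guava:guava",  # this package only
}
```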
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/src/python/pants/backend/java/subsystems/java_infer.py b/src/python/pants/backend/java/subsystems/java_infer.py
--- a/src/python/pants/backend/java/subsystems/java_infer.py
+++ b/src/python/pants/backend/java/subsystems/java_infer.py
@@ -34,6 +34,7 @@
_default_package_mapping_url = git_url(
"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py"
)
+ # TODO: Move to `coursier` or a generic `jvm` subsystem.
register(
"--third-party-import-mapping",
type=dict,
diff --git a/src/python/pants/backend/scala/dependency_inference/rules.py b/src/python/pants/backend/scala/dependency_inference/rules.py
--- a/src/python/pants/backend/scala/dependency_inference/rules.py
+++ b/src/python/pants/backend/scala/dependency_inference/rules.py
@@ -21,6 +21,12 @@
WrappedTarget,
)
from pants.engine.unions import UnionRule
+from pants.jvm.dependency_inference import artifact_mapper
+from pants.jvm.dependency_inference.artifact_mapper import (
+ AvailableThirdPartyArtifacts,
+ ThirdPartyPackageToArtifactMapping,
+ find_artifact_mapping,
+)
from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping
from pants.util.ordered_set import OrderedSet
@@ -36,6 +42,8 @@
request: InferScalaSourceDependencies,
scala_infer_subsystem: ScalaInferSubsystem,
first_party_symbol_map: FirstPartySymbolMapping,
+ third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping,
+ available_artifacts: AvailableThirdPartyArtifacts,
) -> InferredDependencies:
if not scala_infer_subsystem.imports:
return InferredDependencies([])
@@ -53,7 +61,11 @@
dependencies: OrderedSet[Address] = OrderedSet()
for symbol in symbols:
- matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)
+ first_party_matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)
+ third_party_matches = find_artifact_mapping(
+ symbol, third_party_artifact_mapping, available_artifacts
+ )
+ matches = first_party_matches.union(third_party_matches)
if not matches:
continue
@@ -74,6 +86,7 @@
def rules():
return [
*collect_rules(),
+ *artifact_mapper.rules(),
*scala_parser.rules(),
*symbol_mapper.rules(),
UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),
| {"golden_diff": "diff --git a/src/python/pants/backend/java/subsystems/java_infer.py b/src/python/pants/backend/java/subsystems/java_infer.py\n--- a/src/python/pants/backend/java/subsystems/java_infer.py\n+++ b/src/python/pants/backend/java/subsystems/java_infer.py\n@@ -34,6 +34,7 @@\n _default_package_mapping_url = git_url(\n \"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py\"\n )\n+ # TODO: Move to `coursier` or a generic `jvm` subsystem.\n register(\n \"--third-party-import-mapping\",\n type=dict,\ndiff --git a/src/python/pants/backend/scala/dependency_inference/rules.py b/src/python/pants/backend/scala/dependency_inference/rules.py\n--- a/src/python/pants/backend/scala/dependency_inference/rules.py\n+++ b/src/python/pants/backend/scala/dependency_inference/rules.py\n@@ -21,6 +21,12 @@\n WrappedTarget,\n )\n from pants.engine.unions import UnionRule\n+from pants.jvm.dependency_inference import artifact_mapper\n+from pants.jvm.dependency_inference.artifact_mapper import (\n+ AvailableThirdPartyArtifacts,\n+ ThirdPartyPackageToArtifactMapping,\n+ find_artifact_mapping,\n+)\n from pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping\n from pants.util.ordered_set import OrderedSet\n \n@@ -36,6 +42,8 @@\n request: InferScalaSourceDependencies,\n scala_infer_subsystem: ScalaInferSubsystem,\n first_party_symbol_map: FirstPartySymbolMapping,\n+ third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping,\n+ available_artifacts: AvailableThirdPartyArtifacts,\n ) -> InferredDependencies:\n if not scala_infer_subsystem.imports:\n return InferredDependencies([])\n@@ -53,7 +61,11 @@\n \n dependencies: OrderedSet[Address] = OrderedSet()\n for symbol in symbols:\n- matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n+ first_party_matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n+ third_party_matches = find_artifact_mapping(\n+ symbol, third_party_artifact_mapping, available_artifacts\n+ )\n+ matches = first_party_matches.union(third_party_matches)\n if not matches:\n continue\n \n@@ -74,6 +86,7 @@\n def rules():\n return [\n *collect_rules(),\n+ *artifact_mapper.rules(),\n *scala_parser.rules(),\n *symbol_mapper.rules(),\n UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),\n", "issue": "Scala import extraction for inference\nInference for Scala will require (at a minimum) import extraction from Scala sources. 
In v1 this was accomplished with https://scalameta.org/, which still seems to be active.\r\n\r\nhttps://scalameta.org/docs/trees/guide.html#parse-trees\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport logging\n\nfrom pants.backend.scala.dependency_inference import scala_parser, symbol_mapper\nfrom pants.backend.scala.dependency_inference.scala_parser import ScalaSourceDependencyAnalysis\nfrom pants.backend.scala.subsystems.scala_infer import ScalaInferSubsystem\nfrom pants.backend.scala.target_types import ScalaSourceField\nfrom pants.build_graph.address import Address\nfrom pants.core.util_rules.source_files import SourceFilesRequest\nfrom pants.engine.internals.selectors import Get, MultiGet\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.engine.target import (\n Dependencies,\n DependenciesRequest,\n ExplicitlyProvidedDependencies,\n InferDependenciesRequest,\n InferredDependencies,\n WrappedTarget,\n)\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm.dependency_inference.symbol_mapper import FirstPartySymbolMapping\nfrom pants.util.ordered_set import OrderedSet\n\nlogger = logging.getLogger(__name__)\n\n\nclass InferScalaSourceDependencies(InferDependenciesRequest):\n infer_from = ScalaSourceField\n\n\n@rule(desc=\"Inferring Scala dependencies by analyzing sources\")\nasync def infer_scala_dependencies_via_source_analysis(\n request: InferScalaSourceDependencies,\n scala_infer_subsystem: ScalaInferSubsystem,\n first_party_symbol_map: FirstPartySymbolMapping,\n) -> InferredDependencies:\n if not scala_infer_subsystem.imports:\n return InferredDependencies([])\n\n address = request.sources_field.address\n wrapped_tgt = await Get(WrappedTarget, Address, address)\n explicitly_provided_deps, analysis = await MultiGet(\n Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),\n Get(ScalaSourceDependencyAnalysis, SourceFilesRequest([request.sources_field])),\n )\n\n symbols: OrderedSet[str] = OrderedSet()\n if scala_infer_subsystem.imports:\n symbols.update(analysis.all_imports())\n\n dependencies: OrderedSet[Address] = OrderedSet()\n for symbol in symbols:\n matches = first_party_symbol_map.symbols.addresses_for_symbol(symbol)\n if not matches:\n continue\n\n explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(\n matches,\n address,\n import_reference=\"type\",\n context=f\"The target {address} imports `{symbol}`\",\n )\n\n maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)\n if maybe_disambiguated:\n dependencies.add(maybe_disambiguated)\n\n return InferredDependencies(dependencies)\n\n\ndef rules():\n return [\n *collect_rules(),\n *scala_parser.rules(),\n *symbol_mapper.rules(),\n UnionRule(InferDependenciesRequest, InferScalaSourceDependencies),\n ]\n", "path": "src/python/pants/backend/scala/dependency_inference/rules.py"}, {"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom typing import cast\n\nfrom pants.option.subsystem import Subsystem\nfrom pants.util.docutil import git_url\n\n\nclass JavaInferSubsystem(Subsystem):\n options_scope = \"java-infer\"\n help = \"Options controlling which dependencies will be inferred for Java targets.\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n 
register(\n \"--imports\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing import statements from sources.\"),\n )\n register(\n \"--consumed-types\",\n default=True,\n type=bool,\n help=(\"Infer a target's dependencies by parsing consumed types from sources.\"),\n )\n register(\n \"--third-party-imports\",\n default=True,\n type=bool,\n help=\"Infer a target's third-party dependencies using Java import statements.\",\n )\n _default_package_mapping_url = git_url(\n \"src/python/pants/backend/java/dependency_inference/jvm_artifact_mappings.py\"\n )\n register(\n \"--third-party-import-mapping\",\n type=dict,\n help=(\n \"A dictionary mapping a Java package path to a JVM artifact coordinate (GROUP:ARTIFACT) \"\n \"without the version. The package path may be made recursive to match symbols in subpackages \"\n \"by adding `.**` to the end of the package path. For example, specify `{'org.junit.**': 'junit:junit'} `\"\n \"to infer a dependency on junit:junit for any file importing a symbol from org.junit or its \"\n f\"subpackages. Pants also supplies a default package mapping ({_default_package_mapping_url}).\"\n ),\n )\n\n @property\n def imports(self) -> bool:\n return cast(bool, self.options.imports)\n\n @property\n def consumed_types(self) -> bool:\n return cast(bool, self.options.consumed_types)\n\n @property\n def third_party_imports(self) -> bool:\n return cast(bool, self.options.third_party_imports)\n\n @property\n def third_party_import_mapping(self) -> dict:\n return cast(dict, self.options.third_party_import_mapping)\n", "path": "src/python/pants/backend/java/subsystems/java_infer.py"}]} | 1,999 | 573 |
gh_patches_debug_9049 | rasdani/github-patches | git_diff | avocado-framework__avocado-714 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proper simple tests examples
Even though simple tests are, well, simple, let's have a couple of them in the examples directory.
A big reason for that is that we currently use wrappers as the simple-test examples in the Getting Started guide (`avocado list examples/wrappers`), which can be confusing to new users.
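For illustration, a "simple test" is just an executable whose exit status Avocado maps to a result — a minimal hypothetical example (the file name and check are placeholders):
```
#!/usr/bin/env python
# passtest.py (hypothetical): Avocado runs the executable and maps
# exit status 0 to PASS and any non-zero status to FAIL.
import sys


def main():
    ok = (1 + 1 == 2)  # stand-in for real test logic
    sys.exit(0 if ok else 1)


if __name__ == '__main__':
    main()
```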
</issue>
<code>
[start of setup.py]
1 #!/bin/env python
2 # This program is free software; you can redistribute it and/or modify
3 # it under the terms of the GNU General Public License as published by
4 # the Free Software Foundation; either version 2 of the License, or
5 # (at your option) any later version.
6 #
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
10 #
11 # See LICENSE for more details.
12 #
13 # Copyright: Red Hat Inc. 2013-2014
14 # Author: Lucas Meneghel Rodrigues <[email protected]>
15
16 import glob
17 import os
18 # pylint: disable=E0611
19
20 from distutils.core import setup
21
22 from avocado import VERSION
23
24
25 VIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ
26
27
28 def get_dir(system_path=None, virtual_path=None):
29 """
30 Retrieve VIRTUAL_ENV friendly path
31 :param system_path: Relative system path
32 :param virtual_path: Overrides system_path for virtual_env only
33 :return: VIRTUAL_ENV friendly path
34 """
35 if virtual_path is None:
36 virtual_path = system_path
37 if VIRTUAL_ENV:
38 if virtual_path is None:
39 virtual_path = []
40 return os.path.join(*virtual_path)
41 else:
42 if system_path is None:
43 system_path = []
44 return os.path.join(*(['/'] + system_path))
45
46
47 def get_tests_dir():
48 return get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])
49
50
51 def get_avocado_libexec_dir():
52 if VIRTUAL_ENV:
53 return get_dir(['libexec'])
54 elif os.path.exists('/usr/libexec'): # RHEL-like distro
55 return get_dir(['usr', 'libexec', 'avocado'])
56 else: # Debian-like distro
57 return get_dir(['usr', 'lib', 'avocado'])
58
59
60 def get_data_files():
61 data_files = [(get_dir(['etc', 'avocado']), ['etc/avocado/avocado.conf'])]
62 data_files += [(get_dir(['etc', 'avocado', 'conf.d']),
63 ['etc/avocado/conf.d/README', 'etc/avocado/conf.d/gdb.conf'])]
64 data_files += [(get_dir(['etc', 'avocado', 'sysinfo']),
65 ['etc/avocado/sysinfo/commands', 'etc/avocado/sysinfo/files',
66 'etc/avocado/sysinfo/profilers'])]
67 data_files += [(get_tests_dir(), glob.glob('examples/tests/*.py'))]
68 for data_dir in glob.glob('examples/tests/*.data'):
69 fmt_str = '%s/*' % data_dir
70 for f in glob.glob(fmt_str):
71 data_files += [(os.path.join(get_tests_dir(),
72 os.path.basename(data_dir)), [f])]
73 data_files.append((get_dir(['usr', 'share', 'doc', 'avocado'], ['.']),
74 ['man/avocado.rst', 'man/avocado-rest-client.rst']))
75 data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],
76 ['wrappers']),
77 glob.glob('examples/wrappers/*.sh'))]
78 data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))
79 return data_files
80
81
82 def _get_plugin_resource_files(path):
83 """
84 Given a path, return all the files in there to package
85 """
86 flist = []
87 for root, _, files in sorted(os.walk(path)):
88 for name in files:
89 fullname = os.path.join(root, name)
90 flist.append(fullname[len('avocado/core/plugins/'):])
91 return flist
92
93
94 def get_long_description():
95 with open('README.rst', 'r') as req:
96 req_contents = req.read()
97 return req_contents
98
99 if __name__ == '__main__':
100 setup(name='avocado',
101 version=VERSION,
102 description='Avocado Test Framework',
103 long_description=get_long_description(),
104 author='Avocado Developers',
105 author_email='[email protected]',
106 url='http://avocado-framework.github.io/',
107 packages=['avocado',
108 'avocado.core',
109 'avocado.core.plugins',
110 'avocado.utils',
111 'avocado.utils.external',
112 'avocado.core.remote',
113 'avocado.core.restclient',
114 'avocado.core.restclient.cli',
115 'avocado.core.restclient.cli.args',
116 'avocado.core.restclient.cli.actions'],
117 package_data={'avocado.core.plugins': _get_plugin_resource_files(
118 'avocado/core/plugins/resources')},
119 data_files=get_data_files(),
120 scripts=['scripts/avocado',
121 'scripts/avocado-rest-client'])
122
[end of setup.py]
</code>
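To make the `get_dir` helper in the listing above concrete, a short sketch of its two modes; the exact results depend on whether `VIRTUAL_ENV` is set in the environment:
```
# Outside a virtualenv (VIRTUAL_ENV unset): system path, rooted at '/'.
get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])
# -> '/usr/share/avocado/tests'

# Inside a virtualenv (VIRTUAL_ENV set): the virtual path wins.
get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])
# -> 'tests'
```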
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -75,6 +75,11 @@
data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],
['wrappers']),
glob.glob('examples/wrappers/*.sh'))]
+
+ data_files += [(get_dir(['usr', 'share', 'avocado', 'simpletests'],
+ ['simpletests']),
+ glob.glob('examples/simpletests/*.sh'))]
+
data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))
return data_files
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -75,6 +75,11 @@\n data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],\n ['wrappers']),\n glob.glob('examples/wrappers/*.sh'))]\n+\n+ data_files += [(get_dir(['usr', 'share', 'avocado', 'simpletests'],\n+ ['simpletests']),\n+ glob.glob('examples/simpletests/*.sh'))]\n+\n data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))\n return data_files\n", "issue": "Proper simple tests examples\nEven though simple tests are, well, simple, let's have a couple of them in the examples directory.\n\nA big reason for that is that we currently use wrappers as the simple tests examples in the Getting Started guide (`avocado list examples/wrappers`) which can be confusing to new users.\n\n", "before_files": [{"content": "#!/bin/env python\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2013-2014\n# Author: Lucas Meneghel Rodrigues <[email protected]>\n\nimport glob\nimport os\n# pylint: disable=E0611\n\nfrom distutils.core import setup\n\nfrom avocado import VERSION\n\n\nVIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ\n\n\ndef get_dir(system_path=None, virtual_path=None):\n \"\"\"\n Retrieve VIRTUAL_ENV friendly path\n :param system_path: Relative system path\n :param virtual_path: Overrides system_path for virtual_env only\n :return: VIRTUAL_ENV friendly path\n \"\"\"\n if virtual_path is None:\n virtual_path = system_path\n if VIRTUAL_ENV:\n if virtual_path is None:\n virtual_path = []\n return os.path.join(*virtual_path)\n else:\n if system_path is None:\n system_path = []\n return os.path.join(*(['/'] + system_path))\n\n\ndef get_tests_dir():\n return get_dir(['usr', 'share', 'avocado', 'tests'], ['tests'])\n\n\ndef get_avocado_libexec_dir():\n if VIRTUAL_ENV:\n return get_dir(['libexec'])\n elif os.path.exists('/usr/libexec'): # RHEL-like distro\n return get_dir(['usr', 'libexec', 'avocado'])\n else: # Debian-like distro\n return get_dir(['usr', 'lib', 'avocado'])\n\n\ndef get_data_files():\n data_files = [(get_dir(['etc', 'avocado']), ['etc/avocado/avocado.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'conf.d']),\n ['etc/avocado/conf.d/README', 'etc/avocado/conf.d/gdb.conf'])]\n data_files += [(get_dir(['etc', 'avocado', 'sysinfo']),\n ['etc/avocado/sysinfo/commands', 'etc/avocado/sysinfo/files',\n 'etc/avocado/sysinfo/profilers'])]\n data_files += [(get_tests_dir(), glob.glob('examples/tests/*.py'))]\n for data_dir in glob.glob('examples/tests/*.data'):\n fmt_str = '%s/*' % data_dir\n for f in glob.glob(fmt_str):\n data_files += [(os.path.join(get_tests_dir(),\n os.path.basename(data_dir)), [f])]\n data_files.append((get_dir(['usr', 'share', 'doc', 'avocado'], ['.']),\n ['man/avocado.rst', 'man/avocado-rest-client.rst']))\n data_files += [(get_dir(['usr', 'share', 'avocado', 'wrappers'],\n ['wrappers']),\n glob.glob('examples/wrappers/*.sh'))]\n data_files.append((get_avocado_libexec_dir(), glob.glob('libexec/*')))\n return data_files\n\n\ndef _get_plugin_resource_files(path):\n \"\"\"\n Given a path, return all the files in 
there to package\n \"\"\"\n flist = []\n for root, _, files in sorted(os.walk(path)):\n for name in files:\n fullname = os.path.join(root, name)\n flist.append(fullname[len('avocado/core/plugins/'):])\n return flist\n\n\ndef get_long_description():\n with open('README.rst', 'r') as req:\n req_contents = req.read()\n return req_contents\n\nif __name__ == '__main__':\n setup(name='avocado',\n version=VERSION,\n description='Avocado Test Framework',\n long_description=get_long_description(),\n author='Avocado Developers',\n author_email='[email protected]',\n url='http://avocado-framework.github.io/',\n packages=['avocado',\n 'avocado.core',\n 'avocado.core.plugins',\n 'avocado.utils',\n 'avocado.utils.external',\n 'avocado.core.remote',\n 'avocado.core.restclient',\n 'avocado.core.restclient.cli',\n 'avocado.core.restclient.cli.args',\n 'avocado.core.restclient.cli.actions'],\n package_data={'avocado.core.plugins': _get_plugin_resource_files(\n 'avocado/core/plugins/resources')},\n data_files=get_data_files(),\n scripts=['scripts/avocado',\n 'scripts/avocado-rest-client'])\n", "path": "setup.py"}]} | 1,882 | 138 |
gh_patches_debug_19668 | rasdani/github-patches | git_diff | docker__docker-py-1050 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
login failed with default registry
I am using docker-py (1.8.0) and trying to use the login API.
If I don't pass `registry='https://index.docker.io/v1/'`, it raises the following exception:
```
docker.errors.APIError: 500 Server Error: Internal Server Error ("Unexpected status code [301] :")
```
But judging from https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in the source code, it should work with the default registry.
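A minimal reproduction sketch, assuming docker-py 1.8.0, a reachable daemon, and placeholder Hub credentials:
```
import docker

client = docker.Client()

# Fails with APIError: 500 ("Unexpected status code [301]"):
client.login(username='user', password='secret')

# Works once the registry is passed explicitly:
client.login(username='user', password='secret',
             registry='https://index.docker.io/v1/')
```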
</issue>
<code>
[start of docker/api/daemon.py]
1 import os
2 import warnings
3 from datetime import datetime
4
5 from ..auth import auth
6 from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
7 from ..utils import utils
8
9
10 class DaemonApiMixin(object):
11 def events(self, since=None, until=None, filters=None, decode=None):
12 if isinstance(since, datetime):
13 since = utils.datetime_to_timestamp(since)
14
15 if isinstance(until, datetime):
16 until = utils.datetime_to_timestamp(until)
17
18 if filters:
19 filters = utils.convert_filters(filters)
20
21 params = {
22 'since': since,
23 'until': until,
24 'filters': filters
25 }
26
27 return self._stream_helper(
28 self.get(self._url('/events'), params=params, stream=True),
29 decode=decode
30 )
31
32 def info(self):
33 return self._result(self._get(self._url("/info")), True)
34
35 def login(self, username, password=None, email=None, registry=None,
36 reauth=False, insecure_registry=False, dockercfg_path=None):
37 if insecure_registry:
38 warnings.warn(
39 INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
40 DeprecationWarning
41 )
42
43 # If we don't have any auth data so far, try reloading the config file
44 # one more time in case anything showed up in there.
45 # If dockercfg_path is passed check to see if the config file exists,
46 # if so load that config.
47 if dockercfg_path and os.path.exists(dockercfg_path):
48 self._auth_configs = auth.load_config(dockercfg_path)
49 elif not self._auth_configs:
50 self._auth_configs = auth.load_config()
51
52 registry = registry or auth.INDEX_URL
53
54 authcfg = auth.resolve_authconfig(self._auth_configs, registry)
55 # If we found an existing auth config for this registry and username
56 # combination, we can return it immediately unless reauth is requested.
57 if authcfg and authcfg.get('username', None) == username \
58 and not reauth:
59 return authcfg
60
61 req_data = {
62 'username': username,
63 'password': password,
64 'email': email,
65 'serveraddress': registry,
66 }
67
68 response = self._post_json(self._url('/auth'), data=req_data)
69 if response.status_code == 200:
70 self._auth_configs[registry] = req_data
71 return self._result(response, json=True)
72
73 def ping(self):
74 return self._result(self._get(self._url('/_ping')))
75
76 def version(self, api_version=True):
77 url = self._url("/version", versioned_api=api_version)
78 return self._result(self._get(url), json=True)
79
[end of docker/api/daemon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -49,8 +49,6 @@
elif not self._auth_configs:
self._auth_configs = auth.load_config()
- registry = registry or auth.INDEX_URL
-
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
@@ -67,7 +65,7 @@
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
- self._auth_configs[registry] = req_data
+ self._auth_configs[registry or auth.INDEX_NAME] = req_data
return self._result(response, json=True)
def ping(self):
| {"golden_diff": "diff --git a/docker/api/daemon.py b/docker/api/daemon.py\n--- a/docker/api/daemon.py\n+++ b/docker/api/daemon.py\n@@ -49,8 +49,6 @@\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n \n- registry = registry or auth.INDEX_URL\n-\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n@@ -67,7 +65,7 @@\n \n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n- self._auth_configs[registry] = req_data\n+ self._auth_configs[registry or auth.INDEX_NAME] = req_data\n return self._result(response, json=True)\n \n def ping(self):\n", "issue": "login failed with default registry\nI am using docker-py (1.8.0) and trying to using login API.\n\nIf I don't input `registry='https://index.docker.io/v1/'` .\nIt will raise exception as following:\n\n```\ndocker.errors.APIError: 500 Server Error: Internal Server Error (\"Unexpected status code [301] :\")\n```\n\nBut I saw https://github.com/docker/docker-py/blob/81edb398ebf7ce5c7ef14aa0739de5329589aabe/docker/api/daemon.py#L52 in source code. Should work with default registry.\n\n", "before_files": [{"content": "import os\nimport warnings\nfrom datetime import datetime\n\nfrom ..auth import auth\nfrom ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING\nfrom ..utils import utils\n\n\nclass DaemonApiMixin(object):\n def events(self, since=None, until=None, filters=None, decode=None):\n if isinstance(since, datetime):\n since = utils.datetime_to_timestamp(since)\n\n if isinstance(until, datetime):\n until = utils.datetime_to_timestamp(until)\n\n if filters:\n filters = utils.convert_filters(filters)\n\n params = {\n 'since': since,\n 'until': until,\n 'filters': filters\n }\n\n return self._stream_helper(\n self.get(self._url('/events'), params=params, stream=True),\n decode=decode\n )\n\n def info(self):\n return self._result(self._get(self._url(\"/info\")), True)\n\n def login(self, username, password=None, email=None, registry=None,\n reauth=False, insecure_registry=False, dockercfg_path=None):\n if insecure_registry:\n warnings.warn(\n INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),\n DeprecationWarning\n )\n\n # If we don't have any auth data so far, try reloading the config file\n # one more time in case anything showed up in there.\n # If dockercfg_path is passed check to see if the config file exists,\n # if so load that config.\n if dockercfg_path and os.path.exists(dockercfg_path):\n self._auth_configs = auth.load_config(dockercfg_path)\n elif not self._auth_configs:\n self._auth_configs = auth.load_config()\n\n registry = registry or auth.INDEX_URL\n\n authcfg = auth.resolve_authconfig(self._auth_configs, registry)\n # If we found an existing auth config for this registry and username\n # combination, we can return it immediately unless reauth is requested.\n if authcfg and authcfg.get('username', None) == username \\\n and not reauth:\n return authcfg\n\n req_data = {\n 'username': username,\n 'password': password,\n 'email': email,\n 'serveraddress': registry,\n }\n\n response = self._post_json(self._url('/auth'), data=req_data)\n if response.status_code == 200:\n self._auth_configs[registry] = req_data\n return self._result(response, json=True)\n\n def ping(self):\n return self._result(self._get(self._url('/_ping')))\n\n def version(self, api_version=True):\n url = self._url(\"/version\", 
versioned_api=api_version)\n return self._result(self._get(url), json=True)\n", "path": "docker/api/daemon.py"}]} | 1,417 | 209 |
gh_patches_debug_29586 | rasdani/github-patches | git_diff | blaze__blaze-1114 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dask test failure
It seems blaze's calls to `atop` use an older dask API.
```
================================================================================= FAILURES ==================================================================================
____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________
blaze/compute/tests/test_dask.py:69: in test_compute
result = compute(expr, dask_ns)
../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__
return func(*args, **kwargs)
blaze/compute/core.py:470: in compute
result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)
blaze/compute/core.py:164: in top_then_bottom_then_top_again_etc
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
blaze/compute/core.py:371: in bottom_up_until_type_break
**kwargs)}
../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__
return func(*args, **kwargs)
blaze/compute/dask.py:40: in compute_broadcast
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
../../../../code/py/dask/dask/array/core.py:1099: in atop
numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])
```
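For context — inferred from the traceback rather than quoted from dask — newer dask dropped the separate name argument from `atop`, so a call written against the old signature passes the name where dask now expects the output index tuple:
```
# Old dask signature (what blaze was written against):
#     atop(func, name, out_ind, *arrays_and_indices)
# New dask signature:
#     atop(func, out_ind, *arrays_and_indices)

# blaze's current call (old style):
atop(func, next(names), expr_inds,
     *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))

# What the new API expects (drop the explicit name):
atop(func, expr_inds,
     *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
```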
</issue>
<code>
[start of blaze/compute/dask.py]
1 from __future__ import absolute_import, division, print_function
2
3 from numbers import Number
4 from toolz import concat, first, curry, compose
5 from datashape import DataShape
6
7 from blaze import compute, ndim
8 from blaze.dispatch import dispatch
9 from blaze.compute.core import compute_up, optimize
10 from blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,
11 Expr, Slice, Broadcast)
12 from blaze.expr.split import split
13
14 from dask.array.core import (_concatenate2, Array, atop, names, transpose,
15 tensordot)
16
17
18 def compute_it(expr, leaves, *data, **kwargs):
19 kwargs.pop('scope')
20 return compute(expr, dict(zip(leaves, data)), **kwargs)
21
22
23 def elemwise_array(expr, *data, **kwargs):
24 leaves = expr._inputs
25 expr_inds = tuple(range(ndim(expr)))[::-1]
26 return atop(curry(compute_it, expr, leaves, **kwargs),
27 next(names), expr_inds,
28 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
29
30
31 try:
32 from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,
33 Broadcastable)
34
35 def compute_broadcast(expr, *data, **kwargs):
36 expr_inds = tuple(range(ndim(expr)))[::-1]
37 func = get_numba_ufunc(expr)
38 return atop(func,
39 next(names), expr_inds,
40 *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
41
42 def optimize_array(expr, *data):
43 return broadcast_collect(expr, Broadcastable=Broadcastable,
44 WantToBroadcast=Broadcastable)
45
46 for i in range(5):
47 compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)
48 optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)
49
50 except ImportError:
51 pass
52
53
54 for i in range(5):
55 compute_up.register(ElemWise, *([Array] * i))(elemwise_array)
56
57
58 @dispatch(Reduction, Array)
59 def compute_up(expr, data, **kwargs):
60 leaf = expr._leaves()[0]
61 chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +
62 (leaf.dshape.measure,))))
63 (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,
64 chunk=chunk)
65
66 inds = tuple(range(ndim(leaf)))
67 tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),
68 next(names), inds,
69 data, inds)
70
71 return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),
72 curry(_concatenate2, axes=expr.axis)),
73 next(names), tuple(i for i in inds if i not in expr.axis),
74 tmp, inds)
75
76
77 @dispatch(Transpose, Array)
78 def compute_up(expr, data, **kwargs):
79 return transpose(data, expr.axes)
80
81
82 @dispatch(TensorDot, Array, Array)
83 def compute_up(expr, lhs, rhs, **kwargs):
84 return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))
85
86
87 @dispatch(Slice, Array)
88 def compute_up(expr, data, **kwargs):
89 return data[expr.index]
90
[end of blaze/compute/dask.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py
--- a/blaze/compute/dask.py
+++ b/blaze/compute/dask.py
@@ -24,7 +24,7 @@
leaves = expr._inputs
expr_inds = tuple(range(ndim(expr)))[::-1]
return atop(curry(compute_it, expr, leaves, **kwargs),
- next(names), expr_inds,
+ expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
@@ -36,7 +36,7 @@
expr_inds = tuple(range(ndim(expr)))[::-1]
func = get_numba_ufunc(expr)
return atop(func,
- next(names), expr_inds,
+ expr_inds,
*concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))
def optimize_array(expr, *data):
@@ -64,13 +64,12 @@
chunk=chunk)
inds = tuple(range(ndim(leaf)))
- tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),
- next(names), inds,
- data, inds)
+ tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,
+ inds)
return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),
curry(_concatenate2, axes=expr.axis)),
- next(names), tuple(i for i in inds if i not in expr.axis),
+ tuple(i for i in inds if i not in expr.axis),
tmp, inds)
| {"golden_diff": "diff --git a/blaze/compute/dask.py b/blaze/compute/dask.py\n--- a/blaze/compute/dask.py\n+++ b/blaze/compute/dask.py\n@@ -24,7 +24,7 @@\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n \n@@ -36,7 +36,7 @@\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n- next(names), expr_inds,\n+ expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n \n def optimize_array(expr, *data):\n@@ -64,13 +64,12 @@\n chunk=chunk)\n \n inds = tuple(range(ndim(leaf)))\n- tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n- next(names), inds,\n- data, inds)\n+ tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs), inds, data,\n+ inds)\n \n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n- next(names), tuple(i for i in inds if i not in expr.axis),\n+ tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n", "issue": "dask test failure\nit seems `atop` is using an older dask API\n\n```\n================================================================================= FAILURES ==================================================================================\n____________________________________________________________________________ test_compute[expr0] ____________________________________________________________________________\nblaze/compute/tests/test_dask.py:69: in test_compute\n result = compute(expr, dask_ns)\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/core.py:470: in compute\n result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)\nblaze/compute/core.py:164: in top_then_bottom_then_top_again_etc\n expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)\nblaze/compute/core.py:371: in bottom_up_until_type_break\n **kwargs)}\n../../../../miniconda/envs/py27/lib/python2.7/site-packages/multipledispatch/dispatcher.py:163: in __call__\n return func(*args, **kwargs)\nblaze/compute/dask.py:40: in compute_broadcast\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n../../../../code/py/dask/dask/array/core.py:1099: in atop\n numblocks = dict([(a.name, a.numblocks) for a, ind in arginds])\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom numbers import Number\nfrom toolz import concat, first, curry, compose\nfrom datashape import DataShape\n\nfrom blaze import compute, ndim\nfrom blaze.dispatch import dispatch\nfrom blaze.compute.core import compute_up, optimize\nfrom blaze.expr import (ElemWise, symbol, Reduction, Transpose, TensorDot,\n Expr, Slice, Broadcast)\nfrom blaze.expr.split import split\n\nfrom dask.array.core import (_concatenate2, Array, atop, names, transpose,\n tensordot)\n\n\ndef compute_it(expr, leaves, *data, **kwargs):\n kwargs.pop('scope')\n return compute(expr, dict(zip(leaves, data)), **kwargs)\n\n\ndef elemwise_array(expr, *data, **kwargs):\n leaves = expr._inputs\n expr_inds = tuple(range(ndim(expr)))[::-1]\n return atop(curry(compute_it, expr, leaves, **kwargs),\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n\ntry:\n from blaze.compute.numba import (get_numba_ufunc, broadcast_collect,\n 
Broadcastable)\n\n def compute_broadcast(expr, *data, **kwargs):\n expr_inds = tuple(range(ndim(expr)))[::-1]\n func = get_numba_ufunc(expr)\n return atop(func,\n next(names), expr_inds,\n *concat((dat, tuple(range(ndim(dat))[::-1])) for dat in data))\n\n def optimize_array(expr, *data):\n return broadcast_collect(expr, Broadcastable=Broadcastable,\n WantToBroadcast=Broadcastable)\n\n for i in range(5):\n compute_up.register(Broadcast, *([(Array, Number)] * i))(compute_broadcast)\n optimize.register(Expr, *([(Array, Number)] * i))(optimize_array)\n\nexcept ImportError:\n pass\n\n\nfor i in range(5):\n compute_up.register(ElemWise, *([Array] * i))(elemwise_array)\n\n\n@dispatch(Reduction, Array)\ndef compute_up(expr, data, **kwargs):\n leaf = expr._leaves()[0]\n chunk = symbol('chunk', DataShape(*(tuple(map(first, data.chunks)) +\n (leaf.dshape.measure,))))\n (chunk, chunk_expr), (agg, agg_expr) = split(expr._child, expr,\n chunk=chunk)\n\n inds = tuple(range(ndim(leaf)))\n tmp = atop(curry(compute_it, chunk_expr, [chunk], **kwargs),\n next(names), inds,\n data, inds)\n\n return atop(compose(curry(compute_it, agg_expr, [agg], **kwargs),\n curry(_concatenate2, axes=expr.axis)),\n next(names), tuple(i for i in inds if i not in expr.axis),\n tmp, inds)\n\n\n@dispatch(Transpose, Array)\ndef compute_up(expr, data, **kwargs):\n return transpose(data, expr.axes)\n\n\n@dispatch(TensorDot, Array, Array)\ndef compute_up(expr, lhs, rhs, **kwargs):\n return tensordot(lhs, rhs, (expr._left_axes, expr._right_axes))\n\n\n@dispatch(Slice, Array)\ndef compute_up(expr, data, **kwargs):\n return data[expr.index]\n", "path": "blaze/compute/dask.py"}]} | 1,755 | 371 |
gh_patches_debug_14393 | rasdani/github-patches | git_diff | falconry__falcon-993 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default OPTIONS responder does not set Content-Length to "0"
Per RFC 7231:
> A server MUST generate a Content-Length field with a value of "0" if no payload body is to be sent in the response.
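To make the requirement concrete, a hedged sketch of a conforming default responder; whether falcon strips the header from 204 responses is an assumption here, hence the 200 status:
```
from falcon.status_codes import HTTP_200

def on_options(req, resp, **kwargs):
    resp.status = HTTP_200
    resp.set_header('Allow', allowed)  # 'allowed' captured from the factory
    # RFC 7231: even with no payload body, the response MUST carry
    # an explicit Content-Length of "0".
    resp.set_header('Content-Length', '0')
```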
</issue>
<code>
[start of falcon/responders.py]
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Default responder implementations."""
16
17 from falcon.errors import HTTPBadRequest
18 from falcon.errors import HTTPMethodNotAllowed
19 from falcon.errors import HTTPNotFound
20 from falcon.status_codes import HTTP_204
21
22
23 def path_not_found(req, resp, **kwargs):
24 """Raise 404 HTTPNotFound error"""
25 raise HTTPNotFound()
26
27
28 def bad_request(req, resp, **kwargs):
29 """Raise 400 HTTPBadRequest error"""
30 raise HTTPBadRequest('Bad request', 'Invalid HTTP method')
31
32
33 def create_method_not_allowed(allowed_methods):
34 """Creates a responder for "405 Method Not Allowed"
35
36 Args:
37 allowed_methods: A list of HTTP methods (uppercase) that should be
38 returned in the Allow header.
39
40 """
41 def method_not_allowed(req, resp, **kwargs):
42 """Raise 405 HTTPMethodNotAllowed error"""
43 raise HTTPMethodNotAllowed(allowed_methods)
44
45 return method_not_allowed
46
47
48 def create_default_options(allowed_methods):
49 """Creates a default responder for the OPTIONS method
50
51 Args:
52 allowed_methods: A list of HTTP methods (uppercase) that should be
53 returned in the Allow header.
54
55 """
56 allowed = ', '.join(allowed_methods)
57
58 def on_options(req, resp, **kwargs):
59 resp.status = HTTP_204
60 resp.set_header('Allow', allowed)
61 resp.set_header('Content-Length', '0')
62
63 return on_options
64
[end of falcon/responders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/falcon/responders.py b/falcon/responders.py
--- a/falcon/responders.py
+++ b/falcon/responders.py
@@ -17,7 +17,7 @@
from falcon.errors import HTTPBadRequest
from falcon.errors import HTTPMethodNotAllowed
from falcon.errors import HTTPNotFound
-from falcon.status_codes import HTTP_204
+from falcon.status_codes import HTTP_200
def path_not_found(req, resp, **kwargs):
@@ -56,7 +56,7 @@
allowed = ', '.join(allowed_methods)
def on_options(req, resp, **kwargs):
- resp.status = HTTP_204
+ resp.status = HTTP_200
resp.set_header('Allow', allowed)
resp.set_header('Content-Length', '0')
| {"golden_diff": "diff --git a/falcon/responders.py b/falcon/responders.py\n--- a/falcon/responders.py\n+++ b/falcon/responders.py\n@@ -17,7 +17,7 @@\n from falcon.errors import HTTPBadRequest\n from falcon.errors import HTTPMethodNotAllowed\n from falcon.errors import HTTPNotFound\n-from falcon.status_codes import HTTP_204\n+from falcon.status_codes import HTTP_200\n \n \n def path_not_found(req, resp, **kwargs):\n@@ -56,7 +56,7 @@\n allowed = ', '.join(allowed_methods)\n \n def on_options(req, resp, **kwargs):\n- resp.status = HTTP_204\n+ resp.status = HTTP_200\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n", "issue": "Default OPTIONS responder does not set Content-Length to \"0\"\nPer RFC 7231:\n\n> A server MUST generate a Content-Length field with a value of \"0\" if no payload body is to be sent in the response.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Default responder implementations.\"\"\"\n\nfrom falcon.errors import HTTPBadRequest\nfrom falcon.errors import HTTPMethodNotAllowed\nfrom falcon.errors import HTTPNotFound\nfrom falcon.status_codes import HTTP_204\n\n\ndef path_not_found(req, resp, **kwargs):\n \"\"\"Raise 404 HTTPNotFound error\"\"\"\n raise HTTPNotFound()\n\n\ndef bad_request(req, resp, **kwargs):\n \"\"\"Raise 400 HTTPBadRequest error\"\"\"\n raise HTTPBadRequest('Bad request', 'Invalid HTTP method')\n\n\ndef create_method_not_allowed(allowed_methods):\n \"\"\"Creates a responder for \"405 Method Not Allowed\"\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n def method_not_allowed(req, resp, **kwargs):\n \"\"\"Raise 405 HTTPMethodNotAllowed error\"\"\"\n raise HTTPMethodNotAllowed(allowed_methods)\n\n return method_not_allowed\n\n\ndef create_default_options(allowed_methods):\n \"\"\"Creates a default responder for the OPTIONS method\n\n Args:\n allowed_methods: A list of HTTP methods (uppercase) that should be\n returned in the Allow header.\n\n \"\"\"\n allowed = ', '.join(allowed_methods)\n\n def on_options(req, resp, **kwargs):\n resp.status = HTTP_204\n resp.set_header('Allow', allowed)\n resp.set_header('Content-Length', '0')\n\n return on_options\n", "path": "falcon/responders.py"}]} | 1,151 | 183 |
gh_patches_debug_37383 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-3568 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python3.4 PyQt5 QML application requires environment variables
With the PyQt5 fixes merged (https://github.com/pyinstaller/pyinstaller/pull/3439), I decided to try creating an executable that I have been having trouble with (see https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155).
The source code is at https://github.com/Siecje/qml-testing/tree/PyInstaller
I'm using
- Windows 7 32-bit
- Qt 5.10.1
- PyQt5 compiled from source
- Python 3.4.4
- pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
When I run the .exe, I get this error:
```
QWindowsEGLStaticContext::create: Failed to load and resolve libEGL function
Failed to load opengl32sw.dll (The specified module could not be found.)
Failed to load and resolve WGL/OpenGL functions
Failed to create OpenGL context for format QSurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip>
This is most likely caused by not having the necessary graphics drivers installed.
Install a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE Open GL ES 2.0 emulation libraries (libEGL.dll, libGLESv2.dll and d3dcompiler_*.dll) are available in the application executable's directory or in a location listed in PATH.
```
To run the application I can copy these four .dlls into the `dist\main\` directory.
- libEGL.dll
- libGLESv2.dll
- d3dcompiler_47.dll
- opengl32sw.dll
When I run it, I get a Command Prompt window with this output:
```
QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001
QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.
```
Instead of copying those .dll files, I can add the Qt bin directory to my PATH:
```
set PATH=%PATH%;C:\Qt\5.10.1\msvc2015\bin
call main.exe
QWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001
QWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.
```
When I copy the `dist\main\` directory to another computer (Windows 10), I have to set two environment variables before the application will work:
```
set QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\PyQt5\Qt\plugins\platforms
set QML2_IMPORT_PATH=%exeDir%\PyQt5\Qt\qml
```
There are no error messages on the Windows 10 computer with these two environment variables set.
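One possible workaround is a PyInstaller runtime hook that sets the variables before PyQt5 loads; this is only a sketch, and the bundle-relative paths are assumptions based on the layout above:
```
# rthook_qt_paths.py (hypothetical) - register with --runtime-hook.
import os
import sys

if getattr(sys, 'frozen', False):
    qt_dir = os.path.join(sys._MEIPASS, 'PyQt5', 'Qt')
    os.environ.setdefault('QT_QPA_PLATFORM_PLUGIN_PATH',
                          os.path.join(qt_dir, 'plugins', 'platforms'))
    os.environ.setdefault('QML2_IMPORT_PATH', os.path.join(qt_dir, 'qml'))
```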
</issue>
<code>
[start of PyInstaller/hooks/hook-PyQt5.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2018, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 import os
10
11 from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files
12
13 hiddenimports = ['sip']
14
15 # Collect the ``qt.conf`` file.
16 datas = [x for x in
17 collect_system_data_files(pyqt5_library_info.location['PrefixPath'],
18 'PyQt5')
19 if os.path.basename(x[0]) == 'qt.conf']
20
21 # Include ICU files, if they exist. See the "Deployment approach" section in
22 # ``PyInstaller/utils/hooks/qt.py``.
23 [(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),
24 os.path.join('PyQt5', 'Qt', 'bin', dll))
25 for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]
26
27 # TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.
28 ##binaries = []
29 ##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):
30 ## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)
31 ## # Only add files if they exist.
32 ## if glob(dll_path):
33 ## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]
34
[end of PyInstaller/hooks/hook-PyQt5.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py
--- a/PyInstaller/hooks/hook-PyQt5.py
+++ b/PyInstaller/hooks/hook-PyQt5.py
@@ -6,6 +6,7 @@
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import glob
import os
from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files
@@ -18,16 +19,42 @@
'PyQt5')
if os.path.basename(x[0]) == 'qt.conf']
-# Include ICU files, if they exist. See the "Deployment approach" section in
-# ``PyInstaller/utils/hooks/qt.py``.
-[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),
- os.path.join('PyQt5', 'Qt', 'bin', dll))
- for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]
-
-# TODO: Include software rendering for OpenGL. See the "Deployment approach". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.
-##binaries = []
-##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):
-## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)
-## # Only add files if they exist.
-## if glob(dll_path):
-## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]
+
+def find_all_or_none(globs_to_include, num_files):
+ """
+ globs_to_include is a list of file name globs
+ If the number of found files does not match num_files
+ then no files will be included.
+ """
+ # TODO: This function is required because CI is failing to include libEGL
+ # The error in AppVeyor is:
+ # [2312] LOADER: Running pyi_lib_PyQt5-uic.py
+ # Failed to load libEGL (Access is denied.)
+ # More info: https://github.com/pyinstaller/pyinstaller/pull/3568
+ # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and
+ # libGLESv2.dll will not be included for PyQt5 builds during CI.
+ to_include = []
+ for dll in globs_to_include:
+ dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],
+ dll)
+ dll_file_paths = glob.glob(dll_path)
+ for dll_file_path in dll_file_paths:
+ file_name = os.path.basename(dll_file_path)
+ dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)
+ to_include.append((dll_file_path, dst_dll_path))
+ if len(to_include) == num_files:
+ return to_include
+ return []
+
+
+binaries = []
+angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']
+binaries += find_all_or_none(angle_files, 3)
+
+opengl_software_renderer = ['opengl32sw.dll']
+binaries += find_all_or_none(opengl_software_renderer, 1)
+
+# Include ICU files, if they exist.
+# See the "Deployment approach" section in ``PyInstaller/utils/hooks/qt.py``.
+icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']
+binaries += find_all_or_none(icu_files, 3)
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PyQt5.py b/PyInstaller/hooks/hook-PyQt5.py\n--- a/PyInstaller/hooks/hook-PyQt5.py\n+++ b/PyInstaller/hooks/hook-PyQt5.py\n@@ -6,6 +6,7 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n+import glob\n import os\n \n from PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n@@ -18,16 +19,42 @@\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n \n-# Include ICU files, if they exist. See the \"Deployment approach\" section in\n-# ``PyInstaller/utils/hooks/qt.py``.\n-[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n- os.path.join('PyQt5', 'Qt', 'bin', dll))\n- for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n-\n-# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n-##binaries = []\n-##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n-## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n-## # Only add files if they exist.\n-## if glob(dll_path):\n-## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n+\n+def find_all_or_none(globs_to_include, num_files):\n+ \"\"\"\n+ globs_to_include is a list of file name globs\n+ If the number of found files does not match num_files\n+ then no files will be included.\n+ \"\"\"\n+ # TODO: This function is required because CI is failing to include libEGL\n+ # The error in AppVeyor is:\n+ # [2312] LOADER: Running pyi_lib_PyQt5-uic.py\n+ # Failed to load libEGL (Access is denied.)\n+ # More info: https://github.com/pyinstaller/pyinstaller/pull/3568\n+ # Since the PyQt5 wheels do not include d3dcompiler_4?.dll, libEGL.dll and\n+ # libGLESv2.dll will not be included for PyQt5 builds during CI.\n+ to_include = []\n+ for dll in globs_to_include:\n+ dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ dll)\n+ dll_file_paths = glob.glob(dll_path)\n+ for dll_file_path in dll_file_paths:\n+ file_name = os.path.basename(dll_file_path)\n+ dst_dll_path = os.path.join('PyQt5', 'Qt', 'bin', file_name)\n+ to_include.append((dll_file_path, dst_dll_path))\n+ if len(to_include) == num_files:\n+ return to_include\n+ return []\n+\n+\n+binaries = []\n+angle_files = ['libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll']\n+binaries += find_all_or_none(angle_files, 3)\n+\n+opengl_software_renderer = ['opengl32sw.dll']\n+binaries += find_all_or_none(opengl_software_renderer, 1)\n+\n+# Include ICU files, if they exist.\n+# See the \"Deployment approach\" section in ``PyInstaller/utils/hooks/qt.py``.\n+icu_files = ['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']\n+binaries += find_all_or_none(icu_files, 3)\n", "issue": "Python3.4 PyQt5 QML application requires environment variables\nWith the PyQt5 fixes merged https://github.com/pyinstaller/pyinstaller/pull/3439 I decided to try creating an executable that I have been having trouble with. 
https://github.com/pyinstaller/pyinstaller/pull/3439#issuecomment-379064155\r\n\r\nThis is the source code https://github.com/Siecje/qml-testing/tree/PyInstaller\r\n\r\nI'm using\r\n- Windows 7 32-bit\r\n- Qt 5.10.1\r\n- PyQt5 compiled from source\r\n- Python 3.4.4\r\n- pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\nWhen I run the .exe I get an error\r\n\r\n```\r\nQWindowsEGLStaticContext::create: Failed to load and resolve libEGL function\r\nFailed to load opengl32sw.dll (The specified module could not be found.)\r\nFailed to load and resolve WGL/OpenGL functions\r\nFailed to create OpenGL context for format QsurfaceFormat(version 2.0, options QFlags<QSurfaceFormat::FormatOption>(), depthBufferSize 24, ...<snip>\r\nThis is most likely caused by not having the necessary graphics drivers installed.\r\n\r\nInstall a driver providing OpenGL 2.0 or higher, or, if this is not possible, make sure the ANGLE Open GL ES 2.0 emulation libraries (libEGL.dll, libLESv2.dll and d3dcompiler_*.dll) are available in the application executabl's directory or in a location listed in PATH.\r\n```\r\n\r\nTo run the application I can copy these four .dlls into the `dist\\main\\` directory.\r\n\r\n- libEGL.dll\r\n- libGLESv2.dll\r\n- d3dcompiler_47.dll\r\n- opengl32sw.dll\r\n\r\nWhen I run it I get Command Prompt window with this output.\r\n```\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\nInstead of copying those .dll files I can add the Qt bin directory to my PATH.\r\n\r\n```\r\nset PATH=%PATH%;C:\\Qt\\5.10.1\\msvc2015\\bin\r\ncall main.exe\r\nQWindowsWGLStaticContext::create: Could not initialize EGL display: error 0x3001\r\nQWindowsWGLStaticContext::create: When using ANGLE, check if d3dcompiler_4x.dll is available.\r\n```\r\n\r\nWhen I copy the `dist\\main\\` to another computer (Windows 10).\r\n\r\nI have to set two environment variables before the application will work.\r\n\r\n```\r\nset QT_QPA_PLATFORM_PLUGIN_PATH=%exeDir%\\PyQt5\\Qt\\plugins\\platforms\r\nset QML2_IMPORT_PATH=%exeDir%\\PyQt5\\Qt\\qml\r\n```\r\n\r\nThere are no error messages on the Windows 10 computer with these two environment variables set.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport os\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, collect_system_data_files\n\nhiddenimports = ['sip']\n\n# Collect the ``qt.conf`` file.\ndatas = [x for x in\n collect_system_data_files(pyqt5_library_info.location['PrefixPath'],\n 'PyQt5')\n if os.path.basename(x[0]) == 'qt.conf']\n\n# Include ICU files, if they exist. See the \"Deployment approach\" section in\n# ``PyInstaller/utils/hooks/qt.py``.\n[(os.path.join(pyqt5_library_info.location['BinariesPath'], dll),\n os.path.join('PyQt5', 'Qt', 'bin', dll))\n for dll in ('icudt??.dll', 'icuin??.dll', 'icuuc??.dll')]\n\n# TODO: Include software rendering for OpenGL. See the \"Deployment approach\". 
However, because the standard PyQt5 wheel `doesn't include <https://www.riverbankcomputing.com/pipermail/pyqt/2018-June/040387.html>`_ ``d3dcompiler_XX.dll``, this produces failures. When the wheel is updated, this code can be uncommented.\n##binaries = []\n##for dll in ('libEGL.dll', 'libGLESv2.dll', 'd3dcompiler_??.dll', 'opengl32sw.dll'):\n## dll_path = os.path.join(pyqt5_library_info.location['BinariesPath'], dll)\n## # Only add files if they exist.\n## if glob(dll_path):\n## binaries += [(dll_path, os.path.join('PyQt5', 'Qt', 'bin', dll))]\n", "path": "PyInstaller/hooks/hook-PyQt5.py"}]} | 1,690 | 919 |
gh_patches_debug_24813 | rasdani/github-patches | git_diff | scrapy__scrapy-2577 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BrowserLikeContextFactory not available in some conditions
While tracing the error that led to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory), without success.
This code lives in [contextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable, because the import was failing on my system due to #2555.
This file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future.
That said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API?
</issue>
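As an illustration of the narrower scope the issue asks for, here is a minimal sketch (hypothetical names, not Scrapy's actual code) that branches on the Twisted version once, instead of wrapping every definition in one module-wide `try/except`:

```python
# Sketch only: gate the rich factory on the Twisted version so that an
# unrelated ImportError can no longer make BrowserLikeContextFactory vanish.
from twisted import version as twisted_version

if (twisted_version.major, twisted_version.minor) >= (14, 0):
    from twisted.internet.ssl import optionsForClientTLS, platformTrust

    class BrowserLikeContextFactory:
        def creator_for_netloc(self, hostname):
            # Verify against the platform's root CAs, as Twisted recommends.
            return optionsForClientTLS(hostname.decode("ascii"),
                                       trustRoot=platformTrust())
else:
    class BrowserLikeContextFactory:
        def creator_for_netloc(self, hostname):
            raise NotImplementedError("requires Twisted >= 14.0")
```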
<code>
[start of scrapy/core/downloader/contextfactory.py]
1 from OpenSSL import SSL
2 from twisted.internet.ssl import ClientContextFactory
3
4 try:
5
6 from zope.interface.declarations import implementer
7
8 # the following should be available from Twisted 14.0.0
9 from twisted.internet.ssl import (optionsForClientTLS,
10 CertificateOptions,
11 platformTrust)
12
13 from twisted.web.client import BrowserLikePolicyForHTTPS
14 from twisted.web.iweb import IPolicyForHTTPS
15
16 from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS
17
18
19 @implementer(IPolicyForHTTPS)
20 class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
21 """
22 Non-peer-certificate verifying HTTPS context factory
23
24 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
25 which allows TLS protocol negotiation
26
27 'A TLS/SSL connection established with [this method] may
28 understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'
29 """
30
31 def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):
32 super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)
33 self._ssl_method = method
34
35 def getCertificateOptions(self):
36 # setting verify=True will require you to provide CAs
37 # to verify against; in other words: it's not that simple
38
39 # backward-compatible SSL/TLS method:
40 #
41 # * this will respect `method` attribute in often recommended
42 # `ScrapyClientContextFactory` subclass
43 # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)
44 #
45 # * getattr() for `_ssl_method` attribute for context factories
46 # not calling super(..., self).__init__
47 return CertificateOptions(verify=False,
48 method=getattr(self, 'method',
49 getattr(self, '_ssl_method', None)),
50 fixBrokenPeers=True,
51 acceptableCiphers=DEFAULT_CIPHERS)
52
53 # kept for old-style HTTP/1.0 downloader context twisted calls,
54 # e.g. connectSSL()
55 def getContext(self, hostname=None, port=None):
56 return self.getCertificateOptions().getContext()
57
58 def creatorForNetloc(self, hostname, port):
59 return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext())
60
61
62 @implementer(IPolicyForHTTPS)
63 class BrowserLikeContextFactory(ScrapyClientContextFactory):
64 """
65 Twisted-recommended context factory for web clients.
66
67 Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:
68 "The default is to use a BrowserLikePolicyForHTTPS,
69 so unless you have special requirements you can leave this as-is."
70
71 creatorForNetloc() is the same as BrowserLikePolicyForHTTPS
72 except this context factory allows setting the TLS/SSL method to use.
73
74 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
75 which allows TLS protocol negotiation.
76 """
77 def creatorForNetloc(self, hostname, port):
78
79 # trustRoot set to platformTrust() will use the platform's root CAs.
80 #
81 # This means that a website like https://www.cacert.org will be rejected
82 # by default, since CAcert.org CA certificate is seldom shipped.
83 return optionsForClientTLS(hostname.decode("ascii"),
84 trustRoot=platformTrust(),
85 extraCertificateOptions={
86 'method': self._ssl_method,
87 })
88
89 except ImportError:
90
91 class ScrapyClientContextFactory(ClientContextFactory):
92 "A SSL context factory which is more permissive against SSL bugs."
93 # see https://github.com/scrapy/scrapy/issues/82
94 # and https://github.com/scrapy/scrapy/issues/26
95 # and https://github.com/scrapy/scrapy/issues/981
96
97 def __init__(self, method=SSL.SSLv23_METHOD):
98 self.method = method
99
100 def getContext(self, hostname=None, port=None):
101 ctx = ClientContextFactory.getContext(self)
102 # Enable all workarounds to SSL bugs as documented by
103 # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
104 ctx.set_options(SSL.OP_ALL)
105 return ctx
106
[end of scrapy/core/downloader/contextfactory.py]
</code>
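To make the failure mode in the listing above concrete, here is a tiny, purely illustrative demonstration of how one broad `try/except` hides which import actually broke (the second module name is invented):

```python
# Everything in the try block lives or dies together, so an unrelated
# breakage also removes the classes defined alongside the real guard.
try:
    from twisted.internet.ssl import optionsForClientTLS  # intended guard
    import module_that_happens_to_be_broken               # hypothetical
    HAVE_RICH_FACTORY = True
except ImportError:
    HAVE_RICH_FACTORY = False  # masks *why* the rich factory is missing
```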
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py
--- a/scrapy/core/downloader/contextfactory.py
+++ b/scrapy/core/downloader/contextfactory.py
@@ -1,15 +1,15 @@
from OpenSSL import SSL
from twisted.internet.ssl import ClientContextFactory
-try:
+from scrapy import twisted_version
+
+if twisted_version >= (14, 0, 0):
from zope.interface.declarations import implementer
- # the following should be available from Twisted 14.0.0
from twisted.internet.ssl import (optionsForClientTLS,
CertificateOptions,
platformTrust)
-
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.iweb import IPolicyForHTTPS
@@ -86,7 +86,7 @@
'method': self._ssl_method,
})
-except ImportError:
+else:
class ScrapyClientContextFactory(ClientContextFactory):
"A SSL context factory which is more permissive against SSL bugs."
| {"golden_diff": "diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py\n--- a/scrapy/core/downloader/contextfactory.py\n+++ b/scrapy/core/downloader/contextfactory.py\n@@ -1,15 +1,15 @@\n from OpenSSL import SSL\n from twisted.internet.ssl import ClientContextFactory\n \n-try:\n+from scrapy import twisted_version\n+\n+if twisted_version >= (14, 0, 0):\n \n from zope.interface.declarations import implementer\n \n- # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n-\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n \n@@ -86,7 +86,7 @@\n 'method': self._ssl_method,\n })\n \n-except ImportError:\n+else:\n \n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n", "issue": "BrowserLikeContextFactory not available in some conditions\nWhile tracing the error that lead to #2555 I tried the workaround [mentioned in the documentation](https://doc.scrapy.org/en/latest/topics/settings.html#downloader-clientcontextfactory) without success.\r\n\r\nThis code lives [incontextfactory.py](https://github.com/scrapy/scrapy/blob/c3411373e8a8ee2786588bdad7be469c69a25e2a/scrapy/core/downloader/contextfactory.py#L63) but was not reachable as the import was failing on my system due to #2555.\r\n\r\nThis file is a large `try/except` block with many potential points of failure and it's likely to trip other users in the future.\r\nThat said, could this be refactored to provide a fallback for `BrowserLikeContextFactory` or otherwise reduce the scope of the `try/except` to avoid breaking the API?\n", "before_files": [{"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions, DEFAULT_CIPHERS\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)),\n fixBrokenPeers=True,\n acceptableCiphers=DEFAULT_CIPHERS)\n\n # kept for old-style 
HTTP/1.0 downloader context twisted calls,\n # e.g. connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}]} | 1,890 | 231 |
gh_patches_debug_63531 | rasdani/github-patches | git_diff | MongoEngine__mongoengine-2224 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New release
Hi,
When is the new release coming? I can't update to MongoDB 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc.
Thanks
</issue>
<code>
[start of mongoengine/__init__.py]
1 # Import submodules so that we can expose their __all__
2 from mongoengine import connection
3 from mongoengine import document
4 from mongoengine import errors
5 from mongoengine import fields
6 from mongoengine import queryset
7 from mongoengine import signals
8
9 # Import everything from each submodule so that it can be accessed via
10 # mongoengine, e.g. instead of `from mongoengine.connection import connect`,
11 # users can simply use `from mongoengine import connect`, or even
12 # `from mongoengine import *` and then `connect('testdb')`.
13 from mongoengine.connection import *
14 from mongoengine.document import *
15 from mongoengine.errors import *
16 from mongoengine.fields import *
17 from mongoengine.queryset import *
18 from mongoengine.signals import *
19
20
21 __all__ = (
22 list(document.__all__)
23 + list(fields.__all__)
24 + list(connection.__all__)
25 + list(queryset.__all__)
26 + list(signals.__all__)
27 + list(errors.__all__)
28 )
29
30
31 VERSION = (0, 18, 2)
32
33
34 def get_version():
35 """Return the VERSION as a string.
36
37 For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.
38 """
39 return ".".join(map(str, VERSION))
40
41
42 __version__ = get_version()
43
[end of mongoengine/__init__.py]
</code>
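For reference, the release mechanics in the listing reduce to bumping the tuple; a trivial, self-contained sketch (the tuple value here is hypothetical):

```python
VERSION = (0, 19, 0)  # hypothetical next release

def get_version():
    return ".".join(map(str, VERSION))

assert get_version() == "0.19.0"
```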
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py
--- a/mongoengine/__init__.py
+++ b/mongoengine/__init__.py
@@ -28,7 +28,7 @@
)
-VERSION = (0, 18, 2)
+VERSION = (0, 19, 0)
def get_version():
| {"golden_diff": "diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py\n--- a/mongoengine/__init__.py\n+++ b/mongoengine/__init__.py\n@@ -28,7 +28,7 @@\n )\n \n \n-VERSION = (0, 18, 2)\n+VERSION = (0, 19, 0)\n \n \n def get_version():\n", "issue": "New release\nHi,\r\n\r\nWhen is coming new release, because I can't update to mongodb 4.2 because of this: https://github.com/MongoEngine/mongoengine/pull/2160/commits/47f8a126ca167cb8fe020e3cc5604b155dfcdebc.\r\n\r\nThanks\n", "before_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 18, 2)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}]} | 978 | 86 |
gh_patches_debug_4281 | rasdani/github-patches | git_diff | ocf__ocfweb-808 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disk quota check on the Account commands page errors
When trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/), this error appears:
`quota: Bad output format units for human readable output: vQ`

</issue>
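The error text matches an option-parsing quirk: `quota`'s `-s` (`--human-readable`) takes an *optional* units argument, so in the bundled form `-svQ` the trailing `vQ` is consumed as a units string. A rough way to compare the two spellings (assumes the `quota` binary is present; output depends on the local build):

```python
import subprocess

# '-svQ': 'vQ' is parsed as the optional units argument of -s and rejected.
# '-vQs': -s comes last with nothing attached, i.e. plain human-readable.
for flags in ('-svQ', '-vQs'):
    proc = subprocess.run(['quota', flags], capture_output=True, text=True)
    print(flags, '->', proc.returncode, (proc.stderr or proc.stdout).strip())
```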
<code>
[start of ocfweb/account/commands.py]
1 from django import forms
2 from django.forms import widgets
3 from django.http import HttpRequest
4 from django.http import HttpResponse
5 from django.shortcuts import render
6 from paramiko import AuthenticationException
7 from paramiko import SSHClient
8 from paramiko.hostkeys import HostKeyEntry
9
10 from ocfweb.component.forms import Form
11
12
13 def commands(request: HttpRequest) -> HttpResponse:
14 command_to_run = ''
15 output = ''
16 error = ''
17 if request.method == 'POST':
18 form = CommandForm(request.POST)
19 if form.is_valid():
20 username = form.cleaned_data['username']
21 password = form.cleaned_data['password']
22
23 command_to_run = form.cleaned_data['command_to_run']
24
25 ssh = SSHClient()
26
27 host_keys = ssh.get_host_keys()
28 entry = HostKeyEntry.from_line(
29 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa
30 )
31 assert entry is not None # should never be none as we are passing a static string above
32 host_keys.add(
33 'ssh.ocf.berkeley.edu',
34 'ssh-rsa',
35 entry.key,
36 )
37
38 try:
39 ssh.connect(
40 'ssh.ocf.berkeley.edu',
41 username=username,
42 password=password,
43 )
44 except AuthenticationException:
45 error = 'Authentication failed. Did you type the wrong username or password?'
46
47 if not error:
48 _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)
49 output = ssh_stdout.read().decode()
50 error = ssh_stderr.read().decode()
51 else:
52 form = CommandForm()
53
54 return render(
55 request,
56 'account/commands/index.html', {
57 'title': 'Account commands',
58 'form': form,
59 'command': command_to_run,
60 'output': output,
61 'error': error,
62 },
63 )
64
65
66 class CommandForm(Form):
67 username = forms.CharField(
68 label='OCF username',
69 min_length=3,
70 max_length=16,
71 )
72 password = forms.CharField(
73 widget=forms.PasswordInput,
74 label='Password',
75 min_length=8,
76 max_length=256,
77 )
78
79 COMMAND_CHOICES = (
80 (
81 '/opt/share/utils/bin/paper',
82 'paper quota -- how many pages you have remaining this semester',
83 ),
84 (
85 '/usr/bin/quota -svQ',
86 'disk quota -- how much disk space you have used and how much you ' +
87 'have left',
88 ),
89 (
90 '/opt/share/utils/bin/makehttp',
91 'makehttp -- set up the web space for your OCF account',
92 ),
93 (
94 'echo yes | /opt/share/utils/bin/makemysql',
95 'makemysql -- reset your MySQL database password, or create a new ' +
96 'MySQL database (copy down the password somewhere secure)',
97 ),
98 )
99
100 command_to_run = forms.ChoiceField(
101 choices=COMMAND_CHOICES,
102 label='Command to run',
103 widget=widgets.RadioSelect,
104 )
105
[end of ocfweb/account/commands.py]
</code>
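Unrelated to the bug itself, the host-key handling in `commands()` above is a pinning pattern; a stripped-down sketch of the same idea (function name hypothetical, key line shortened):

```python
from paramiko import SSHClient
from paramiko.hostkeys import HostKeyEntry

def pinned_ssh_client(host, key_line):
    # Trust exactly one pre-shared key instead of auto-adding whatever
    # key the remote presents on first contact.
    client = SSHClient()
    entry = HostKeyEntry.from_line(key_line)  # "host ssh-rsa AAAA..."
    assert entry is not None
    client.get_host_keys().add(host, 'ssh-rsa', entry.key)
    return client
```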
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py
--- a/ocfweb/account/commands.py
+++ b/ocfweb/account/commands.py
@@ -82,7 +82,7 @@
'paper quota -- how many pages you have remaining this semester',
),
(
- '/usr/bin/quota -svQ',
+ '/usr/bin/quota -vQs',
'disk quota -- how much disk space you have used and how much you ' +
'have left',
),
| {"golden_diff": "diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py\n--- a/ocfweb/account/commands.py\n+++ b/ocfweb/account/commands.py\n@@ -82,7 +82,7 @@\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n- '/usr/bin/quota -svQ',\n+ '/usr/bin/quota -vQs',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n", "issue": "disk quota on Account commands page errors\nWhen trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/)\r\nthis error appears\r\n\r\n`quota: Bad output format units for human readable output: vQ`\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.forms import widgets\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom paramiko import AuthenticationException\nfrom paramiko import SSHClient\nfrom paramiko.hostkeys import HostKeyEntry\n\nfrom ocfweb.component.forms import Form\n\n\ndef commands(request: HttpRequest) -> HttpResponse:\n command_to_run = ''\n output = ''\n error = ''\n if request.method == 'POST':\n form = CommandForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n command_to_run = form.cleaned_data['command_to_run']\n\n ssh = SSHClient()\n\n host_keys = ssh.get_host_keys()\n entry = HostKeyEntry.from_line(\n 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa\n )\n assert entry is not None # should never be none as we are passing a static string above\n host_keys.add(\n 'ssh.ocf.berkeley.edu',\n 'ssh-rsa',\n entry.key,\n )\n\n try:\n ssh.connect(\n 'ssh.ocf.berkeley.edu',\n username=username,\n password=password,\n )\n except AuthenticationException:\n error = 'Authentication failed. 
Did you type the wrong username or password?'\n\n if not error:\n _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)\n output = ssh_stdout.read().decode()\n error = ssh_stderr.read().decode()\n else:\n form = CommandForm()\n\n return render(\n request,\n 'account/commands/index.html', {\n 'title': 'Account commands',\n 'form': form,\n 'command': command_to_run,\n 'output': output,\n 'error': error,\n },\n )\n\n\nclass CommandForm(Form):\n username = forms.CharField(\n label='OCF username',\n min_length=3,\n max_length=16,\n )\n password = forms.CharField(\n widget=forms.PasswordInput,\n label='Password',\n min_length=8,\n max_length=256,\n )\n\n COMMAND_CHOICES = (\n (\n '/opt/share/utils/bin/paper',\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n '/usr/bin/quota -svQ',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n (\n '/opt/share/utils/bin/makehttp',\n 'makehttp -- set up the web space for your OCF account',\n ),\n (\n 'echo yes | /opt/share/utils/bin/makemysql',\n 'makemysql -- reset your MySQL database password, or create a new ' +\n 'MySQL database (copy down the password somewhere secure)',\n ),\n )\n\n command_to_run = forms.ChoiceField(\n choices=COMMAND_CHOICES,\n label='Command to run',\n widget=widgets.RadioSelect,\n )\n", "path": "ocfweb/account/commands.py"}]} | 1,780 | 123 |
gh_patches_debug_23038 | rasdani/github-patches | git_diff | aws__aws-cli-2702 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cloudformation deploy does not honour tabs in JSON templates
Most of our templates use tabs for indentation, and everything was fine until we tried to feed a template to `aws cloudformation deploy`, which rejected it even though every other function in AWS CLI handles the template just fine:
```
[galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template
{
"CapabilitiesReason": "The following resource(s) require capabilities: [AWS::IAM::Role]",
"Description": "Playing with CodeCommit, CodeBuild, and CodeDeploy",
"Parameters": [
```
and the only function that fails to parse the template is:
```
[galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file codepipeline.template --capabilities CAPABILITY_IAM
while scanning for the next token
found character '\t' that cannot start any token
in "<string>", line 2, column 1:
"AWSTemplateFormatVersion": "20 ...
^
```
A quick fix is to replace tabs with spaces:
```
[galaxy@athena templates]$ sed 's,\t, ,g' codepipeline.template > c.template
[galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file c.template --capabilities CAPABILITY_IAM
Waiting for changeset to be created..
Waiting for stack create/update to complete
```
... but it would mean fixing all our templates, which are valid JSON, just to work around a bug in the tool! :(
</issue>
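Because JSON allows tabs while YAML forbids them in indentation, one plausible direction is to attempt a strict JSON parse first and only fall back to YAML; a minimal sketch, not necessarily how the CLI resolves it:

```python
import json
import yaml

def parse_template(text):
    try:
        return json.loads(text)      # tab-indented JSON parses fine here
    except ValueError:
        return yaml.safe_load(text)  # genuine YAML; tabs stay disallowed

print(parse_template('{\n\t"AWSTemplateFormatVersion": "2010-09-09"\n}'))
```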
<code>
[start of awscli/customizations/cloudformation/yamlhelper.py]
1 # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import yaml
15 from awscli.compat import six
16 from yaml.resolver import ScalarNode, SequenceNode
17
18
19 def intrinsics_multi_constructor(loader, tag_prefix, node):
20 """
21 YAML constructor to parse CloudFormation intrinsics.
22     This will return a dictionary with the key being the intrinsic name
23 """
24
25 # Get the actual tag name excluding the first exclamation
26 tag = node.tag[1:]
27
28 # All CloudFormation intrinsics have prefix Fn:: except Ref
29 prefix = "Fn::"
30 if tag == "Ref":
31 prefix = ""
32
33 cfntag = prefix + tag
34
35 if tag == "GetAtt" and isinstance(node.value, six.string_types):
36 # ShortHand notation for !GetAtt accepts Resource.Attribute format
37 # while the standard notation is to use an array
38 # [Resource, Attribute]. Convert shorthand to standard format
39 value = node.value.split(".", 1)
40
41 elif isinstance(node, ScalarNode):
42 # Value of this node is scalar
43 value = loader.construct_scalar(node)
44
45 elif isinstance(node, SequenceNode):
46 # Value of this node is an array (Ex: [1,2])
47 value = loader.construct_sequence(node)
48
49 else:
50 # Value of this node is an mapping (ex: {foo: bar})
51 value = loader.construct_mapping(node)
52
53 return {cfntag: value}
54
55
56 def yaml_dump(dict_to_dump):
57 """
58 Dumps the dictionary as a YAML document
59 :param dict_to_dump:
60 :return:
61 """
62 return yaml.safe_dump(dict_to_dump, default_flow_style=False)
63
64
65 def yaml_parse(yamlstr):
66
67 yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
68
69 return yaml.safe_load(yamlstr)
70
[end of awscli/customizations/cloudformation/yamlhelper.py]
</code>
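A small usage sketch of the intrinsics constructor defined above (assumes the module is importable under its listed path):

```python
from awscli.customizations.cloudformation.yamlhelper import yaml_parse

# The !GetAtt shorthand becomes the standard two-element list form.
doc = yaml_parse("Value: !GetAtt Resource.Attribute")
assert doc == {"Value": {"Fn::GetAtt": ["Resource", "Attribute"]}}
```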
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py
--- a/awscli/customizations/cloudformation/yamlhelper.py
+++ b/awscli/customizations/cloudformation/yamlhelper.py
@@ -10,11 +10,12 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-
+import json
import yaml
-from awscli.compat import six
from yaml.resolver import ScalarNode, SequenceNode
+from awscli.compat import six
+
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
@@ -63,7 +64,13 @@
def yaml_parse(yamlstr):
-
- yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
-
- return yaml.safe_load(yamlstr)
+ """Parse a yaml string"""
+ try:
+ # PyYAML doesn't support json as well as it should, so if the input
+ # is actually just json it is better to parse it with the standard
+ # json parser.
+ return json.loads(yamlstr)
+ except ValueError:
+ yaml.SafeLoader.add_multi_constructor(
+ "!", intrinsics_multi_constructor)
+ return yaml.safe_load(yamlstr)
| {"golden_diff": "diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py\n--- a/awscli/customizations/cloudformation/yamlhelper.py\n+++ b/awscli/customizations/cloudformation/yamlhelper.py\n@@ -10,11 +10,12 @@\n # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n-\n+import json\n import yaml\n-from awscli.compat import six\n from yaml.resolver import ScalarNode, SequenceNode\n \n+from awscli.compat import six\n+\n \n def intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n@@ -63,7 +64,13 @@\n \n \n def yaml_parse(yamlstr):\n-\n- yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n-\n- return yaml.safe_load(yamlstr)\n+ \"\"\"Parse a yaml string\"\"\"\n+ try:\n+ # PyYAML doesn't support json as well as it should, so if the input\n+ # is actually just json it is better to parse it with the standard\n+ # json parser.\n+ return json.loads(yamlstr)\n+ except ValueError:\n+ yaml.SafeLoader.add_multi_constructor(\n+ \"!\", intrinsics_multi_constructor)\n+ return yaml.safe_load(yamlstr)\n", "issue": "cloudformation deploy does not honour tabs in JSON templates\nMost of our templates are using tabs for the indentation and everything was fine until we tried to feed the template to `aws cloudformation deploy` which did not like the template despite that every other function in AWS CLI works with the template just fine:\r\n\r\n```\r\n[galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template\r\n{\r\n \"CapabilitiesReason\": \"The following resource(s) require capabilities: [AWS::IAM::Role]\", \r\n \"Description\": \"Playing with CodeCommit, CodeBuild, and CodeDeploy\", \r\n \"Parameters\": [\r\n```\r\nand the only function that fails to parse the template is:\r\n```\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file codepipeline.template --capabilities CAPABILITY_IAM\r\n\r\nwhile scanning for the next token\r\nfound character '\\t' that cannot start any token\r\n in \"<string>\", line 2, column 1:\r\n \t\"AWSTemplateFormatVersion\": \"20 ... \r\n ^\r\n```\r\nA quick fix is to replace tabs with spaces:\r\n```\r\n[galaxy@athena templates]$ sed 's,\\t, ,g' codepipeline.template > c.template\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file c.template --capabilities CAPABILITY_IAM\r\nWaiting for changeset to be created..\r\nWaiting for stack create/update to complete\r\n```\r\n\r\n... but it would mean that we would need to fix all our templates which are valid JSON just to workaround a bug in the tool! :(\n", "before_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport yaml\nfrom awscli.compat import six\nfrom yaml.resolver import ScalarNode, SequenceNode\n\n\ndef intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n YAML constructor to parse CloudFormation intrinsics.\n This will return a dictionary with key being the instrinsic name\n \"\"\"\n\n # Get the actual tag name excluding the first exclamation\n tag = node.tag[1:]\n\n # All CloudFormation intrinsics have prefix Fn:: except Ref\n prefix = \"Fn::\"\n if tag == \"Ref\":\n prefix = \"\"\n\n cfntag = prefix + tag\n\n if tag == \"GetAtt\" and isinstance(node.value, six.string_types):\n # ShortHand notation for !GetAtt accepts Resource.Attribute format\n # while the standard notation is to use an array\n # [Resource, Attribute]. Convert shorthand to standard format\n value = node.value.split(\".\", 1)\n\n elif isinstance(node, ScalarNode):\n # Value of this node is scalar\n value = loader.construct_scalar(node)\n\n elif isinstance(node, SequenceNode):\n # Value of this node is an array (Ex: [1,2])\n value = loader.construct_sequence(node)\n\n else:\n # Value of this node is an mapping (ex: {foo: bar})\n value = loader.construct_mapping(node)\n\n return {cfntag: value}\n\n\ndef yaml_dump(dict_to_dump):\n \"\"\"\n Dumps the dictionary as a YAML document\n :param dict_to_dump:\n :return:\n \"\"\"\n return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n\n\ndef yaml_parse(yamlstr):\n\n yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n\n return yaml.safe_load(yamlstr)\n", "path": "awscli/customizations/cloudformation/yamlhelper.py"}]} | 1,539 | 305 |
gh_patches_debug_15678 | rasdani/github-patches | git_diff | celery__celery-8338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
database backend does not store children
The [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.
</issue>
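A minimal, self-contained sketch of what persisting children alongside the other result fields could look like (class and table names are illustrative, not Celery's actual schema):

```python
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base
from sqlalchemy.types import PickleType

Base = declarative_base()

class TaskResult(Base):
    __tablename__ = 'task_result_sketch'
    id = sa.Column(sa.Integer, primary_key=True)
    task_id = sa.Column(sa.String(155), unique=True)
    result = sa.Column(PickleType, nullable=True)
    children = sa.Column(PickleType, nullable=True)  # the missing field

    def to_dict(self):
        return {'task_id': self.task_id, 'result': self.result,
                'children': self.children}
```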
<code>
[start of celery/backends/database/models.py]
1 """Database models used by the SQLAlchemy result store backend."""
2 from datetime import datetime
3
4 import sqlalchemy as sa
5 from sqlalchemy.types import PickleType
6
7 from celery import states
8
9 from .session import ResultModelBase
10
11 __all__ = ('Task', 'TaskExtended', 'TaskSet')
12
13
14 class Task(ResultModelBase):
15 """Task result/status."""
16
17 __tablename__ = 'celery_taskmeta'
18 __table_args__ = {'sqlite_autoincrement': True}
19
20 id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),
21 primary_key=True, autoincrement=True)
22 task_id = sa.Column(sa.String(155), unique=True)
23 status = sa.Column(sa.String(50), default=states.PENDING)
24 result = sa.Column(PickleType, nullable=True)
25 date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
26 onupdate=datetime.utcnow, nullable=True)
27 traceback = sa.Column(sa.Text, nullable=True)
28
29 def __init__(self, task_id):
30 self.task_id = task_id
31
32 def to_dict(self):
33 return {
34 'task_id': self.task_id,
35 'status': self.status,
36 'result': self.result,
37 'traceback': self.traceback,
38 'date_done': self.date_done,
39 }
40
41 def __repr__(self):
42 return '<Task {0.task_id} state: {0.status}>'.format(self)
43
44 @classmethod
45 def configure(cls, schema=None, name=None):
46 cls.__table__.schema = schema
47 cls.id.default.schema = schema
48 cls.__table__.name = name or cls.__tablename__
49
50
51 class TaskExtended(Task):
52     """For the extended result."""
53
54 __tablename__ = 'celery_taskmeta'
55 __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}
56
57 name = sa.Column(sa.String(155), nullable=True)
58 args = sa.Column(sa.LargeBinary, nullable=True)
59 kwargs = sa.Column(sa.LargeBinary, nullable=True)
60 worker = sa.Column(sa.String(155), nullable=True)
61 retries = sa.Column(sa.Integer, nullable=True)
62 queue = sa.Column(sa.String(155), nullable=True)
63
64 def to_dict(self):
65 task_dict = super().to_dict()
66 task_dict.update({
67 'name': self.name,
68 'args': self.args,
69 'kwargs': self.kwargs,
70 'worker': self.worker,
71 'retries': self.retries,
72 'queue': self.queue,
73 })
74 return task_dict
75
76
77 class TaskSet(ResultModelBase):
78 """TaskSet result."""
79
80 __tablename__ = 'celery_tasksetmeta'
81 __table_args__ = {'sqlite_autoincrement': True}
82
83 id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),
84 autoincrement=True, primary_key=True)
85 taskset_id = sa.Column(sa.String(155), unique=True)
86 result = sa.Column(PickleType, nullable=True)
87 date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
88 nullable=True)
89
90 def __init__(self, taskset_id, result):
91 self.taskset_id = taskset_id
92 self.result = result
93
94 def to_dict(self):
95 return {
96 'taskset_id': self.taskset_id,
97 'result': self.result,
98 'date_done': self.date_done,
99 }
100
101 def __repr__(self):
102 return f'<TaskSet: {self.taskset_id}>'
103
104 @classmethod
105 def configure(cls, schema=None, name=None):
106 cls.__table__.schema = schema
107 cls.id.default.schema = schema
108 cls.__table__.name = name or cls.__tablename__
109
[end of celery/backends/database/models.py]
</code>
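Once a `children` column exists and the backend writes it, the visible difference is at the result API; purely illustrative (placeholder task id, assumes a configured app with a database backend):

```python
from celery.result import AsyncResult

res = AsyncResult('parent-task-id')  # placeholder task id
print(res.children)  # would list child results instead of always being empty
```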
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py
--- a/celery/backends/database/models.py
+++ b/celery/backends/database/models.py
@@ -25,6 +25,7 @@
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow, nullable=True)
traceback = sa.Column(sa.Text, nullable=True)
+ children = sa.Column(PickleType, nullable=True)
def __init__(self, task_id):
self.task_id = task_id
@@ -36,6 +37,7 @@
'result': self.result,
'traceback': self.traceback,
'date_done': self.date_done,
+ 'children': self.children,
}
def __repr__(self):
| {"golden_diff": "diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py\n--- a/celery/backends/database/models.py\n+++ b/celery/backends/database/models.py\n@@ -25,6 +25,7 @@\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n+ children = sa.Column(PickleType, nullable=True)\n \n def __init__(self, task_id):\n self.task_id = task_id\n@@ -36,6 +37,7 @@\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n+ 'children': self.children,\n }\n \n def __repr__(self):\n", "issue": "database backend does not store children\nThe [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.\n", "before_files": [{"content": "\"\"\"Database models used by the SQLAlchemy result store backend.\"\"\"\nfrom datetime import datetime\n\nimport sqlalchemy as sa\nfrom sqlalchemy.types import PickleType\n\nfrom celery import states\n\nfrom .session import ResultModelBase\n\n__all__ = ('Task', 'TaskExtended', 'TaskSet')\n\n\nclass Task(ResultModelBase):\n \"\"\"Task result/status.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),\n primary_key=True, autoincrement=True)\n task_id = sa.Column(sa.String(155), unique=True)\n status = sa.Column(sa.String(50), default=states.PENDING)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n\n def __init__(self, task_id):\n self.task_id = task_id\n\n def to_dict(self):\n return {\n 'task_id': self.task_id,\n 'status': self.status,\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return '<Task {0.task_id} state: {0.status}>'.format(self)\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n\n\nclass TaskExtended(Task):\n \"\"\"For the extend result.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}\n\n name = sa.Column(sa.String(155), nullable=True)\n args = sa.Column(sa.LargeBinary, nullable=True)\n kwargs = sa.Column(sa.LargeBinary, nullable=True)\n worker = sa.Column(sa.String(155), nullable=True)\n retries = sa.Column(sa.Integer, nullable=True)\n queue = sa.Column(sa.String(155), nullable=True)\n\n def to_dict(self):\n task_dict = super().to_dict()\n task_dict.update({\n 'name': self.name,\n 'args': self.args,\n 'kwargs': self.kwargs,\n 'worker': self.worker,\n 'retries': self.retries,\n 'queue': self.queue,\n })\n return task_dict\n\n\nclass TaskSet(ResultModelBase):\n \"\"\"TaskSet result.\"\"\"\n\n __tablename__ = 'celery_tasksetmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),\n autoincrement=True, primary_key=True)\n taskset_id 
= sa.Column(sa.String(155), unique=True)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n nullable=True)\n\n def __init__(self, taskset_id, result):\n self.taskset_id = taskset_id\n self.result = result\n\n def to_dict(self):\n return {\n 'taskset_id': self.taskset_id,\n 'result': self.result,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return f'<TaskSet: {self.taskset_id}>'\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n", "path": "celery/backends/database/models.py"}]} | 1,688 | 177 |
gh_patches_debug_2226 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5568 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support matplotlib-3.4.0rc1
## Description of the issue
`matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b.
This is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`.
## Context information (for bug reports)
* Output of `pyinstaller --version`: ```4.2```
* Version of Python: `3.8`
* Platform: `Windows`
* Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug`
> * try the latest development version, using the following command:
>
> ```shell
> pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
> ```
>
> * follow *all* the instructions in our "If Things Go Wrong" Guide
> (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and
>
> ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)
>
> * [ ] start with clean installation
> * [ ] use the latest development version
> * [ ] Run your frozen program **from a command window (shell)** — instead of double-clicking on it
> * [ ] Package your program in **--onedir mode**
> * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file
> * [ ] Repackage your application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.
>
This part of the template is irrelevant, as the responsible code is unchanged on `develop`
### A minimal example program which shows the error
```
pip install --pre matplotlib==3.4.0rc1 pyinstaller
echo "import matplotlib" > test.py
pyinstaller test.py
```
### Stacktrace / full error message
Building `pyinstaller test.py`:
```
20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'...
Traceback (most recent call last):
File "<string>", line 1, in <module>
AttributeError: module 'matplotlib' has no attribute '_get_data_path'
```
Running `test.exe`:
```
Traceback (most recent call last):
File "test.py", line 1, in <module>
import matplotlib
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "PyInstaller\loader\pyimod03_importers.py", line 531, in exec_module
File "matplotlib\__init__.py", line 820, in <module>
File "matplotlib\__init__.py", line 725, in _rc_params_in_file
File "contextlib.py", line 113, in __enter__
File "matplotlib\__init__.py", line 703, in _open_file_or_url
FileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\matplotlib\\mpl-data\\matplotlibrc'
[2688] Failed to execute script test
```
## Possible resolution
Simply remove the first underscore in `matplotlib._get_data_path` in
https://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16
This works on my system.
I'm a little confused as to why the private function was being used in the first place. `matplotlib.get_data_path` has been available for some time.
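For concreteness, here is a sketch of the whole hook after that one-character change (everything except the underscore is unchanged from the listing below):
```python
from PyInstaller.utils.hooks import exec_statement

mpl_data_dir = exec_statement(
    "import matplotlib; print(matplotlib.get_data_path())")

datas = [
    (mpl_data_dir, "matplotlib/mpl-data"),
]
```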
</issue>
<code>
[start of PyInstaller/hooks/hook-matplotlib.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12
13 from PyInstaller.utils.hooks import exec_statement
14
15 mpl_data_dir = exec_statement(
16 "import matplotlib; print(matplotlib._get_data_path())")
17
18 datas = [
19 (mpl_data_dir, "matplotlib/mpl-data"),
20 ]
21
[end of PyInstaller/hooks/hook-matplotlib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py
--- a/PyInstaller/hooks/hook-matplotlib.py
+++ b/PyInstaller/hooks/hook-matplotlib.py
@@ -13,7 +13,7 @@
from PyInstaller.utils.hooks import exec_statement
mpl_data_dir = exec_statement(
- "import matplotlib; print(matplotlib._get_data_path())")
+ "import matplotlib; print(matplotlib.get_data_path())")
datas = [
(mpl_data_dir, "matplotlib/mpl-data"),
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py\n--- a/PyInstaller/hooks/hook-matplotlib.py\n+++ b/PyInstaller/hooks/hook-matplotlib.py\n@@ -13,7 +13,7 @@\n from PyInstaller.utils.hooks import exec_statement\n \n mpl_data_dir = exec_statement(\n- \"import matplotlib; print(matplotlib._get_data_path())\")\n+ \"import matplotlib; print(matplotlib.get_data_path())\")\n \n datas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n", "issue": "Support matplotlib-3.4.0rc1\n## Description of the issue\r\n`matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b.\r\nThis is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`.\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.2```\r\n* Version of Python: `3.8`\r\n* Platform: `Windows`\r\n* Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug`\r\n\r\n\r\n> * try the latest development version, using the following command: \r\n> \r\n> ```shell\r\n> pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\n> ```\r\n> \r\n> * follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n> (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n> \r\n> ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n> \r\n> * [ ] start with clean installation\r\n> * [ ] use the latest development version\r\n> * [ ] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it\r\n> * [ ] Package your program in **--onedir mode**\r\n> * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file\r\n> * [ ] Repackage you application in **verbose/debug mode**. 
For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n> \r\nThis part of the template is irrelevant, as the responsible code is unchanged on `develop`\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\npip install --pre matplotlib==3.4.0rc1 pyinstaller\r\necho \"import matplotlib\" > test.py\r\npyinstaller test.py\r\n```\r\n\r\n### Stacktrace / full error message\r\nBuilding `pyinstaller test.py`:\r\n```\r\n20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'...\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nAttributeError: module 'matplotlib' has no attribute '_get_data_path'\r\n\r\n```\r\nRunning `test.exe`:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 1, in <module>\r\n import matplotlib\r\n File \"<frozen importlib._bootstrap>\", line 991, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 975, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 671, in _load_unlocked\r\n File \"PyInstaller\\loader\\pyimod03_importers.py\", line 531, in exec_module\r\n File \"matplotlib\\__init__.py\", line 820, in <module>\r\n File \"matplotlib\\__init__.py\", line 725, in _rc_params_in_file\r\n File \"contextlib.py\", line 113, in __enter__\r\n File \"matplotlib\\__init__.py\", line 703, in _open_file_or_url\r\nFileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\\\matplotlib\\\\mpl-data\\\\matplotlibrc'\r\n[2688] Failed to execute script test\r\n\r\n```\r\n\r\n## Possible resolution\r\n\r\nSimply remove the first underscore in `matplotlib._get_data_path` in \r\n\r\nhttps://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16\r\n\r\nThis works on my system.\r\n\r\nI'm a little confused as to why the private function was being used in the first place. `matplotlib.get_data_path` has been available for some time.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}]} | 1,730 | 127 |
gh_patches_debug_22927 | rasdani/github-patches | git_diff | beeware__toga-1193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImageView only works with square images
I created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html)
My Python version is 3.8.5.
When I try to add images, some work and some do not.
This code doesn't work:
`image_from_url = toga.Image("https://dummyimage.com/100x67/000/fff")
imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))`
And this is working fine:
`image_from_url = toga.Image("https://dummyimage.com/100x100/000/fff")
imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))`
The error I get is:
> (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed
> ... TypeError: Argument 0 does not allow None as a value
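To make the failure concrete, here is a standalone trace through the GTK backend's `_resize_max` helper (its logic is copied verbatim from the listing below) using the 1x1 allocation a widget has before its first real layout pass; this is an illustrative reproduction, not new library code:
```python
def _resize_max(original_height, original_width, max_height, max_width):
    # Logic copied from toga_gtk/widgets/imageview.py (listed below).
    if min(original_height, original_width, max_height, max_width) <= 0:
        return 1, 1
    width_ratio = max_width / original_width
    height_ratio = max_height / original_height
    height = original_height * width_ratio
    if height <= max_height:
        width = original_width * width_ratio
    else:
        height = original_height * height_ratio
        width = original_width * height_ratio
    return int(height), int(width)

print(_resize_max(67, 100, 1, 1))    # (0, 1): the 100x67 image scales to height 0
print(_resize_max(100, 100, 1, 1))   # (1, 1): square images stay valid
```
With a height of 0, `gdk_pixbuf_scale_simple` fails its `dest_height > 0` assertion and returns `None`, and passing that `None` pixbuf to `Gdk.cairo_surface_create_from_pixbuf` produces the `TypeError` quoted above.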
</issue>
<code>
[start of src/gtk/toga_gtk/widgets/imageview.py]
1 from ..libs import GdkPixbuf, Gtk, Gdk
2 from .base import Widget
3
4
5 class ImageView(Widget):
6 def create(self):
7 self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
8 self._image = Gtk.Image()
9 self._pixbuf = None
10 self.native.add(self._image)
11 self.native.interface = self.interface
12
13 def set_image(self, image):
14 self._pixbuf = image._impl.native
15
16 def set_bounds(self, x, y, width, height):
17 super().set_bounds(x, y, width, height)
18 # rehint to update scaling of pixbuf
19 self.rehint()
20
21 def rehint(self):
22 if self._pixbuf:
23 height, width = self._resize_max(
24 original_height=self._pixbuf.get_height(),
25 original_width=self._pixbuf.get_width(),
26 max_height=self.native.get_allocated_height(),
27 max_width=self.native.get_allocated_width(),
28 )
29
30 dpr = self.native.get_scale_factor()
31
32 scaled_pixbuf = self._pixbuf.scale_simple(
33 width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR
34 )
35
36 surface = Gdk.cairo_surface_create_from_pixbuf(
37 scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window
38 )
39 self._image.set_from_surface(surface)
40
41 @staticmethod
42 def _resize_max(original_height, original_width, max_height, max_width):
43
44 # Check to make sure all dimensions have valid sizes
45 if min(original_height, original_width, max_height, max_width) <= 0:
46 return 1, 1
47
48 width_ratio = max_width / original_width
49 height_ratio = max_height / original_height
50
51 height = original_height * width_ratio
52 if height <= max_height:
53 width = original_width * width_ratio
54 else:
55 height = original_height * height_ratio
56 width = original_width * height_ratio
57
58 return int(height), int(width)
59
[end of src/gtk/toga_gtk/widgets/imageview.py]
[start of examples/imageview/imageview/app.py]
1 import toga
2 from toga.style.pack import CENTER, COLUMN
3
4
5 class ImageViewApp(toga.App):
6 def startup(self):
7 self.main_window = toga.MainWindow(title=self.name)
8
9 box = toga.Box()
10 box.style.padding = 40
11 box.style.update(alignment=CENTER)
12 box.style.update(direction=COLUMN)
13
14 # image from local path
15 # load brutus.png from the package
16 # We set the style width/height parameters for this one
17 image_from_path = toga.Image('resources/brutus.png')
18 imageview_from_path = toga.ImageView(image_from_path)
19 imageview_from_path.style.update(height=72)
20 imageview_from_path.style.update(width=72)
21 box.add(imageview_from_path)
22
23 # image from remote URL
24 # no style parameters - we let Pack determine how to allocate
25 # the space
26 image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')
27 imageview_from_url = toga.ImageView(image_from_url)
28 box.add(imageview_from_url)
29
30 self.main_window.content = box
31 self.main_window.show()
32
33
34 def main():
35 return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')
36
37
38 if __name__ == '__main__':
39 app = main()
40 app.main_loop()
41
[end of examples/imageview/imageview/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py
--- a/examples/imageview/imageview/app.py
+++ b/examples/imageview/imageview/app.py
@@ -14,10 +14,9 @@
# image from local path
# load brutus.png from the package
# We set the style width/height parameters for this one
- image_from_path = toga.Image('resources/brutus.png')
+ image_from_path = toga.Image('resources/pride-brutus.png')
imageview_from_path = toga.ImageView(image_from_path)
imageview_from_path.style.update(height=72)
- imageview_from_path.style.update(width=72)
box.add(imageview_from_path)
# image from remote URL
diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py
--- a/src/gtk/toga_gtk/widgets/imageview.py
+++ b/src/gtk/toga_gtk/widgets/imageview.py
@@ -55,4 +55,8 @@
height = original_height * height_ratio
width = original_width * height_ratio
- return int(height), int(width)
+ # On the first display the allocated height/width will be 1x1.
+ # If the image isn't square, this will result in one of the dimensions
+ # scaling to 0, which breaks GTK. So; constraint the minimum height
+ # and width to 1.
+ return max(int(height), 1), max(int(width), 1)
| {"golden_diff": "diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py\n--- a/examples/imageview/imageview/app.py\n+++ b/examples/imageview/imageview/app.py\n@@ -14,10 +14,9 @@\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n- image_from_path = toga.Image('resources/brutus.png')\n+ image_from_path = toga.Image('resources/pride-brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n- imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n \n # image from remote URL\ndiff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py\n--- a/src/gtk/toga_gtk/widgets/imageview.py\n+++ b/src/gtk/toga_gtk/widgets/imageview.py\n@@ -55,4 +55,8 @@\n height = original_height * height_ratio\n width = original_width * height_ratio\n \n- return int(height), int(width)\n+ # On the first display the allocated height/width will be 1x1.\n+ # If the image isn't square, this will result in one of the dimensions\n+ # scaling to 0, which breaks GTK. So; constraint the minimum height\n+ # and width to 1.\n+ return max(int(height), 1), max(int(width), 1)\n", "issue": "ImageView only works with square images\nI created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html)\r\nMy python version is 3.8.5\r\nWhen trying to add images some work and some not.\r\n\r\nThis code doesn't work:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x67/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))`\r\n\r\nAnd this is working fine:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x100/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))`\r\n\r\nThe error I get is:\r\n\r\n> (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed\r\n> ... 
TypeError: Argument 0 does not allow None as a value\r\n\n", "before_files": [{"content": "from ..libs import GdkPixbuf, Gtk, Gdk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def set_bounds(self, x, y, width, height):\n super().set_bounds(x, y, width, height)\n # rehint to update scaling of pixbuf\n self.rehint()\n\n def rehint(self):\n if self._pixbuf:\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width(),\n )\n\n dpr = self.native.get_scale_factor()\n\n scaled_pixbuf = self._pixbuf.scale_simple(\n width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n )\n\n surface = Gdk.cairo_surface_create_from_pixbuf(\n scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n )\n self._image.set_from_surface(surface)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width / original_width\n height_ratio = max_height / original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n return int(height), int(width)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}, {"content": "import toga\nfrom toga.style.pack import CENTER, COLUMN\n\n\nclass ImageViewApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n\n box = toga.Box()\n box.style.padding = 40\n box.style.update(alignment=CENTER)\n box.style.update(direction=COLUMN)\n\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n image_from_path = toga.Image('resources/brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n\n # image from remote URL\n # no style parameters - we let Pack determine how to allocate\n # the space\n image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')\n imageview_from_url = toga.ImageView(image_from_url)\n box.add(imageview_from_url)\n\n self.main_window.content = box\n self.main_window.show()\n\n\ndef main():\n return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/imageview/imageview/app.py"}]} | 1,758 | 352 |
gh_patches_debug_63334 | rasdani/github-patches | git_diff | sanic-org__sanic-1527 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Publish 19.3 release to PyPI
Thank you for the release 3 days ago!
https://github.com/huge-success/sanic/releases/tag/19.3
It's missing from PyPI at the moment:
https://pypi.org/project/sanic/#history
Please publish it at your convenience 🙇
Keep up the awesome work ❤️
</issue>
<code>
[start of sanic/__init__.py]
1 from sanic.app import Sanic
2 from sanic.blueprints import Blueprint
3
4
5 __version__ = "18.12.0"
6
7 __all__ = ["Sanic", "Blueprint"]
8
[end of sanic/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/__init__.py b/sanic/__init__.py
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -2,6 +2,6 @@
from sanic.blueprints import Blueprint
-__version__ = "18.12.0"
+__version__ = "19.03.0"
__all__ = ["Sanic", "Blueprint"]
| {"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -2,6 +2,6 @@\n from sanic.blueprints import Blueprint\n \n \n-__version__ = \"18.12.0\"\n+__version__ = \"19.03.0\"\n \n __all__ = [\"Sanic\", \"Blueprint\"]\n", "issue": "Publish 19.3 release to PyPI\nThank you for the release 3 days ago!\r\n\r\nhttps://github.com/huge-success/sanic/releases/tag/19.3\r\n\r\nIt's missing from PyPI at the moment:\r\n\r\nhttps://pypi.org/project/sanic/#history\r\n\r\nPlease publish it at your convenience \ud83d\ude47 \r\n\r\nKeep up the awesome work \u2764\ufe0f \n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n\n__version__ = \"18.12.0\"\n\n__all__ = [\"Sanic\", \"Blueprint\"]\n", "path": "sanic/__init__.py"}]} | 667 | 96 |
gh_patches_debug_2102 | rasdani/github-patches | git_diff | rucio__rucio-1372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix activity in BB8
Motivation
----------
BB8 uses the activity `Data Rebalancing`, but the activity defined in the ATLAS schema is `Data rebalancing`. We should use the same activity everywhere, and it should be consistent with the share defined in FTS.
</issue>
<code>
[start of lib/rucio/vcsversion.py]
1
2 '''
3 This file is automatically generated; Do not edit it. :)
4 '''
5 VERSION_INFO = {
6 'final': True,
7 'version': '1.17.4',
8 'branch_nick': 'patch-0-1_17_4_client_release_prep',
9 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',
10 'revno': 6827
11 }
12
[end of lib/rucio/vcsversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py
--- a/lib/rucio/vcsversion.py
+++ b/lib/rucio/vcsversion.py
@@ -4,8 +4,8 @@
'''
VERSION_INFO = {
'final': True,
- 'version': '1.17.4',
- 'branch_nick': 'patch-0-1_17_4_client_release_prep',
- 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',
- 'revno': 6827
+ 'version': '1.17.5',
+ 'branch_nick': 'patch-0-1_17_5_preparation',
+ 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275',
+ 'revno': 6837
}
| {"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.17.4',\n- 'branch_nick': 'patch-0-1_17_4_client_release_prep',\n- 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',\n- 'revno': 6827\n+ 'version': '1.17.5',\n+ 'branch_nick': 'patch-0-1_17_5_preparation',\n+ 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275',\n+ 'revno': 6837\n }\n", "issue": "Fix activity in BB8\nMotivation\r\n----------\r\n\r\nBB8 uses activity `Data Rebalancing` but the activity defined in ATLAS schema is `Data rebalancing`. We should use the same activity everywhere, and it should be consistent with the share defined in FTS\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.17.4',\n 'branch_nick': 'patch-0-1_17_4_client_release_prep',\n 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',\n 'revno': 6827\n}\n", "path": "lib/rucio/vcsversion.py"}]} | 723 | 244 |
gh_patches_debug_17555 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of extensions/base_extension.py]
1 import hashlib
2 import os
3 from abc import ABC, abstractmethod
4 from typing import Union
5
6 __all__ = ["_Extension"]
7
8
9 class _Extension(ABC):
10 def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1):
11 self._name = name
12 self._support_aot = support_aot
13 self._support_jit = support_jit
14 self.priority = priority
15
16 @property
17 def name(self):
18 return self._name
19
20 @property
21 def support_aot(self):
22 return self._support_aot
23
24 @property
25 def support_jit(self):
26 return self._support_jit
27
28 @staticmethod
29 def get_jit_extension_folder_path():
30 """
31 Kernels which are compiled during runtime will be stored in the same cache folder for reuse.
32 The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>.
33 The name of the <cache-folder> follows a common format:
34 torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash>
35
36 The <hash> suffix is the hash value of the path of the `colossalai` file.
37 """
38 import torch
39
40 import colossalai
41 from colossalai.accelerator import get_accelerator
42
43 # get torch version
44 torch_version_major = torch.__version__.split(".")[0]
45 torch_version_minor = torch.__version__.split(".")[1]
46
47 # get device version
48 device_name = get_accelerator().name
49 device_version = get_accelerator().get_version()
50
51 # use colossalai's file path as hash
52 hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest()
53
54 # concat
55 home_directory = os.path.expanduser("~")
56 extension_directory = f".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}"
57 cache_directory = os.path.join(home_directory, extension_directory)
58 return cache_directory
59
60 @abstractmethod
61 def is_hardware_available(self) -> bool:
62 """
63 Check if the hardware required by the kernel is available.
64 """
65
66 @abstractmethod
67 def assert_hardware_compatible(self) -> bool:
68 """
69 Check if the hardware required by the kernel is compatible.
70 """
71
72 @abstractmethod
73 def build_aot(self) -> Union["CppExtension", "CUDAExtension"]:
74 pass
75
76 @abstractmethod
77 def build_jit(self) -> None:
78 pass
79
80 @abstractmethod
81 def load(self):
82 pass
83
[end of extensions/base_extension.py]
</code>
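For orientation, a minimal concrete subclass of `_Extension` could look like the sketch below. Every name in it (`MyCpuExtension` and the trivial method bodies) is invented for illustration and does not exist in the repository:
```python
from extensions.base_extension import _Extension


class MyCpuExtension(_Extension):
    def __init__(self):
        super().__init__(name="my_cpu_ext", support_aot=True, support_jit=False)

    def is_hardware_available(self):
        # Plain CPU: always available.
        return True

    def assert_hardware_compatible(self):
        # Nothing to verify for a CPU-only kernel.
        pass

    def build_aot(self):
        # A real kernel would return a CppExtension/CUDAExtension here.
        raise NotImplementedError

    def build_jit(self):
        raise NotImplementedError

    def load(self):
        return self.build_aot()
```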
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/extensions/base_extension.py b/extensions/base_extension.py
--- a/extensions/base_extension.py
+++ b/extensions/base_extension.py
@@ -1,7 +1,7 @@
import hashlib
import os
from abc import ABC, abstractmethod
-from typing import Union
+from typing import Callable, Union
__all__ = ["_Extension"]
@@ -64,7 +64,7 @@
"""
@abstractmethod
- def assert_hardware_compatible(self) -> bool:
+ def assert_hardware_compatible(self) -> None:
"""
Check if the hardware required by the kernel is compatible.
"""
@@ -74,9 +74,9 @@
pass
@abstractmethod
- def build_jit(self) -> None:
+ def build_jit(self) -> Callable:
pass
@abstractmethod
- def load(self):
+ def load(self) -> Callable:
pass
| {"golden_diff": "diff --git a/extensions/base_extension.py b/extensions/base_extension.py\n--- a/extensions/base_extension.py\n+++ b/extensions/base_extension.py\n@@ -1,7 +1,7 @@\n import hashlib\n import os\n from abc import ABC, abstractmethod\n-from typing import Union\n+from typing import Callable, Union\n \n __all__ = [\"_Extension\"]\n \n@@ -64,7 +64,7 @@\n \"\"\"\n \n @abstractmethod\n- def assert_hardware_compatible(self) -> bool:\n+ def assert_hardware_compatible(self) -> None:\n \"\"\"\n Check if the hardware required by the kernel is compatible.\n \"\"\"\n@@ -74,9 +74,9 @@\n pass\n \n @abstractmethod\n- def build_jit(self) -> None:\n+ def build_jit(self) -> Callable:\n pass\n \n @abstractmethod\n- def load(self):\n+ def load(self) -> Callable:\n pass\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import hashlib\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import Union\n\n__all__ = [\"_Extension\"]\n\n\nclass _Extension(ABC):\n def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1):\n self._name = name\n self._support_aot = support_aot\n self._support_jit = support_jit\n self.priority = priority\n\n @property\n def name(self):\n return self._name\n\n @property\n def support_aot(self):\n return self._support_aot\n\n @property\n def support_jit(self):\n return self._support_jit\n\n @staticmethod\n def get_jit_extension_folder_path():\n \"\"\"\n Kernels which are compiled during runtime will be stored in the same cache folder for reuse.\n The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>.\n The name of the <cache-folder> follows a common format:\n torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash>\n\n The <hash> suffix is the hash value of the path of the `colossalai` file.\n \"\"\"\n import torch\n\n import colossalai\n from colossalai.accelerator import get_accelerator\n\n # get torch version\n torch_version_major = torch.__version__.split(\".\")[0]\n torch_version_minor = torch.__version__.split(\".\")[1]\n\n # get device version\n device_name = get_accelerator().name\n device_version = get_accelerator().get_version()\n\n # use colossalai's file path as hash\n hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest()\n\n # concat\n home_directory = os.path.expanduser(\"~\")\n extension_directory = f\".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}\"\n cache_directory = os.path.join(home_directory, extension_directory)\n return cache_directory\n\n @abstractmethod\n def is_hardware_available(self) -> bool:\n \"\"\"\n Check if the hardware required by the kernel is available.\n \"\"\"\n\n @abstractmethod\n def assert_hardware_compatible(self) -> bool:\n \"\"\"\n Check if the hardware required by the kernel is compatible.\n \"\"\"\n\n @abstractmethod\n def build_aot(self) -> Union[\"CppExtension\", \"CUDAExtension\"]:\n pass\n\n @abstractmethod\n def build_jit(self) -> None:\n pass\n\n @abstractmethod\n def load(self):\n pass\n", "path": "extensions/base_extension.py"}]} | 1,291 | 202 |
gh_patches_debug_407 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-200 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature: allow magic numbers in async functions constructors
We check that some magic numbers can be used in function constructors like so:
```python
def some_function(price, delta=0.1):
return price * delta
```
But, we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21
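For example, the directly equivalent `async` definition is currently flagged as a magic-number violation, even though only the kind of function changed:
```python
async def some_function(price, delta=0.1):
    return price * delta
```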
What we need to do is:
1. Add `ast.AsyncFunctionDef` to the allowed list
2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py
</issue>
<code>
[start of wemake_python_styleguide/visitors/ast/numbers.py]
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import Optional
5
6 from wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST
7 from wemake_python_styleguide.violations.best_practices import (
8 MagicNumberViolation,
9 )
10 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
11
12
13 class MagicNumberVisitor(BaseNodeVisitor):
14 """Checks magic numbers used in the code."""
15
16 _ALLOWED_PARENTS = (
17 ast.Assign,
18
19 # Constructor usages:
20 ast.FunctionDef,
21 ast.arguments,
22
23 # Primitives:
24 ast.List,
25 ast.Dict,
26 ast.Set,
27 ast.Tuple,
28 )
29
30 # TODO: make consistent naming rules for class attributes:
31 _PROXY_PARENTS = (
32 ast.UnaryOp,
33 )
34
35 def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:
36 """
37 Returns real number's parent.
38
39 What can go wrong?
40
41 1. Number can be negative: ``x = -1``,
42 so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``
43
44 """
45 parent = getattr(node, 'parent', None)
46 if isinstance(parent, self._PROXY_PARENTS):
47 return self._get_real_parent(parent)
48 return parent
49
50 def _check_is_magic(self, node: ast.Num) -> None:
51 parent = self._get_real_parent(node)
52 if isinstance(parent, self._ALLOWED_PARENTS):
53 return
54
55 if node.n in MAGIC_NUMBERS_WHITELIST:
56 return
57
58 if isinstance(node.n, int) and node.n <= 10:
59 return
60
61 self.add_violation(MagicNumberViolation(node, text=str(node.n)))
62
63 def visit_Num(self, node: ast.Num) -> None:
64 """
65 Checks numbers not to be magic constants inside the code.
66
67 Raises:
68 MagicNumberViolation
69
70 """
71 self._check_is_magic(node)
72 self.generic_visit(node)
73
[end of wemake_python_styleguide/visitors/ast/numbers.py]
</code>
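The `UnaryOp` case documented in `_get_real_parent` above is easy to verify with the standard `ast` module (illustration only; the visitor itself relies on `parent` attributes that wemake-python-styleguide attaches during traversal):
```python
import ast

tree = ast.parse("x = -1")
assign = tree.body[0]
print(type(assign.value).__name__)           # UnaryOp: it wraps the literal
print(type(assign.value.operand).__name__)   # Constant (ast.Num on older Pythons)
```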
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py
--- a/wemake_python_styleguide/visitors/ast/numbers.py
+++ b/wemake_python_styleguide/visitors/ast/numbers.py
@@ -18,6 +18,7 @@
# Constructor usages:
ast.FunctionDef,
+ ast.AsyncFunctionDef,
ast.arguments,
# Primitives:
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py\n--- a/wemake_python_styleguide/visitors/ast/numbers.py\n+++ b/wemake_python_styleguide/visitors/ast/numbers.py\n@@ -18,6 +18,7 @@\n \n # Constructor usages:\n ast.FunctionDef,\n+ ast.AsyncFunctionDef,\n ast.arguments,\n \n # Primitives:\n", "issue": "Feature: allow magic numbers in async functions constructors\nWe check that some magic numbers can be used in function constructors like so:\r\n\r\n```python\r\ndef some_function(price, delta=0.1):\r\n return price * delta\r\n```\r\n\r\nBut, we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21\r\n\r\nWhat we need to do is:\r\n1. Add `ast.AsyncFunctionDef` to the allowed list\r\n2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/numbers.py"}]} | 1,290 | 110 |
gh_patches_debug_16733 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1729 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: adur_worthing_gov_uk has changed format of address
### I Have A Problem With:
A specific source
### What's Your Problem
The Adur and Worthing council used to return my address as “12 Roadname”, so that’s what I had in my source args. But the format has recently changed to “12 ROADNAME”, causing the lookup in adur_worthing_gov_uk.py to fail.
As the council is just as likely to change it back at some point, can I suggest that the lookup be made case-independent?
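A case-insensitive match is a one-line change in the address loop of `fetch()` (sketch against the source shown below; `str.casefold()` would work just as well as `upper()`):
```python
found_address = None
for address in addresses_select.find_all("option"):
    if self._address.upper() in address.get_text().upper():
        found_address = address
```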
### Source (if relevant)
adur_worthing_gov_uk
### Logs
_No response_
### Relevant Configuration
_No response_
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py]
1 from datetime import datetime
2
3 import bs4
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Adur & Worthing Councils"
8 DESCRIPTION = "Source for adur-worthing.gov.uk services for Adur & Worthing, UK."
9 URL = "https://adur-worthing.gov.uk"
10 TEST_CASES = {
11 "Test_001": {"postcode": "BN15 9UX", "address": "1 Western Road North"},
12 "Test_002": {"postcode": "BN43 5WE", "address": "6 Hebe Road"},
13 }
14 HEADERS = {
15 "user-agent": "Mozilla/5.0",
16 }
17 ICON_MAP = {
18 "Recycling": "mdi:recycle",
19 "Refuse": "mdi:trash-can",
20 "Garden": "mdi:leaf",
21 }
22
23
24 class Source:
25 def __init__(self, postcode, address):
26 self._postcode = postcode
27 self._address = address
28
29 def fetch(self):
30
31 if self._postcode is None or self._address is None:
32 raise ValueError("Either postcode or address is None")
33
34 s = requests.Session()
35
36 postcode_search_request = s.get(
37 f"https://www.adur-worthing.gov.uk/bin-day/?brlu-address-postcode={self._postcode}&return-url=/bin-day/&action=search",
38 headers=HEADERS,
39 )
40 html_addresses = postcode_search_request.content
41 addresses = bs4.BeautifulSoup(html_addresses, "html.parser")
42 addresses_select = addresses.find("select", {"id": "brlu-selected-address"})
43
44 found_address = None
45 for address in addresses_select.find_all("option"):
46 if self._address in address.get_text():
47 found_address = address
48
49 if found_address is None:
50 raise ValueError("Address not found")
51
52 collections_request = s.get(
53 f"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address={address['value']}&return-url=/bin-day/",
54 headers=HEADERS,
55 )
56 html_collections = collections_request.content
57 bin_collections = bs4.BeautifulSoup(html_collections, "html.parser")
58
59 bin_days_table = bin_collections.find("table", class_="bin-days")
60 bin_days_table_body = bin_days_table.find("tbody")
61 bin_days_by_type = bin_days_table_body.find_all("tr")
62
63 entries = []
64
65 for bin_by_type in bin_days_by_type:
66 bin_type = bin_by_type.find("th").text
67 icon = ICON_MAP.get(bin_type)
68 bin_days = bin_by_type.find_all("td")[-1].get_text(separator="\n")
69 for bin_day in bin_days.split("\n"):
70 bin_datetime = datetime.strptime(bin_day, "%A %d %b %Y").date()
71 entries.append(Collection(t=bin_type, date=bin_datetime, icon=icon))
72
73 return entries
74
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py
@@ -27,7 +27,6 @@
self._address = address
def fetch(self):
-
if self._postcode is None or self._address is None:
raise ValueError("Either postcode or address is None")
@@ -43,7 +42,7 @@
found_address = None
for address in addresses_select.find_all("option"):
- if self._address in address.get_text():
+ if self._address.upper() in address.get_text().upper():
found_address = address
if found_address is None:
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n@@ -27,7 +27,6 @@\n self._address = address\n \n def fetch(self):\n-\n if self._postcode is None or self._address is None:\n raise ValueError(\"Either postcode or address is None\")\n \n@@ -43,7 +42,7 @@\n \n found_address = None\n for address in addresses_select.find_all(\"option\"):\n- if self._address in address.get_text():\n+ if self._address.upper() in address.get_text().upper():\n found_address = address\n \n if found_address is None:\n", "issue": "[Bug]: adur_worthing_gov_uk has changed format of address\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe Adur and Worthing council used to return my address as \u201c12 Roadname\u201d, so that\u2019s what I had in my source args. But the format has recently changed to \u201c12 ROADNAME\u201d, causing the lookup in adur_worthing_gov_uk.py to fail. \r\n\r\nAs the council is just as likely to change it back at some point can I suggest that the lookup is made case independent?\n\n### Source (if relevant)\n\nadur_worthing_gov_uk\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport bs4\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Adur & Worthing Councils\"\nDESCRIPTION = \"Source for adur-worthing.gov.uk services for Adur & Worthing, UK.\"\nURL = \"https://adur-worthing.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"postcode\": \"BN15 9UX\", \"address\": \"1 Western Road North\"},\n \"Test_002\": {\"postcode\": \"BN43 5WE\", \"address\": \"6 Hebe Road\"},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"Recycling\": \"mdi:recycle\",\n \"Refuse\": \"mdi:trash-can\",\n \"Garden\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, postcode, address):\n self._postcode = postcode\n self._address = address\n\n def fetch(self):\n\n if self._postcode is None or self._address is None:\n raise ValueError(\"Either postcode or address is None\")\n\n s = requests.Session()\n\n postcode_search_request = s.get(\n 
f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-address-postcode={self._postcode}&return-url=/bin-day/&action=search\",\n headers=HEADERS,\n )\n html_addresses = postcode_search_request.content\n addresses = bs4.BeautifulSoup(html_addresses, \"html.parser\")\n addresses_select = addresses.find(\"select\", {\"id\": \"brlu-selected-address\"})\n\n found_address = None\n for address in addresses_select.find_all(\"option\"):\n if self._address in address.get_text():\n found_address = address\n\n if found_address is None:\n raise ValueError(\"Address not found\")\n\n collections_request = s.get(\n f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address={address['value']}&return-url=/bin-day/\",\n headers=HEADERS,\n )\n html_collections = collections_request.content\n bin_collections = bs4.BeautifulSoup(html_collections, \"html.parser\")\n\n bin_days_table = bin_collections.find(\"table\", class_=\"bin-days\")\n bin_days_table_body = bin_days_table.find(\"tbody\")\n bin_days_by_type = bin_days_table_body.find_all(\"tr\")\n\n entries = []\n\n for bin_by_type in bin_days_by_type:\n bin_type = bin_by_type.find(\"th\").text\n icon = ICON_MAP.get(bin_type)\n bin_days = bin_by_type.find_all(\"td\")[-1].get_text(separator=\"\\n\")\n for bin_day in bin_days.split(\"\\n\"):\n bin_datetime = datetime.strptime(bin_day, \"%A %d %b %Y\").date()\n entries.append(Collection(t=bin_type, date=bin_datetime, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py"}]} | 1,698 | 217 |
gh_patches_debug_659 | rasdani/github-patches | git_diff | pex-tool__pex-2214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.142
On the docket:
+ [x] KeyError when locking awscli on Python 3.11 #2211
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.141"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.141"
+__version__ = "2.1.142"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.141\"\n+__version__ = \"2.1.142\"\n", "issue": "Release 2.1.142\nOn the docket:\r\n+ [x] KeyError when locking awscli on Python 3.11 #2211\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.141\"\n", "path": "pex/version.py"}]} | 622 | 99 |
gh_patches_debug_36890 | rasdani/github-patches | git_diff | bokeh__bokeh-4021 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow for the injection of raw HTML code
Currently, the Widget library contains a `Paragraph` and a `PreText` widget allowing the user to put basic text on the rendered page. Neither of these widgets allows for the inclusion of formatted text using HTML markup. A widget should be added to support the inclusion of raw HTML. The widget can be a simple `div` tag.
</issue>
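For reference, a minimal sketch of what such a widget could look like next to the existing `Paragraph`/`PreText` (illustrative only: the `Div` name and the `render_as_text` flag are assumptions, and a real Bokeh widget would also need a client-side counterpart):

```python
# Sketch of a Div widget that can carry raw HTML.
from bokeh.core.properties import Bool, Int, String
from bokeh.models.widgets.markups import Markup


class Div(Markup):
    """ A block (div) of text, optionally interpreted as raw HTML. """

    text = String(default="", help="""
    The contents of the widget.
    """)

    width = Int(500, help="""
    The width of the block in pixels.
    """)

    height = Int(400, help="""
    The height of the block in pixels.
    """)

    render_as_text = Bool(False, help="""
    When True, show the text verbatim; when False, treat it as an HTML string.
    """)
```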
<code>
[start of bokeh/models/widgets/markups.py]
1 """ Various kinds of markup (static content) widgets.
2
3 """
4 from __future__ import absolute_import
5
6 from ...core.properties import abstract
7 from ...core.properties import Int, String
8 from .widget import Widget
9
10 @abstract
11 class Markup(Widget):
12 """ Base class for HTML markup widget models. """
13
14 class Paragraph(Markup):
15 """ A block (paragraph) of text.
16
17 """
18
19 text = String(default="", help="""
20 The contents of the widget.
21 """)
22
23 width = Int(500, help="""
24 The width of the block in pixels.
25 """)
26
27 height = Int(400, help="""
28 The height of the block in pixels.
29 """)
30
31
32 class PreText(Paragraph):
33 """ A block (paragraph) of pre-formatted text.
34
35 """
36
[end of bokeh/models/widgets/markups.py]
[start of examples/plotting/file/slider_callback_policy.py]
1 from bokeh.io import vform, output_file, show
2 from bokeh.models import CustomJS, Slider, Paragraph, PreText
3
4 # NOTE: the JS functions to forvide the format code for strings is found the answer
5 # from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format
6 callback = CustomJS(code="""
7 var s1 = slider1.get('value')
8 var s2 = slider2.get('value')
9 var s3 = slider3.get('value')
10
11 if (!String.prototype.format) {
12 String.prototype.format = function() {
13 var args = arguments;
14 return this.replace(/{(\d+)}/g, function(match, number) {
15 return typeof args[number] != 'undefined'
16 ? args[number]
17 : match
18 ;
19 });
20 };
21 }
22
23 para.set('text', "Slider Values\\n\\n Slider 1: {0}\\n Slider 2: {1}\\n Slider 3: {2}".format(s1, s2, s3))
24 """)
25
26 para = PreText(text = "Slider Values:\n\n Slider 1: 0\n Slider 2: 0\n Slider 3: 0", width = 200, height = 150)
27
28 s1 = Slider(title="Slider 1 (Continuous)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="continuous")
29 s2 = Slider(title="Slider 2 (Throttle)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="throttle", callback_throttle=2000)
30 s3 = Slider(title="Slider 3 (Mouse Up)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="mouseup")
31
32 callback.args['para'] = para
33 callback.args['slider1'] = s1
34 callback.args['slider2'] = s2
35 callback.args['slider3'] = s3
36
37 output_file('slider_callback_policy.html')
38
39 show(vform(s1, s2, s3, para))
40
[end of examples/plotting/file/slider_callback_policy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/widgets/markups.py b/bokeh/models/widgets/markups.py
--- a/bokeh/models/widgets/markups.py
+++ b/bokeh/models/widgets/markups.py
@@ -4,18 +4,13 @@
from __future__ import absolute_import
from ...core.properties import abstract
-from ...core.properties import Int, String
+from ...core.properties import Int, String, Bool
from .widget import Widget
@abstract
class Markup(Widget):
""" Base class for HTML markup widget models. """
-class Paragraph(Markup):
- """ A block (paragraph) of text.
-
- """
-
text = String(default="", help="""
The contents of the widget.
""")
@@ -29,6 +24,20 @@
""")
+class Paragraph(Markup):
+ """ A block (paragraph) of text.
+
+ """
+
+class Div(Markup):
+ """ A block (div) of text.
+
+ """
+
+ render_as_text = Bool(False, help="""
+ Should the text be rendered as raw text (False, default), or should the text be interprited as an HTML string (True)
+ """)
+
class PreText(Paragraph):
""" A block (paragraph) of pre-formatted text.
diff --git a/examples/plotting/file/slider_callback_policy.py b/examples/plotting/file/slider_callback_policy.py
--- a/examples/plotting/file/slider_callback_policy.py
+++ b/examples/plotting/file/slider_callback_policy.py
@@ -1,5 +1,5 @@
from bokeh.io import vform, output_file, show
-from bokeh.models import CustomJS, Slider, Paragraph, PreText
+from bokeh.models import CustomJS, Slider, Div
# NOTE: the JS functions to forvide the format code for strings is found the answer
# from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format
@@ -20,10 +20,10 @@
};
}
- para.set('text', "Slider Values\\n\\n Slider 1: {0}\\n Slider 2: {1}\\n Slider 3: {2}".format(s1, s2, s3))
+ para.set('text', "<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}".format(s1, s2, s3))
""")
-para = PreText(text = "Slider Values:\n\n Slider 1: 0\n Slider 2: 0\n Slider 3: 0", width = 200, height = 150)
+para = Div(text="<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0", width=200, height=150, render_as_text=False)
s1 = Slider(title="Slider 1 (Continuous)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="continuous")
s2 = Slider(title="Slider 2 (Throttle)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="throttle", callback_throttle=2000)
| {"golden_diff": "diff --git a/bokeh/models/widgets/markups.py b/bokeh/models/widgets/markups.py\n--- a/bokeh/models/widgets/markups.py\n+++ b/bokeh/models/widgets/markups.py\n@@ -4,18 +4,13 @@\n from __future__ import absolute_import\n \n from ...core.properties import abstract\n-from ...core.properties import Int, String\n+from ...core.properties import Int, String, Bool\n from .widget import Widget\n \n @abstract\n class Markup(Widget):\n \"\"\" Base class for HTML markup widget models. \"\"\"\n \n-class Paragraph(Markup):\n- \"\"\" A block (paragraph) of text.\n-\n- \"\"\"\n-\n text = String(default=\"\", help=\"\"\"\n The contents of the widget.\n \"\"\")\n@@ -29,6 +24,20 @@\n \"\"\")\n \n \n+class Paragraph(Markup):\n+ \"\"\" A block (paragraph) of text.\n+\n+ \"\"\"\n+\n+class Div(Markup):\n+ \"\"\" A block (div) of text.\n+\n+ \"\"\"\n+\n+ render_as_text = Bool(False, help=\"\"\"\n+ Should the text be rendered as raw text (False, default), or should the text be interprited as an HTML string (True)\n+ \"\"\")\n+\n class PreText(Paragraph):\n \"\"\" A block (paragraph) of pre-formatted text.\n \ndiff --git a/examples/plotting/file/slider_callback_policy.py b/examples/plotting/file/slider_callback_policy.py\n--- a/examples/plotting/file/slider_callback_policy.py\n+++ b/examples/plotting/file/slider_callback_policy.py\n@@ -1,5 +1,5 @@\n from bokeh.io import vform, output_file, show\n-from bokeh.models import CustomJS, Slider, Paragraph, PreText\n+from bokeh.models import CustomJS, Slider, Div\n \n # NOTE: the JS functions to forvide the format code for strings is found the answer\n # from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format\n@@ -20,10 +20,10 @@\n };\n }\n \n- para.set('text', \"Slider Values\\\\n\\\\n Slider 1: {0}\\\\n Slider 2: {1}\\\\n Slider 3: {2}\".format(s1, s2, s3))\n+ para.set('text', \"<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}\".format(s1, s2, s3))\n \"\"\")\n \n-para = PreText(text = \"Slider Values:\\n\\n Slider 1: 0\\n Slider 2: 0\\n Slider 3: 0\", width = 200, height = 150)\n+para = Div(text=\"<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0\", width=200, height=150, render_as_text=False)\n \n s1 = Slider(title=\"Slider 1 (Continuous)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"continuous\")\n s2 = Slider(title=\"Slider 2 (Throttle)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"throttle\", callback_throttle=2000)\n", "issue": "Allow for the injection of raw HTML code\nCurrently, the Widget library contains a `Paragraph` and a `PreText` widget allowing the user to put basic text on the rendered page. Neither of these widgets allows for the inclusion of formatted text using HTML markup. A widget should be added to support the inclusion of raw HTML. The widget can be a simple div named div tag.\n\n", "before_files": [{"content": "\"\"\" Various kinds of markup (static content) widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.properties import abstract\nfrom ...core.properties import Int, String\nfrom .widget import Widget\n\n@abstract\nclass Markup(Widget):\n \"\"\" Base class for HTML markup widget models. 
\"\"\"\n\nclass Paragraph(Markup):\n \"\"\" A block (paragraph) of text.\n\n \"\"\"\n\n text = String(default=\"\", help=\"\"\"\n The contents of the widget.\n \"\"\")\n\n width = Int(500, help=\"\"\"\n The width of the block in pixels.\n \"\"\")\n\n height = Int(400, help=\"\"\"\n The height of the block in pixels.\n \"\"\")\n\n\nclass PreText(Paragraph):\n \"\"\" A block (paragraph) of pre-formatted text.\n\n \"\"\"\n", "path": "bokeh/models/widgets/markups.py"}, {"content": "from bokeh.io import vform, output_file, show\nfrom bokeh.models import CustomJS, Slider, Paragraph, PreText\n\n# NOTE: the JS functions to forvide the format code for strings is found the answer\n# from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format\ncallback = CustomJS(code=\"\"\"\n var s1 = slider1.get('value')\n var s2 = slider2.get('value')\n var s3 = slider3.get('value')\n\n if (!String.prototype.format) {\n String.prototype.format = function() {\n var args = arguments;\n return this.replace(/{(\\d+)}/g, function(match, number) {\n return typeof args[number] != 'undefined'\n ? args[number]\n : match\n ;\n });\n };\n }\n\n para.set('text', \"Slider Values\\\\n\\\\n Slider 1: {0}\\\\n Slider 2: {1}\\\\n Slider 3: {2}\".format(s1, s2, s3))\n\"\"\")\n\npara = PreText(text = \"Slider Values:\\n\\n Slider 1: 0\\n Slider 2: 0\\n Slider 3: 0\", width = 200, height = 150)\n\ns1 = Slider(title=\"Slider 1 (Continuous)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"continuous\")\ns2 = Slider(title=\"Slider 2 (Throttle)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"throttle\", callback_throttle=2000)\ns3 = Slider(title=\"Slider 3 (Mouse Up)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"mouseup\")\n\ncallback.args['para'] = para\ncallback.args['slider1'] = s1\ncallback.args['slider2'] = s2\ncallback.args['slider3'] = s3\n\noutput_file('slider_callback_policy.html')\n\nshow(vform(s1, s2, s3, para))\n", "path": "examples/plotting/file/slider_callback_policy.py"}]} | 1,441 | 763 |
gh_patches_debug_14897 | rasdani/github-patches | git_diff | qtile__qtile-3099 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Widget for updatable image
I don't want to reinvent the wheel, so I'm checking here first.
I use GenPollText for my keyboard layout indicator, but I want to see the flag (an image) instead. As I change layout, the image should change too. Does Qtile have such a widget, or a proper way to do that?
</issue>
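For reference, one way to sketch this on top of the existing `Image` widget (assumption: the load/scale logic from `_configure` is factored into a reusable `_update_image()` helper so it can be re-run):

```python
# Sketch: a command method on libqtile.widget.image.Image that swaps
# the displayed file at runtime.
def cmd_update(self, filename):
    """Point the widget at a new image file and redraw."""
    old_length = self.calculate_length()
    self.filename = filename
    self._update_image()  # reload, rotate and rescale the new file
    if self.calculate_length() == old_length:
        self.draw()       # same width: repaint just this widget
    else:
        self.bar.draw()   # width changed: relayout the whole bar
```

Exposed through qtile's command interface, a keyboard-layout hook could then call `update()` with the path of the new flag image.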
<code>
[start of libqtile/widget/image.py]
1 # Copyright (c) 2013 dequis
2 # Copyright (c) 2014 Sean Vig
3 # Copyright (c) 2014 Adi Sieker
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining a copy
6 # of this software and associated documentation files (the "Software"), to deal
7 # in the Software without restriction, including without limitation the rights
8 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 # copies of the Software, and to permit persons to whom the Software is
10 # furnished to do so, subject to the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be included in
13 # all copies or substantial portions of the Software.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 # SOFTWARE.
22 import os
23
24 from libqtile import bar
25 from libqtile.images import Img
26 from libqtile.log_utils import logger
27 from libqtile.widget import base
28
29
30 class Image(base._Widget, base.MarginMixin):
31 """Display a PNG image on the bar"""
32 orientations = base.ORIENTATION_BOTH
33 defaults = [
34 ("scale", True, "Enable/Disable image scaling"),
35 ("rotate", 0.0, "rotate the image in degrees counter-clockwise"),
36 ("filename", None, "Image filename. Can contain '~'"),
37 ]
38
39 def __init__(self, length=bar.CALCULATED, **config):
40 base._Widget.__init__(self, length, **config)
41 self.add_defaults(Image.defaults)
42 self.add_defaults(base.MarginMixin.defaults)
43
44 # make the default 0 instead
45 self._variable_defaults["margin"] = 0
46
47 def _configure(self, qtile, bar):
48 base._Widget._configure(self, qtile, bar)
49 self.img = None
50
51 if not self.filename:
52 logger.warning("Image filename not set!")
53 return
54
55 self.filename = os.path.expanduser(self.filename)
56
57 if not os.path.exists(self.filename):
58 logger.warning("Image does not exist: {}".format(self.filename))
59 return
60
61 img = Img.from_path(self.filename)
62 self.img = img
63 img.theta = self.rotate
64 if not self.scale:
65 return
66 if self.bar.horizontal:
67 new_height = self.bar.height - (self.margin_y * 2)
68 img.resize(height=new_height)
69 else:
70 new_width = self.bar.width - (self.margin_x * 2)
71 img.resize(width=new_width)
72
73 def draw(self):
74 if self.img is None:
75 return
76
77 self.drawer.clear(self.background or self.bar.background)
78 self.drawer.ctx.save()
79 self.drawer.ctx.translate(self.margin_x, self.margin_y)
80 self.drawer.ctx.set_source(self.img.pattern)
81 self.drawer.ctx.paint()
82 self.drawer.ctx.restore()
83
84 if self.bar.horizontal:
85 self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)
86 else:
87 self.drawer.draw(offsety=self.offset, offsetx=self.offsetx, height=self.width)
88
89 def calculate_length(self):
90 if self.img is None:
91 return 0
92
93 if self.bar.horizontal:
94 return self.img.width + (self.margin_x * 2)
95 else:
96 return self.img.height + (self.margin_y * 2)
97
[end of libqtile/widget/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py
--- a/libqtile/widget/image.py
+++ b/libqtile/widget/image.py
@@ -46,6 +46,9 @@
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
+ self._update_image()
+
+ def _update_image(self):
self.img = None
if not self.filename:
@@ -94,3 +97,13 @@
return self.img.width + (self.margin_x * 2)
else:
return self.img.height + (self.margin_y * 2)
+
+ def cmd_update(self, filename):
+ old_length = self.calculate_length()
+ self.filename = filename
+ self._update_image()
+
+ if self.calculate_length() == old_length:
+ self.draw()
+ else:
+ self.bar.draw()
| {"golden_diff": "diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py\n--- a/libqtile/widget/image.py\n+++ b/libqtile/widget/image.py\n@@ -46,6 +46,9 @@\n \n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n+ self._update_image()\n+\n+ def _update_image(self):\n self.img = None\n \n if not self.filename:\n@@ -94,3 +97,13 @@\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n+\n+ def cmd_update(self, filename):\n+ old_length = self.calculate_length()\n+ self.filename = filename\n+ self._update_image()\n+\n+ if self.calculate_length() == old_length:\n+ self.draw()\n+ else:\n+ self.bar.draw()\n", "issue": "Widget for updatable image\nI don't want to reinvent the wheel, so will check before.\r\nI use GenPollText for my keyboard layout indicator but instead I want to see the flag (image). As I change layout the image should be changed. Do Qtile has such widget or proper way to do that?\n", "before_files": [{"content": "# Copyright (c) 2013 dequis\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nfrom libqtile import bar\nfrom libqtile.images import Img\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass Image(base._Widget, base.MarginMixin):\n \"\"\"Display a PNG image on the bar\"\"\"\n orientations = base.ORIENTATION_BOTH\n defaults = [\n (\"scale\", True, \"Enable/Disable image scaling\"),\n (\"rotate\", 0.0, \"rotate the image in degrees counter-clockwise\"),\n (\"filename\", None, \"Image filename. 
Can contain '~'\"),\n ]\n\n def __init__(self, length=bar.CALCULATED, **config):\n base._Widget.__init__(self, length, **config)\n self.add_defaults(Image.defaults)\n self.add_defaults(base.MarginMixin.defaults)\n\n # make the default 0 instead\n self._variable_defaults[\"margin\"] = 0\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n self.img = None\n\n if not self.filename:\n logger.warning(\"Image filename not set!\")\n return\n\n self.filename = os.path.expanduser(self.filename)\n\n if not os.path.exists(self.filename):\n logger.warning(\"Image does not exist: {}\".format(self.filename))\n return\n\n img = Img.from_path(self.filename)\n self.img = img\n img.theta = self.rotate\n if not self.scale:\n return\n if self.bar.horizontal:\n new_height = self.bar.height - (self.margin_y * 2)\n img.resize(height=new_height)\n else:\n new_width = self.bar.width - (self.margin_x * 2)\n img.resize(width=new_width)\n\n def draw(self):\n if self.img is None:\n return\n\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.save()\n self.drawer.ctx.translate(self.margin_x, self.margin_y)\n self.drawer.ctx.set_source(self.img.pattern)\n self.drawer.ctx.paint()\n self.drawer.ctx.restore()\n\n if self.bar.horizontal:\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n else:\n self.drawer.draw(offsety=self.offset, offsetx=self.offsetx, height=self.width)\n\n def calculate_length(self):\n if self.img is None:\n return 0\n\n if self.bar.horizontal:\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n", "path": "libqtile/widget/image.py"}]} | 1,590 | 206 |
gh_patches_debug_11910 | rasdani/github-patches | git_diff | web2py__web2py-1682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
heroku ADAPTERS error
I'm looking to use Heroku for deployment of my web2py project. However, when I add
`from gluon.contrib.heroku import get_db`
`db = get_db(name=None, pool_size=myconf.get('db.pool_size'))`
I get a ticket with the error:
> File "/Users/huangyu/dev/web2py/gluon/contrib/heroku.py", line 10, in <module>
from pydal.adapters import ADAPTERS, PostgreSQLAdapter
ImportError: cannot import name ADAPTERS
It looks like web2py has moved on from using ADAPTERS. Has it been replaced by
`@adapters.register_for('postgres')`?
Either way, the heroku file has not been updated.
</issue>
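For context, newer pyDAL releases do register adapters with that decorator; a minimal sketch of the updated registration (the class names `PostgrePsyco` and `DatabaseStoredFile` follow the newer pyDAL API and should be treated as assumptions here):

```python
# Sketch against newer pyDAL: adapters register via a decorator, and
# DatabaseStoredFile replaces the old UseDatabaseStoredFile mixin.
from pydal.adapters import adapters, PostgrePsyco
from pydal.helpers.classes import DatabaseStoredFile


@adapters.register_for('postgres')
class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):
    uploads_in_blob = True
```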
<code>
[start of gluon/contrib/heroku.py]
1 """
2 Usage: in web2py models/db.py
3
4 from gluon.contrib.heroku import get_db
5 db = get_db()
6
7 """
8 import os
9 from gluon import *
10 from pydal.adapters import ADAPTERS, PostgreSQLAdapter
11 from pydal.helpers.classes import UseDatabaseStoredFile
12
13 class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):
14 drivers = ('psycopg2',)
15 uploads_in_blob = True
16
17 ADAPTERS['postgres'] = HerokuPostgresAdapter
18
19 def get_db(name = None, pool_size=10):
20 if not name:
21 names = [n for n in os.environ.keys()
22 if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL']
23 if names:
24 name = names[0]
25 if name:
26 db = DAL(os.environ[name], pool_size=pool_size)
27 current.session.connect(current.request, current.response, db=db)
28 else:
29 db = DAL('sqlite://heroku.test.sqlite')
30 return db
31
[end of gluon/contrib/heroku.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gluon/contrib/heroku.py b/gluon/contrib/heroku.py
--- a/gluon/contrib/heroku.py
+++ b/gluon/contrib/heroku.py
@@ -7,15 +7,13 @@
"""
import os
from gluon import *
-from pydal.adapters import ADAPTERS, PostgreSQLAdapter
-from pydal.helpers.classes import UseDatabaseStoredFile
+from pydal.adapters import adapters, PostgrePsyco
+from pydal.helpers.classes import DatabaseStoredFile
-class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):
- drivers = ('psycopg2',)
[email protected]_for('postgres')
+class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):
uploads_in_blob = True
-ADAPTERS['postgres'] = HerokuPostgresAdapter
-
def get_db(name = None, pool_size=10):
if not name:
names = [n for n in os.environ.keys()
| {"golden_diff": "diff --git a/gluon/contrib/heroku.py b/gluon/contrib/heroku.py\n--- a/gluon/contrib/heroku.py\n+++ b/gluon/contrib/heroku.py\n@@ -7,15 +7,13 @@\n \"\"\"\n import os\n from gluon import *\n-from pydal.adapters import ADAPTERS, PostgreSQLAdapter\n-from pydal.helpers.classes import UseDatabaseStoredFile\n+from pydal.adapters import adapters, PostgrePsyco\n+from pydal.helpers.classes import DatabaseStoredFile\n \n-class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):\n- drivers = ('psycopg2',)\[email protected]_for('postgres')\n+class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):\n uploads_in_blob = True\n \n-ADAPTERS['postgres'] = HerokuPostgresAdapter\n-\n def get_db(name = None, pool_size=10):\n if not name:\n names = [n for n in os.environ.keys()\n", "issue": "heroku ADAPTERS error\nI'm looking to use Heroku for deployment of my web2py project. However, when I add \r\n`from gluon.contrib.heroku import get_db`\r\n`db = get_db(name=None, pool_size=myconf.get('db.pool_size'))`\r\n\r\nI get a ticket with the error:\r\n\r\n> File \"/Users/huangyu/dev/web2py/gluon/contrib/heroku.py\", line 10, in <module>\r\n from pydal.adapters import ADAPTERS, PostgreSQLAdapter\r\nImportError: cannot import name ADAPTERS\r\n\r\nIt looks like web2py has moved on from using ADAPTERS? Has that been replaced by \r\n`@adapters.register_for('postgres')`\r\n\r\nBut the heroku file has not been updated. \n", "before_files": [{"content": "\"\"\"\nUsage: in web2py models/db.py\n\nfrom gluon.contrib.heroku import get_db\ndb = get_db()\n\n\"\"\"\nimport os\nfrom gluon import *\nfrom pydal.adapters import ADAPTERS, PostgreSQLAdapter\nfrom pydal.helpers.classes import UseDatabaseStoredFile\n\nclass HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):\n drivers = ('psycopg2',)\n uploads_in_blob = True\n\nADAPTERS['postgres'] = HerokuPostgresAdapter\n\ndef get_db(name = None, pool_size=10):\n if not name:\n names = [n for n in os.environ.keys()\n if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL']\n if names:\n name = names[0]\n if name:\n db = DAL(os.environ[name], pool_size=pool_size)\n current.session.connect(current.request, current.response, db=db)\n else:\n db = DAL('sqlite://heroku.test.sqlite')\n return db\n", "path": "gluon/contrib/heroku.py"}]} | 973 | 215 |
gh_patches_debug_34290 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch unit tests from `unittest.mock` to SDK & in-memory exporter
See https://github.com/open-telemetry/opentelemetry-python/pull/290#issuecomment-558091283.
Currently tests are cumbersome to write, and in most cases we probably don't want to test which API calls are made but rather what Spans would result. For this, an SDK with an in-memory exporter would be better than using `unittest.mock`.
</issue>
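For reference, a minimal sketch of the in-memory setup (API names match the SDK of this era; `get_finished_spans()` is assumed to be the exporter's accessor):

```python
# Sketch: drive tests through the real SDK and capture spans in memory.
from opentelemetry import trace as trace_api
from opentelemetry.sdk.trace import TracerSource, export
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
    InMemorySpanExporter,
)

trace_api.set_preferred_tracer_source_implementation(lambda T: TracerSource())
memory_exporter = InMemorySpanExporter()
trace_api.tracer_source().add_span_processor(
    export.SimpleExportSpanProcessor(memory_exporter)
)

# ...run the code under test, then assert on real span objects:
spans = memory_exporter.get_finished_spans()
```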
<code>
[start of ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py]
1 import io
2 import unittest
3 import unittest.mock as mock
4 import wsgiref.util as wsgiref_util
5
6 from opentelemetry import trace as trace_api
7
8
9 class WsgiTestBase(unittest.TestCase):
10 def setUp(self):
11 self.span = mock.create_autospec(trace_api.Span, spec_set=True)
12 tracer = trace_api.Tracer()
13 self.get_tracer_patcher = mock.patch.object(
14 trace_api.TracerSource,
15 "get_tracer",
16 autospec=True,
17 spec_set=True,
18 return_value=tracer,
19 )
20 self.get_tracer_patcher.start()
21
22 self.start_span_patcher = mock.patch.object(
23 tracer,
24 "start_span",
25 autospec=True,
26 spec_set=True,
27 return_value=self.span,
28 )
29 self.start_span = self.start_span_patcher.start()
30 self.write_buffer = io.BytesIO()
31 self.write = self.write_buffer.write
32
33 self.environ = {}
34 wsgiref_util.setup_testing_defaults(self.environ)
35
36 self.status = None
37 self.response_headers = None
38 self.exc_info = None
39
40 def tearDown(self):
41 self.get_tracer_patcher.stop()
42 self.start_span_patcher.stop()
43
44 def start_response(self, status, response_headers, exc_info=None):
45 self.status = status
46 self.response_headers = response_headers
47 self.exc_info = exc_info
48 return self.write
49
[end of ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py
--- a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py
+++ b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py
@@ -1,32 +1,38 @@
import io
import unittest
-import unittest.mock as mock
import wsgiref.util as wsgiref_util
+from importlib import reload
from opentelemetry import trace as trace_api
+from opentelemetry.sdk.trace import TracerSource, export
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter,
+)
+
+_MEMORY_EXPORTER = None
class WsgiTestBase(unittest.TestCase):
- def setUp(self):
- self.span = mock.create_autospec(trace_api.Span, spec_set=True)
- tracer = trace_api.Tracer()
- self.get_tracer_patcher = mock.patch.object(
- trace_api.TracerSource,
- "get_tracer",
- autospec=True,
- spec_set=True,
- return_value=tracer,
- )
- self.get_tracer_patcher.start()
-
- self.start_span_patcher = mock.patch.object(
- tracer,
- "start_span",
- autospec=True,
- spec_set=True,
- return_value=self.span,
+ @classmethod
+ def setUpClass(cls):
+ global _MEMORY_EXPORTER # pylint:disable=global-statement
+ trace_api.set_preferred_tracer_source_implementation(
+ lambda T: TracerSource()
)
- self.start_span = self.start_span_patcher.start()
+ tracer_source = trace_api.tracer_source()
+ _MEMORY_EXPORTER = InMemorySpanExporter()
+ span_processor = export.SimpleExportSpanProcessor(_MEMORY_EXPORTER)
+ tracer_source.add_span_processor(span_processor)
+
+ @classmethod
+ def tearDownClass(cls):
+ reload(trace_api)
+
+ def setUp(self):
+
+ self.memory_exporter = _MEMORY_EXPORTER
+ self.memory_exporter.clear()
+
self.write_buffer = io.BytesIO()
self.write = self.write_buffer.write
@@ -37,10 +43,6 @@
self.response_headers = None
self.exc_info = None
- def tearDown(self):
- self.get_tracer_patcher.stop()
- self.start_span_patcher.stop()
-
def start_response(self, status, response_headers, exc_info=None):
self.status = status
self.response_headers = response_headers
| {"golden_diff": "diff --git a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n--- a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n+++ b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n@@ -1,32 +1,38 @@\n import io\n import unittest\n-import unittest.mock as mock\n import wsgiref.util as wsgiref_util\n+from importlib import reload\n \n from opentelemetry import trace as trace_api\n+from opentelemetry.sdk.trace import TracerSource, export\n+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (\n+ InMemorySpanExporter,\n+)\n+\n+_MEMORY_EXPORTER = None\n \n \n class WsgiTestBase(unittest.TestCase):\n- def setUp(self):\n- self.span = mock.create_autospec(trace_api.Span, spec_set=True)\n- tracer = trace_api.Tracer()\n- self.get_tracer_patcher = mock.patch.object(\n- trace_api.TracerSource,\n- \"get_tracer\",\n- autospec=True,\n- spec_set=True,\n- return_value=tracer,\n- )\n- self.get_tracer_patcher.start()\n-\n- self.start_span_patcher = mock.patch.object(\n- tracer,\n- \"start_span\",\n- autospec=True,\n- spec_set=True,\n- return_value=self.span,\n+ @classmethod\n+ def setUpClass(cls):\n+ global _MEMORY_EXPORTER # pylint:disable=global-statement\n+ trace_api.set_preferred_tracer_source_implementation(\n+ lambda T: TracerSource()\n )\n- self.start_span = self.start_span_patcher.start()\n+ tracer_source = trace_api.tracer_source()\n+ _MEMORY_EXPORTER = InMemorySpanExporter()\n+ span_processor = export.SimpleExportSpanProcessor(_MEMORY_EXPORTER)\n+ tracer_source.add_span_processor(span_processor)\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ reload(trace_api)\n+\n+ def setUp(self):\n+\n+ self.memory_exporter = _MEMORY_EXPORTER\n+ self.memory_exporter.clear()\n+\n self.write_buffer = io.BytesIO()\n self.write = self.write_buffer.write\n \n@@ -37,10 +43,6 @@\n self.response_headers = None\n self.exc_info = None\n \n- def tearDown(self):\n- self.get_tracer_patcher.stop()\n- self.start_span_patcher.stop()\n-\n def start_response(self, status, response_headers, exc_info=None):\n self.status = status\n self.response_headers = response_headers\n", "issue": "Switch unit tests from `unittest.mock` to SDK & in-memory exporter\nSee https://github.com/open-telemetry/opentelemetry-python/pull/290#issuecomment-558091283.\r\nCurrently tests are cumbersome to write and actually we probably don't want to test which API calls are made but what Spans would result in most cases. 
For this a SDK with in-memory exporter would be better than using `unittest.mock`.\n", "before_files": [{"content": "import io\nimport unittest\nimport unittest.mock as mock\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import trace as trace_api\n\n\nclass WsgiTestBase(unittest.TestCase):\n def setUp(self):\n self.span = mock.create_autospec(trace_api.Span, spec_set=True)\n tracer = trace_api.Tracer()\n self.get_tracer_patcher = mock.patch.object(\n trace_api.TracerSource,\n \"get_tracer\",\n autospec=True,\n spec_set=True,\n return_value=tracer,\n )\n self.get_tracer_patcher.start()\n\n self.start_span_patcher = mock.patch.object(\n tracer,\n \"start_span\",\n autospec=True,\n spec_set=True,\n return_value=self.span,\n )\n self.start_span = self.start_span_patcher.start()\n self.write_buffer = io.BytesIO()\n self.write = self.write_buffer.write\n\n self.environ = {}\n wsgiref_util.setup_testing_defaults(self.environ)\n\n self.status = None\n self.response_headers = None\n self.exc_info = None\n\n def tearDown(self):\n self.get_tracer_patcher.stop()\n self.start_span_patcher.stop()\n\n def start_response(self, status, response_headers, exc_info=None):\n self.status = status\n self.response_headers = response_headers\n self.exc_info = exc_info\n return self.write\n", "path": "ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py"}]} | 1,048 | 594 |
gh_patches_debug_7939 | rasdani/github-patches | git_diff | mozilla__bugbug-3401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor logging statements to use lazy % formatting
Example of logging statements that we want to refactor:
https://github.com/mozilla/bugbug/blob/d53595391dbd75379bb49bff12dee4821e4b956c/bugbug/github.py#L61
https://github.com/mozilla/bugbug/blob/69972a1684f788319bf5c2944bbe8eeb79428c7d/scripts/regressor_finder.py#L396
More details can be found in the [pylint docs](https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/logging-fstring-interpolation.html).
</issue>
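For reference, a minimal before/after sketch (`file_path` is a stand-in value for illustration):

```python
import logging

logger = logging.getLogger(__name__)
file_path = "metrics.json"  # stand-in value

# Eager: the f-string is built even when INFO records are filtered out.
logger.info(f"Metrics saved to {file_path!r}")

# Lazy: interpolation happens only if the record is actually emitted,
# and handlers can aggregate messages by their constant template.
logger.info("Metrics saved to %r", file_path)
```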
<code>
[start of scripts/retrieve_training_metrics.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import argparse
7 import logging
8 import os
9 import sys
10 from os.path import abspath, join
11
12 import requests
13 import taskcluster
14
15 from bugbug.utils import get_taskcluster_options
16
17 ROOT_URI = "train_{}.per_date"
18 DATE_URI = "train_{}.per_date.{}"
19 BASE_URL = "https://community-tc.services.mozilla.com/api/index/v1/task/{}/artifacts/public/metrics.json"
20 NAMESPACE_URI = "project.bugbug.{}"
21
22 LOGGER = logging.getLogger(__name__)
23
24 logging.basicConfig(level=logging.INFO)
25
26
27 def get_task_metrics_from_uri(index_uri):
28 index_url = BASE_URL.format(index_uri)
29 LOGGER.info("Retrieving metrics from %s", index_url)
30 r = requests.get(index_url)
31
32 if r.status_code == 404:
33 LOGGER.error(f"File not found for URL {index_url}, check your arguments")
34 sys.exit(1)
35
36 r.raise_for_status()
37
38 return r
39
40
41 def get_namespaces(index, index_uri):
42 index_namespaces = index.listNamespaces(index_uri)
43
44 return index_namespaces["namespaces"]
45
46
47 def is_later_or_equal(partial_date, from_date):
48 for partial_date_part, from_date_part in zip(partial_date, from_date):
49 if int(partial_date_part) > int(from_date_part):
50 return True
51 elif int(partial_date_part) < int(from_date_part):
52 return False
53 else:
54 continue
55
56 return True
57
58
59 def get_task_metrics_from_date(model, date, output_directory):
60 options = get_taskcluster_options()
61
62 index = taskcluster.Index(options)
63
64 index.ping()
65
66 # Split the date
67 from_date = date.split(".")
68
69 namespaces = []
70
71 # Start at the root level
72 # We need an empty list in order to append namespaces part to it
73 namespaces.append([])
74
75 # Recursively list all namespaces greater or equals than the given date
76 while namespaces:
77 current_ns = namespaces.pop()
78
79 # Handle version level namespaces
80 if not current_ns:
81 ns_uri = ROOT_URI.format(model)
82 else:
83 current_ns_date = ".".join(current_ns)
84 ns_uri = DATE_URI.format(model, current_ns_date)
85
86 ns_full_uri = NAMESPACE_URI.format(ns_uri)
87
88 tasks = index.listTasks(ns_full_uri)
89 for task in tasks["tasks"]:
90 task_uri = task["namespace"]
91 r = get_task_metrics_from_uri(task_uri)
92
93 # Write the file on disk
94 file_name = f"metric_{'_'.join(task_uri.split('.'))}.json"
95 file_path = abspath(join(output_directory, file_name))
96 with open(file_path, "w") as metric_file:
97 metric_file.write(r.text)
98 LOGGER.info(f"Metrics saved to {file_path!r}")
99
100 for namespace in get_namespaces(index, ns_full_uri):
101 new_ns = current_ns.copy()
102 new_ns.append(namespace["name"])
103
104 if not is_later_or_equal(new_ns, from_date):
105 LOGGER.debug("NEW namespace %s is before %s", new_ns, from_date)
106 continue
107
108 # Might not be efficient but size of `namespaces` shouldn't be too
109 # big as we are doing a depth-first traversal
110 if new_ns not in namespaces:
111 namespaces.append(new_ns)
112
113
114 def main():
115 description = "Retrieve a model training metrics"
116 parser = argparse.ArgumentParser(description=description)
117
118 parser.add_argument(
119 "-d",
120 "--output-directory",
121 default=os.getcwd(),
122 help="In which directory the script should save the metrics file. The directory must exists",
123 )
124 parser.add_argument("model", help="Which model to retrieve training metrics from.")
125 parser.add_argument(
126 "date",
127 nargs="?",
128 help="Which date should we retrieve training metrics from. Default to latest",
129 )
130
131 args = parser.parse_args()
132
133 get_task_metrics_from_date(args.model, args.date, args.output_directory)
134
135
136 if __name__ == "__main__":
137 main()
138
[end of scripts/retrieve_training_metrics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/retrieve_training_metrics.py b/scripts/retrieve_training_metrics.py
--- a/scripts/retrieve_training_metrics.py
+++ b/scripts/retrieve_training_metrics.py
@@ -95,7 +95,7 @@
file_path = abspath(join(output_directory, file_name))
with open(file_path, "w") as metric_file:
metric_file.write(r.text)
- LOGGER.info(f"Metrics saved to {file_path!r}")
+ LOGGER.info("Metrics saved to %r", file_path)
for namespace in get_namespaces(index, ns_full_uri):
new_ns = current_ns.copy()
| {"golden_diff": "diff --git a/scripts/retrieve_training_metrics.py b/scripts/retrieve_training_metrics.py\n--- a/scripts/retrieve_training_metrics.py\n+++ b/scripts/retrieve_training_metrics.py\n@@ -95,7 +95,7 @@\n file_path = abspath(join(output_directory, file_name))\n with open(file_path, \"w\") as metric_file:\n metric_file.write(r.text)\n- LOGGER.info(f\"Metrics saved to {file_path!r}\")\n+ LOGGER.info(\"Metrics saved to %r\", file_path)\n \n for namespace in get_namespaces(index, ns_full_uri):\n new_ns = current_ns.copy()\n", "issue": "Refactor logging statements to use lazy % formatting\nExample of logging statements that we want to refactor:\r\n\r\nhttps://github.com/mozilla/bugbug/blob/d53595391dbd75379bb49bff12dee4821e4b956c/bugbug/github.py#L61\r\n\r\nhttps://github.com/mozilla/bugbug/blob/69972a1684f788319bf5c2944bbe8eeb79428c7d/scripts/regressor_finder.py#L396\r\n\r\nMore details can be found in the [pylint docs](https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/logging-fstring-interpolation.html).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom os.path import abspath, join\n\nimport requests\nimport taskcluster\n\nfrom bugbug.utils import get_taskcluster_options\n\nROOT_URI = \"train_{}.per_date\"\nDATE_URI = \"train_{}.per_date.{}\"\nBASE_URL = \"https://community-tc.services.mozilla.com/api/index/v1/task/{}/artifacts/public/metrics.json\"\nNAMESPACE_URI = \"project.bugbug.{}\"\n\nLOGGER = logging.getLogger(__name__)\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_task_metrics_from_uri(index_uri):\n index_url = BASE_URL.format(index_uri)\n LOGGER.info(\"Retrieving metrics from %s\", index_url)\n r = requests.get(index_url)\n\n if r.status_code == 404:\n LOGGER.error(f\"File not found for URL {index_url}, check your arguments\")\n sys.exit(1)\n\n r.raise_for_status()\n\n return r\n\n\ndef get_namespaces(index, index_uri):\n index_namespaces = index.listNamespaces(index_uri)\n\n return index_namespaces[\"namespaces\"]\n\n\ndef is_later_or_equal(partial_date, from_date):\n for partial_date_part, from_date_part in zip(partial_date, from_date):\n if int(partial_date_part) > int(from_date_part):\n return True\n elif int(partial_date_part) < int(from_date_part):\n return False\n else:\n continue\n\n return True\n\n\ndef get_task_metrics_from_date(model, date, output_directory):\n options = get_taskcluster_options()\n\n index = taskcluster.Index(options)\n\n index.ping()\n\n # Split the date\n from_date = date.split(\".\")\n\n namespaces = []\n\n # Start at the root level\n # We need an empty list in order to append namespaces part to it\n namespaces.append([])\n\n # Recursively list all namespaces greater or equals than the given date\n while namespaces:\n current_ns = namespaces.pop()\n\n # Handle version level namespaces\n if not current_ns:\n ns_uri = ROOT_URI.format(model)\n else:\n current_ns_date = \".\".join(current_ns)\n ns_uri = DATE_URI.format(model, current_ns_date)\n\n ns_full_uri = NAMESPACE_URI.format(ns_uri)\n\n tasks = index.listTasks(ns_full_uri)\n for task in tasks[\"tasks\"]:\n task_uri = task[\"namespace\"]\n r = get_task_metrics_from_uri(task_uri)\n\n # Write the file on disk\n file_name = f\"metric_{'_'.join(task_uri.split('.'))}.json\"\n file_path = 
abspath(join(output_directory, file_name))\n with open(file_path, \"w\") as metric_file:\n metric_file.write(r.text)\n LOGGER.info(f\"Metrics saved to {file_path!r}\")\n\n for namespace in get_namespaces(index, ns_full_uri):\n new_ns = current_ns.copy()\n new_ns.append(namespace[\"name\"])\n\n if not is_later_or_equal(new_ns, from_date):\n LOGGER.debug(\"NEW namespace %s is before %s\", new_ns, from_date)\n continue\n\n # Might not be efficient but size of `namespaces` shouldn't be too\n # big as we are doing a depth-first traversal\n if new_ns not in namespaces:\n namespaces.append(new_ns)\n\n\ndef main():\n description = \"Retrieve a model training metrics\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"-d\",\n \"--output-directory\",\n default=os.getcwd(),\n help=\"In which directory the script should save the metrics file. The directory must exists\",\n )\n parser.add_argument(\"model\", help=\"Which model to retrieve training metrics from.\")\n parser.add_argument(\n \"date\",\n nargs=\"?\",\n help=\"Which date should we retrieve training metrics from. Default to latest\",\n )\n\n args = parser.parse_args()\n\n get_task_metrics_from_date(args.model, args.date, args.output_directory)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/retrieve_training_metrics.py"}]} | 1,946 | 133 |
gh_patches_debug_29495 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive for CKV_AWS_143 ("Ensure S3 bucket has lock configuration enabled by default")
**Describe the bug**
Checkov expects the `object_lock_configuration` argument to be an object, i.e.
```hcl
object_lock_configuration = {
object_lock_enabled = "Enabled"
}
```
Terraform works with the above configuration, but when also declaring rules for the object lock configuration, it expects a block instead, e.g.
```hcl
object_lock_configuration {
object_lock_enabled = "Enabled"
rule {
default_retention {
mode = "GOVERNANCE"
days = 366
}
}
}
```
**Expected behavior**
Checkov should pass for an `object_lock_configuration` argument block.
**Desktop (please complete the following information):**
- OS: macOS Big Sur 11.3.1
- Checkov Version: 2.0.135
- Terraform version: v0.14.8
</issue>
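For reference, a sketch of a `scan_resource_conf` that tolerates both shapes (assumption: with the block form, the parsed conf wraps scalar values in lists, so the flag may arrive as `"Enabled"` or `["Enabled"]`):

```python
# Sketch: accept both the object form and the block form.
from checkov.common.models.enums import CheckResult


def scan_resource_conf(self, conf):
    lock_conf = conf.get("object_lock_configuration")
    if lock_conf and lock_conf[0]:
        lock_enabled = lock_conf[0].get("object_lock_enabled")
        if lock_enabled in ["Enabled", ["Enabled"]]:
            return CheckResult.PASSED
        return CheckResult.FAILED
    return CheckResult.UNKNOWN
```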
<code>
[start of checkov/terraform/checks/resource/aws/S3BucketObjectLock.py]
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
3
4
5 class S3BucketObjectLock(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure that S3 bucket has lock configuration enabled by default"
8 id = "CKV_AWS_143"
9 supported_resources = ['aws_s3_bucket']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if 'object_lock_configuration' in conf:
15 if 'object_lock_enabled' in conf['object_lock_configuration'][0]:
16 lock = conf['object_lock_configuration'][0]['object_lock_enabled']
17 if lock == "Enabled":
18 return CheckResult.PASSED
19 else:
20 return CheckResult.FAILED
21 else:
22 return CheckResult.PASSED
23
24
25 check = S3BucketObjectLock()
26
[end of checkov/terraform/checks/resource/aws/S3BucketObjectLock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py
--- a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py
+++ b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py
@@ -1,25 +1,26 @@
+from typing import Dict, List, Any
+
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
class S3BucketObjectLock(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure that S3 bucket has lock configuration enabled by default"
id = "CKV_AWS_143"
- supported_resources = ['aws_s3_bucket']
+ supported_resources = ["aws_s3_bucket"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
- if 'object_lock_configuration' in conf:
- if 'object_lock_enabled' in conf['object_lock_configuration'][0]:
- lock = conf['object_lock_configuration'][0]['object_lock_enabled']
- if lock == "Enabled":
- return CheckResult.PASSED
- else:
- return CheckResult.FAILED
- else:
- return CheckResult.PASSED
+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
+ lock_conf = conf.get("object_lock_configuration")
+ if lock_conf and lock_conf[0]:
+ lock_enabled = lock_conf[0].get("object_lock_enabled")
+ if lock_enabled in ["Enabled", ["Enabled"]]:
+ return CheckResult.PASSED
+ return CheckResult.FAILED
+
+ return CheckResult.UNKNOWN
check = S3BucketObjectLock()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n--- a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n+++ b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n@@ -1,25 +1,26 @@\n+from typing import Dict, List, Any\n+\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n \n \n class S3BucketObjectLock(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure that S3 bucket has lock configuration enabled by default\"\n id = \"CKV_AWS_143\"\n- supported_resources = ['aws_s3_bucket']\n+ supported_resources = [\"aws_s3_bucket\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- if 'object_lock_configuration' in conf:\n- if 'object_lock_enabled' in conf['object_lock_configuration'][0]:\n- lock = conf['object_lock_configuration'][0]['object_lock_enabled']\n- if lock == \"Enabled\":\n- return CheckResult.PASSED\n- else:\n- return CheckResult.FAILED\n- else:\n- return CheckResult.PASSED\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ lock_conf = conf.get(\"object_lock_configuration\")\n+ if lock_conf and lock_conf[0]:\n+ lock_enabled = lock_conf[0].get(\"object_lock_enabled\")\n+ if lock_enabled in [\"Enabled\", [\"Enabled\"]]:\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n+\n+ return CheckResult.UNKNOWN\n \n \n check = S3BucketObjectLock()\n", "issue": "False positive for CKV_AWS_143 (\"Ensure S3 bucket has lock configuration enabled by default\")\n**Describe the bug**\r\n\r\nCheckov expects argument `object_lock_configuration` to be an object, i.e.\r\n\r\n```hcl\r\nobject_lock_configuration = {\r\n object_lock_enabled = \"Enabled\"\r\n}\r\n```\r\n\r\nTerraform works with the above configuration, but when also declaring rules for the object lock configuration, it expects a block instead, e.g.\r\n```hcl\r\nobject_lock_configuration {\r\n object_lock_enabled = \"Enabled\"\r\n\r\n rule {\r\n default_retention {\r\n mode = \"GOVERNANCE\"\r\n days = 366\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nCheckov should pass for a `object_lock_configuration` argument block.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS Big Sur 11.3.1\r\n - Checkov Version: 2.0.135\r\n - Terraform version: v0.14.8\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass S3BucketObjectLock(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that S3 bucket has lock configuration enabled by default\"\n id = \"CKV_AWS_143\"\n supported_resources = ['aws_s3_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'object_lock_configuration' in conf:\n if 'object_lock_enabled' in conf['object_lock_configuration'][0]:\n lock = conf['object_lock_configuration'][0]['object_lock_enabled']\n if lock == \"Enabled\":\n return CheckResult.PASSED\n else:\n return CheckResult.FAILED\n else:\n return CheckResult.PASSED\n\n\ncheck = 
S3BucketObjectLock()\n", "path": "checkov/terraform/checks/resource/aws/S3BucketObjectLock.py"}]} | 1,029 | 440 |
gh_patches_debug_15777 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tests missing for `User.mention_markdown` and `User.mention_html`
And while we're at it, maybe `helpers.mention_markdown/html` too.
</issue>
<code>
[start of telegram/utils/helpers.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains helper functions."""
20 from html import escape
21
22 import re
23 import signal
24 from datetime import datetime
25
26 # From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
27 _signames = {v: k
28 for k, v in reversed(sorted(vars(signal).items()))
29 if k.startswith('SIG') and not k.startswith('SIG_')}
30
31
32 def get_signal_name(signum):
33 """Returns the signal name of the given signal number."""
34 return _signames[signum]
35
36
37 # Not using future.backports.datetime here as datetime value might be an input from the user,
38 # making every isinstance() call more delicate. So we just use our own compat layer.
39 if hasattr(datetime, 'timestamp'):
40 # Python 3.3+
41 def _timestamp(dt_obj):
42 return dt_obj.timestamp()
43 else:
44 # Python < 3.3 (incl 2.7)
45 from time import mktime
46
47 def _timestamp(dt_obj):
48 return mktime(dt_obj.timetuple())
49
50
51 def escape_markdown(text):
52 """Helper function to escape telegram markup symbols."""
53 escape_chars = '\*_`\['
54 return re.sub(r'([%s])' % escape_chars, r'\\\1', text)
55
56
57 def to_timestamp(dt_obj):
58 """
59 Args:
60 dt_obj (:class:`datetime.datetime`):
61
62 Returns:
63 int:
64
65 """
66 if not dt_obj:
67 return None
68
69 return int(_timestamp(dt_obj))
70
71
72 def from_timestamp(unixtime):
73 """
74 Args:
75 unixtime (int):
76
77 Returns:
78 datetime.datetime:
79
80 """
81 if not unixtime:
82 return None
83
84 return datetime.fromtimestamp(unixtime)
85
86
87 def mention_html(user_id, name):
88 """
89 Args:
90 user_id (:obj:`int`) The user's id which you want to mention.
91 name (:obj:`str`) The name the mention is showing.
92
93 Returns:
94 :obj:`str`: The inline mention for the user as html.
95 """
96 if isinstance(user_id, int):
97 return '<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
98
99
100 def mention_markdown(user_id, name):
101 """
102 Args:
103 user_id (:obj:`int`) The user's id which you want to mention.
104 name (:obj:`str`) The name the mention is showing.
105
106 Returns:
107 :obj:`str`: The inline mention for the user as markdown.
108 """
109 if isinstance(user_id, int):
110 return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)
111
112
113 def effective_message_type(entity):
114 """
115 Extracts the type of message as a string identifier from a :class:`telegram.Message` or a
116 :class:`telegram.Update`.
117
118 Args:
119 entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from
120
121 Returns:
122 str: One of ``Message.MESSAGE_TYPES``
123
124 """
125
126 # Importing on file-level yields cyclic Import Errors
127 from telegram import Message
128 from telegram import Update
129
130 if isinstance(entity, Message):
131 message = entity
132 elif isinstance(entity, Update):
133 message = entity.effective_message
134 else:
135 raise TypeError("entity is not Message or Update (got: {})".format(type(entity)))
136
137 for i in Message.MESSAGE_TYPES:
138 if getattr(message, i, None):
139 return i
140
141 return None
142
[end of telegram/utils/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/utils/helpers.py b/telegram/utils/helpers.py
--- a/telegram/utils/helpers.py
+++ b/telegram/utils/helpers.py
@@ -94,7 +94,7 @@
:obj:`str`: The inline mention for the user as html.
"""
if isinstance(user_id, int):
- return '<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
+ return u'<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
def mention_markdown(user_id, name):
@@ -107,7 +107,7 @@
:obj:`str`: The inline mention for the user as markdown.
"""
if isinstance(user_id, int):
- return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)
+ return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)
def effective_message_type(entity):
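For reference, the golden patch above only adds `u''` prefixes so the helpers return unicode on Python 2 as well; the tests the issue asks for still need to be written. A minimal sketch of those tests (the test-module layout is an assumption, not part of the patch):

```
from telegram.utils.helpers import mention_html, mention_markdown

def test_mention_html():
    # the name must be HTML-escaped inside the tg:// link
    assert mention_html(1, 'the<b>name') == u'<a href="tg://user?id=1">the&lt;b&gt;name</a>'

def test_mention_markdown():
    # markdown control characters in the name must be escaped
    assert mention_markdown(1, 'the_name') == u'[the\\_name](tg://user?id=1)'
```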
| {"golden_diff": "diff --git a/telegram/utils/helpers.py b/telegram/utils/helpers.py\n--- a/telegram/utils/helpers.py\n+++ b/telegram/utils/helpers.py\n@@ -94,7 +94,7 @@\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n- return '<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n+ return u'<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n \n \n def mention_markdown(user_id, name):\n@@ -107,7 +107,7 @@\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n- return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n+ return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n \n \n def effective_message_type(entity):\n", "issue": "Tests missing for `User.mention_markdown` and `User.mention_html`\nAnd while we're at it. Maybe `helpers.mention_markdown/html` too.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains helper functions.\"\"\"\nfrom html import escape\n\nimport re\nimport signal\nfrom datetime import datetime\n\n# From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\n_signames = {v: k\n for k, v in reversed(sorted(vars(signal).items()))\n if k.startswith('SIG') and not k.startswith('SIG_')}\n\n\ndef get_signal_name(signum):\n \"\"\"Returns the signal name of the given signal number.\"\"\"\n return _signames[signum]\n\n\n# Not using future.backports.datetime here as datetime value might be an input from the user,\n# making every isinstace() call more delicate. 
So we just use our own compat layer.\nif hasattr(datetime, 'timestamp'):\n # Python 3.3+\n def _timestamp(dt_obj):\n return dt_obj.timestamp()\nelse:\n # Python < 3.3 (incl 2.7)\n from time import mktime\n\n def _timestamp(dt_obj):\n return mktime(dt_obj.timetuple())\n\n\ndef escape_markdown(text):\n \"\"\"Helper function to escape telegram markup symbols.\"\"\"\n escape_chars = '\\*_`\\['\n return re.sub(r'([%s])' % escape_chars, r'\\\\\\1', text)\n\n\ndef to_timestamp(dt_obj):\n \"\"\"\n Args:\n dt_obj (:class:`datetime.datetime`):\n\n Returns:\n int:\n\n \"\"\"\n if not dt_obj:\n return None\n\n return int(_timestamp(dt_obj))\n\n\ndef from_timestamp(unixtime):\n \"\"\"\n Args:\n unixtime (int):\n\n Returns:\n datetime.datetime:\n\n \"\"\"\n if not unixtime:\n return None\n\n return datetime.fromtimestamp(unixtime)\n\n\ndef mention_html(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n return '<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n\n\ndef mention_markdown(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n\n\ndef effective_message_type(entity):\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from\n\n Returns:\n str: One of ``Message.MESSAGE_TYPES``\n\n \"\"\"\n\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message\n from telegram import Update\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n message = entity.effective_message\n else:\n raise TypeError(\"entity is not Message or Update (got: {})\".format(type(entity)))\n\n for i in Message.MESSAGE_TYPES:\n if getattr(message, i, None):\n return i\n\n return None\n", "path": "telegram/utils/helpers.py"}]} | 1,848 | 220 |
gh_patches_debug_29983 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing interactive event: remove + on call to action
**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung-2/
**user:** unregistered user
**expected behaviour:** buttons should be the same all over the platform
**behaviour:** there is a + on the button
**important screensize:**
**device & browser:**
**Comment/Question:** please take out the + before 'add question'
Screenshot?
<img width="692" alt="Bildschirmfoto 2020-09-22 um 17 51 38" src="https://user-images.githubusercontent.com/35491681/93906276-494d9200-fcfc-11ea-9614-3a9359b5ec97.png">
</issue>
<code>
[start of meinberlin/apps/projects/templatetags/meinberlin_project_tags.py]
1 from django import template
2
3 from adhocracy4.comments.models import Comment
4 from meinberlin.apps.budgeting.models import Proposal as budget_proposal
5 from meinberlin.apps.ideas.models import Idea
6 from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
7 from meinberlin.apps.mapideas.models import MapIdea
8 from meinberlin.apps.polls.models import Vote
9 from meinberlin.apps.projects import get_project_type
10
11 register = template.Library()
12
13
14 @register.filter
15 def project_url(project):
16 if (project.project_type == 'meinberlin_bplan.Bplan'
17 or project.project_type ==
18 'meinberlin_extprojects.ExternalProject'):
19 return project.externalproject.url
20 return project.get_absolute_url()
21
22
23 @register.filter
24 def project_type(project):
25 return get_project_type(project)
26
27
28 @register.filter
29 def is_external(project):
30 return (project.project_type == 'meinberlin_bplan.Bplan'
31 or project.project_type ==
32 'meinberlin_extprojects.ExternalProject')
33
34
35 @register.filter
36 def is_a4_project(project):
37 return (project.project_type == 'a4projects.Project')
38
39
40 @register.simple_tag
41 def get_num_entries(module):
42 """Count all user-generated items."""
43 item_count = \
44 Idea.objects.filter(module=module).count() \
45 + MapIdea.objects.filter(module=module).count() \
46 + budget_proposal.objects.filter(module=module).count() \
47 + kiezkasse_proposal.objects.filter(module=module).count() \
48 + Comment.objects.filter(idea__module=module).count() \
49 + Comment.objects.filter(mapidea__module=module).count() \
50 + Comment.objects.filter(budget_proposal__module=module).count() \
51 + Comment.objects.filter(kiezkasse_proposal__module=module).count() \
52 + Comment.objects.filter(topic__module=module).count() \
53 + Comment.objects.filter(maptopic__module=module).count() \
54 + Comment.objects.filter(paragraph__chapter__module=module).count() \
55 + Comment.objects.filter(chapter__module=module).count() \
56 + Comment.objects.filter(poll__module=module).count() \
57 + Vote.objects.filter(choice__question__poll__module=module).count()
58 return item_count
59
[end of meinberlin/apps/projects/templatetags/meinberlin_project_tags.py]
[start of meinberlin/apps/livequestions/phases.py]
1 from django.utils.translation import ugettext_lazy as _
2
3 from adhocracy4 import phases
4
5 from . import apps
6 from . import models
7 from . import views
8
9
10 class IssuePhase(phases.PhaseContent):
11 app = apps.Config.label
12 phase = 'issue'
13 view = views.LiveQuestionModuleDetail
14
15 name = _('Issue phase')
16 description = _('Add question.')
17 module_name = _('Interactive Event')
18 icon = 'lightbulb-o'
19
20 features = {
21 'crud': (models.LiveQuestion,),
22 'like': (models.LiveQuestion,)
23 }
24
25
26 phases.content.register(IssuePhase())
27
[end of meinberlin/apps/livequestions/phases.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/livequestions/phases.py b/meinberlin/apps/livequestions/phases.py
--- a/meinberlin/apps/livequestions/phases.py
+++ b/meinberlin/apps/livequestions/phases.py
@@ -13,7 +13,7 @@
view = views.LiveQuestionModuleDetail
name = _('Issue phase')
- description = _('Add question.')
+ description = _('Add questions and support.')
module_name = _('Interactive Event')
icon = 'lightbulb-o'
diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
@@ -4,6 +4,8 @@
from meinberlin.apps.budgeting.models import Proposal as budget_proposal
from meinberlin.apps.ideas.models import Idea
from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
+from meinberlin.apps.likes.models import Like
+from meinberlin.apps.livequestions.models import LiveQuestion
from meinberlin.apps.mapideas.models import MapIdea
from meinberlin.apps.polls.models import Vote
from meinberlin.apps.projects import get_project_type
@@ -54,5 +56,7 @@
+ Comment.objects.filter(paragraph__chapter__module=module).count() \
+ Comment.objects.filter(chapter__module=module).count() \
+ Comment.objects.filter(poll__module=module).count() \
- + Vote.objects.filter(choice__question__poll__module=module).count()
+ + Vote.objects.filter(choice__question__poll__module=module).count() \
+ + LiveQuestion.objects.filter(module=module).count() \
+ + Like.objects.filter(question__module=module).count()
return item_count
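The second hunk keeps the participation counter consistent with the new interactive-event module: live questions and their likes now count as user-generated items. Since `get_num_entries` is a plain function behind `@register.simple_tag`, the change can be sanity-checked directly (a hypothetical `module` with one question carrying one like is assumed):

```
from meinberlin.apps.projects.templatetags.meinberlin_project_tags import get_num_entries

# one LiveQuestion plus one Like on it -> two user-generated items
assert get_num_entries(module) == 2
```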
| {"golden_diff": "diff --git a/meinberlin/apps/livequestions/phases.py b/meinberlin/apps/livequestions/phases.py\n--- a/meinberlin/apps/livequestions/phases.py\n+++ b/meinberlin/apps/livequestions/phases.py\n@@ -13,7 +13,7 @@\n view = views.LiveQuestionModuleDetail\n \n name = _('Issue phase')\n- description = _('Add question.')\n+ description = _('Add questions and support.')\n module_name = _('Interactive Event')\n icon = 'lightbulb-o'\n \ndiff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n@@ -4,6 +4,8 @@\n from meinberlin.apps.budgeting.models import Proposal as budget_proposal\n from meinberlin.apps.ideas.models import Idea\n from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\n+from meinberlin.apps.likes.models import Like\n+from meinberlin.apps.livequestions.models import LiveQuestion\n from meinberlin.apps.mapideas.models import MapIdea\n from meinberlin.apps.polls.models import Vote\n from meinberlin.apps.projects import get_project_type\n@@ -54,5 +56,7 @@\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n- + Vote.objects.filter(choice__question__poll__module=module).count()\n+ + Vote.objects.filter(choice__question__poll__module=module).count() \\\n+ + LiveQuestion.objects.filter(module=module).count() \\\n+ + Like.objects.filter(question__module=module).count()\n return item_count\n", "issue": "testing interactive event: remove + on call to action\n**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung-2/\r\n**user:** unregistered user\r\n**expected behaviour:** buttons should be same all over platform\r\n**behaviour:** there is a + on the button\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** please take out the + before add question\r\n\r\n\r\nScreenshot?\r\n<img width=\"692\" alt=\"Bildschirmfoto 2020-09-22 um 17 51 38\" src=\"https://user-images.githubusercontent.com/35491681/93906276-494d9200-fcfc-11ea-9614-3a9359b5ec97.png\">\r\n\n", "before_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.polls.models import Vote\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n if (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject'):\n return project.externalproject.url\n return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n return (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject')\n\n\[email protected]\ndef is_a4_project(project):\n return (project.project_type == 'a4projects.Project')\n\n\[email protected]_tag\ndef get_num_entries(module):\n \"\"\"Count all 
user-generated items.\"\"\"\n item_count = \\\n Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n + Comment.objects.filter(topic__module=module).count() \\\n + Comment.objects.filter(maptopic__module=module).count() \\\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n + Vote.objects.filter(choice__question__poll__module=module).count()\n return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import phases\n\nfrom . import apps\nfrom . import models\nfrom . import views\n\n\nclass IssuePhase(phases.PhaseContent):\n app = apps.Config.label\n phase = 'issue'\n view = views.LiveQuestionModuleDetail\n\n name = _('Issue phase')\n description = _('Add question.')\n module_name = _('Interactive Event')\n icon = 'lightbulb-o'\n\n features = {\n 'crud': (models.LiveQuestion,),\n 'like': (models.LiveQuestion,)\n }\n\n\nphases.content.register(IssuePhase())\n", "path": "meinberlin/apps/livequestions/phases.py"}]} | 1,570 | 446 |
gh_patches_debug_17824 | rasdani/github-patches | git_diff | hydroshare__hydroshare-5083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Track user agent for metrics
**Describe the feature you'd like and what it will do**
In HS v2.5.4, we don't track user_agent in our metrics. This makes it difficult to tell when requests to HS are occurring via direct UI interactions, or via other tools like hsclient.
**Why is this feature important?**
We need more insight into how HS's ecosystem of tools is being used. This information should drive our continued development of existing tools and our consideration of additions for future use.
**Is your feature request related to a problem? Please describe.**
It is difficult to make decisions without information.
</issue>
<code>
[start of hs_tracking/utils.py]
1 import robot_detection
2 from ipware.ip import get_ip
3 from hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile
4 from urllib.parse import urlparse
5
6
7 def get_client_ip(request):
8 return get_ip(request)
9
10
11 def get_user_type(session):
12 try:
13 user = session.visitor.user
14 usertype = user.userprofile.user_type
15 except AttributeError:
16 usertype = None
17 return usertype
18
19
20 def get_user_email_domain(session):
21 try:
22 user = session.visitor.user
23 emaildomain = user.email.split('@')[-1]
24 except AttributeError:
25 emaildomain = None
26 return emaildomain
27
28
29 def get_user_email_tld(session, emaildomain=None):
30 try:
31 if not emaildomain:
32 emaildomain = get_user_email_domain(session)
33 if emaildomain:
34 shortdomain = '.'.join(emaildomain.split('.')[1:])
35 return shortdomain
36 except AttributeError:
37 return None
38
39
40 def is_human(user_agent):
41 if robot_detection.is_robot(user_agent):
42 return False
43 return True
44
45
46 def get_std_log_fields(request, session=None):
47 """ returns a standard set of metadata that to each receiver function.
48 This ensures that all activities are reporting a consistent set of metrics
49 """
50 user_type = None
51 user_email_tld = None
52 full_domain = None
53 if session is not None:
54 user_type = get_user_type(session)
55 full_domain = get_user_email_domain(session)
56 user_email_tld = get_user_email_tld(session, full_domain)
57
58 return {
59 'user_ip': get_client_ip(request),
60 'user_type': user_type,
61 'user_email_domain': user_email_tld,
62 'user_email_domain_full': full_domain
63 }
64
65
66 def authentic_redirect_url(url):
67 """ Validates a url scheme and netloc is in an existing web app
68 :param url: String of a url
69 :return: Boolean, True if the url exists in a web app
70 """
71 if not url:
72 return False
73 u = urlparse(url)
74 url_base = "{}://{}".format(u.scheme, u.netloc)
75 return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \
76 or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \
77 or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()
78
[end of hs_tracking/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py
--- a/hs_tracking/utils.py
+++ b/hs_tracking/utils.py
@@ -47,6 +47,12 @@
""" returns a standard set of metadata that to each receiver function.
This ensures that all activities are reporting a consistent set of metrics
"""
+ try:
+ user_agent = request.META['HTTP_USER_AGENT']
+ human = is_human(user_agent)
+ except KeyError:
+ user_agent = None
+ human = None
user_type = None
user_email_tld = None
full_domain = None
@@ -59,7 +65,9 @@
'user_ip': get_client_ip(request),
'user_type': user_type,
'user_email_domain': user_email_tld,
- 'user_email_domain_full': full_domain
+ 'user_email_domain_full': full_domain,
+ 'is_human': human,
+ 'user_agent': user_agent
}
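With this applied, every receiver that builds its metrics from `get_std_log_fields` picks up the two new keys automatically. A quick sketch using Django's test `RequestFactory` (the user-agent string is made up):

```
from django.test import RequestFactory
from hs_tracking.utils import get_std_log_fields

request = RequestFactory().get('/', HTTP_USER_AGENT='hsclient/0.1')
fields = get_std_log_fields(request)
# fields now carries 'user_agent': 'hsclient/0.1' plus an 'is_human' flag,
# so hsclient traffic can be separated from browser traffic downstream
```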
| {"golden_diff": "diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py\n--- a/hs_tracking/utils.py\n+++ b/hs_tracking/utils.py\n@@ -47,6 +47,12 @@\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n+ try:\n+ user_agent = request.META['HTTP_USER_AGENT']\n+ human = is_human(user_agent)\n+ except KeyError:\n+ user_agent = None\n+ human = None\n user_type = None\n user_email_tld = None\n full_domain = None\n@@ -59,7 +65,9 @@\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n- 'user_email_domain_full': full_domain\n+ 'user_email_domain_full': full_domain,\n+ 'is_human': human,\n+ 'user_agent': user_agent\n }\n", "issue": "Track user agent for metrics\n**Describe the feature you'd like and what it will do**\r\nIn HS v2.5.4, we don't track user_agent in our metrics. This makes it difficult to tell when requests to HS are occurring via direct UI interactions, or via other tools like hsclient.\r\n\r\n\r\n**Why is this feature important?**\r\nWe need more insight into how HS' ecosystem of tools are being used. This information should drive our continued development on existing tools and our consideration of additions for future use.\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nIt is difficult to make decisions without information.\r\n\n", "before_files": [{"content": "import robot_detection\nfrom ipware.ip import get_ip\nfrom hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile\nfrom urllib.parse import urlparse\n\n\ndef get_client_ip(request):\n return get_ip(request)\n\n\ndef get_user_type(session):\n try:\n user = session.visitor.user\n usertype = user.userprofile.user_type\n except AttributeError:\n usertype = None\n return usertype\n\n\ndef get_user_email_domain(session):\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n except AttributeError:\n emaildomain = None\n return emaildomain\n\n\ndef get_user_email_tld(session, emaildomain=None):\n try:\n if not emaildomain:\n emaildomain = get_user_email_domain(session)\n if emaildomain:\n shortdomain = '.'.join(emaildomain.split('.')[1:])\n return shortdomain\n except AttributeError:\n return None\n\n\ndef is_human(user_agent):\n if robot_detection.is_robot(user_agent):\n return False\n return True\n\n\ndef get_std_log_fields(request, session=None):\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n user_email_tld = None\n full_domain = None\n if session is not None:\n user_type = get_user_type(session)\n full_domain = get_user_email_domain(session)\n user_email_tld = get_user_email_tld(session, full_domain)\n\n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n 'user_email_domain_full': full_domain\n }\n\n\ndef authentic_redirect_url(url):\n \"\"\" Validates a url scheme and netloc is in an existing web app\n :param url: String of a url\n :return: Boolean, True if the url exists in a web app\n \"\"\"\n if not url:\n return False\n u = urlparse(url)\n url_base = \"{}://{}\".format(u.scheme, u.netloc)\n return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \\\n or 
RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()\n", "path": "hs_tracking/utils.py"}]} | 1,326 | 222 |
gh_patches_debug_12997 | rasdani/github-patches | git_diff | conan-io__conan-center-index-18559 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] util-linux-libuuid uses wrong cmake target
### Description
In the following lines, the `util-linux-libuuid` recipe sets the cmake target to be `LibUUID::LibUUID` with a filename of `LibUUID-config.cmake`:
https://github.com/conan-io/conan-center-index/blob/61c4f7819e6cd3594a57f6c3847f94ab86de623f/recipes/util-linux-libuuid/all/conanfile.py#L112-L113
This was based on the internal practice that Kitware has for their internal libuuid cmake module; however, this is not public, and a number of packages (czmq, cppcommon) seem to assume a `libuuid::libuuid` target. This change should be reverted so that these packages can utilise util-linux-libuuid without a requirement to be patched.
### Package and Environment Details
N/A
### Conan profile
N/A
### Steps to reproduce
N/A
### Logs
N/A
</issue>
<code>
[start of recipes/util-linux-libuuid/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.apple import fix_apple_shared_install_name
4 from conan.tools.files import copy, get, rm, rmdir
5 from conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps
6 from conan.tools.layout import basic_layout
7 from conan.tools.scm import Version
8 import os
9
10 required_conan_version = ">=1.53.0"
11
12
13 class UtilLinuxLibuuidConan(ConanFile):
14 name = "util-linux-libuuid"
15 description = "Universally unique id library"
16 url = "https://github.com/conan-io/conan-center-index"
17 homepage = "https://github.com/util-linux/util-linux.git"
18 license = "BSD-3-Clause"
19 topics = "id", "identifier", "unique", "uuid"
20 package_type = "library"
21 provides = "libuuid"
22 settings = "os", "arch", "compiler", "build_type"
23 options = {
24 "shared": [True, False],
25 "fPIC": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 }
31
32 @property
33 def _has_sys_file_header(self):
34 return self.settings.os in ["FreeBSD", "Linux", "Macos"]
35
36 def config_options(self):
37 if self.settings.os == "Windows":
38 del self.options.fPIC
39
40 def configure(self):
41 if self.options.shared:
42 self.options.rm_safe("fPIC")
43 self.settings.rm_safe("compiler.cppstd")
44 self.settings.rm_safe("compiler.libcxx")
45
46 def layout(self):
47 basic_layout(self, src_folder="src")
48
49 def _minimum_compiler_version(self, compiler, build_type):
50 min_version = {
51 "gcc": {
52 "Release": "4",
53 "Debug": "8",
54 },
55 "clang": {
56 "Release": "3",
57 "Debug": "3",
58 },
59 "apple-clang": {
60 "Release": "5",
61 "Debug": "5",
62 },
63 }
64 return min_version.get(str(compiler), {}).get(str(build_type), "0")
65
66 def validate(self):
67 min_version = self._minimum_compiler_version(self.settings.compiler, self.settings.build_type)
68 if Version(self.settings.compiler.version) < min_version:
69 raise ConanInvalidConfiguration(f"{self.settings.compiler} {self.settings.compiler.version} does not meet the minimum version requirement of version {min_version}")
70 if self.settings.os == "Windows":
71 raise ConanInvalidConfiguration(f"{self.ref} is not supported on Windows")
72
73 def requirements(self):
74 if self.settings.os == "Macos":
75 # Required because libintl.{a,dylib} is not distributed via libc on Macos
76 self.requires("libgettext/0.21")
77
78 def source(self):
79 get(self, **self.conan_data["sources"][self.version], strip_root=True)
80
81 def generate(self):
82 tc = AutotoolsToolchain(self)
83 tc.configure_args.append("--disable-all-programs")
84 tc.configure_args.append("--enable-libuuid")
85 if self._has_sys_file_header:
86 tc.extra_defines.append("HAVE_SYS_FILE_H")
87 if "x86" in self.settings.arch:
88 tc.extra_cflags.append("-mstackrealign")
89 tc.generate()
90
91 deps = AutotoolsDeps(self)
92 deps.generate()
93
94 def build(self):
95 autotools = Autotools(self)
96 autotools.configure()
97 autotools.make()
98
99 def package(self):
100 copy(self, "COPYING.BSD-3-Clause", src=os.path.join(self.source_folder, "Documentation", "licenses"), dst=os.path.join(self.package_folder, "licenses"))
101 autotools = Autotools(self)
102 autotools.install()
103 rm(self, "*.la", os.path.join(self.package_folder, "lib"))
104 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
105 rmdir(self, os.path.join(self.package_folder, "bin"))
106 rmdir(self, os.path.join(self.package_folder, "sbin"))
107 rmdir(self, os.path.join(self.package_folder, "share"))
108 fix_apple_shared_install_name(self)
109
110 def package_info(self):
111 self.cpp_info.set_property("pkg_config_name", "uuid")
112 self.cpp_info.set_property("cmake_target_name", "LibUUID::LibUUID")
113 self.cpp_info.set_property("cmake_file_name", "LibUUID")
114 self.cpp_info.libs = ["uuid"]
115 self.cpp_info.includedirs.append(os.path.join("include", "uuid"))
116
[end of recipes/util-linux-libuuid/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/util-linux-libuuid/all/conanfile.py b/recipes/util-linux-libuuid/all/conanfile.py
--- a/recipes/util-linux-libuuid/all/conanfile.py
+++ b/recipes/util-linux-libuuid/all/conanfile.py
@@ -109,7 +109,10 @@
def package_info(self):
self.cpp_info.set_property("pkg_config_name", "uuid")
- self.cpp_info.set_property("cmake_target_name", "LibUUID::LibUUID")
- self.cpp_info.set_property("cmake_file_name", "LibUUID")
+ self.cpp_info.set_property("cmake_target_name", "libuuid::libuuid")
+ self.cpp_info.set_property("cmake_file_name", "libuuid")
+ # Maintain alias to `LibUUID::LibUUID` for previous version of the recipe
+ self.cpp_info.set_property("cmake_target_aliases", ["LibUUID::LibUUID"])
+
self.cpp_info.libs = ["uuid"]
self.cpp_info.includedirs.append(os.path.join("include", "uuid"))
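Consumers do not need recipe changes for this: the `CMakeDeps` generator will now emit `libuuid::libuuid` and keep `LibUUID::LibUUID` as an alias. A minimal sketch of a consuming recipe (the version is picked arbitrarily for illustration):

```
from conan import ConanFile

class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    generators = "CMakeDeps", "CMakeToolchain"

    def requirements(self):
        # czmq/cppcommon-style consumers can link libuuid::libuuid unpatched
        self.requires("util-linux-libuuid/2.39")
```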
| {"golden_diff": "diff --git a/recipes/util-linux-libuuid/all/conanfile.py b/recipes/util-linux-libuuid/all/conanfile.py\n--- a/recipes/util-linux-libuuid/all/conanfile.py\n+++ b/recipes/util-linux-libuuid/all/conanfile.py\n@@ -109,7 +109,10 @@\n \n def package_info(self):\n self.cpp_info.set_property(\"pkg_config_name\", \"uuid\")\n- self.cpp_info.set_property(\"cmake_target_name\", \"LibUUID::LibUUID\")\n- self.cpp_info.set_property(\"cmake_file_name\", \"LibUUID\")\n+ self.cpp_info.set_property(\"cmake_target_name\", \"libuuid::libuuid\")\n+ self.cpp_info.set_property(\"cmake_file_name\", \"libuuid\")\n+ # Maintain alias to `LibUUID::LibUUID` for previous version of the recipe\n+ self.cpp_info.set_property(\"cmake_target_aliases\", [\"LibUUID::LibUUID\"])\n+\n self.cpp_info.libs = [\"uuid\"]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "issue": "[package] util-linux-libuuid uses wrong cmake target\n### Description\n\nIn the following lines, the `util-linux-libuuid` recipe sets the cmake target to be `LibUUID::LibUUID` with a filename of `LibUUID-config.cmake`:\r\n\r\nhttps://github.com/conan-io/conan-center-index/blob/61c4f7819e6cd3594a57f6c3847f94ab86de623f/recipes/util-linux-libuuid/all/conanfile.py#L112-L113\r\n\r\nThis was based on the internal practice that Kitware has for their internal libuuid cmake module, however this is not public and a number of packages (czmq, cppcommon) seem to assume a `libuuid::libuuid` target. These change should be reverted such that these packages can utilise util-linux-libuuid without a requirement to be patched.\n\n### Package and Environment Details\n\nN/A\n\n### Conan profile\n\nN/A\r\n\n\n### Steps to reproduce\n\nN/A\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nPut your log output here\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.apple import fix_apple_shared_install_name\nfrom conan.tools.files import copy, get, rm, rmdir\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass UtilLinuxLibuuidConan(ConanFile):\n name = \"util-linux-libuuid\"\n description = \"Universally unique id library\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/util-linux/util-linux.git\"\n license = \"BSD-3-Clause\"\n topics = \"id\", \"identifier\", \"unique\", \"uuid\"\n package_type = \"library\"\n provides = \"libuuid\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _has_sys_file_header(self):\n return self.settings.os in [\"FreeBSD\", \"Linux\", \"Macos\"]\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n self.settings.rm_safe(\"compiler.cppstd\")\n self.settings.rm_safe(\"compiler.libcxx\")\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def _minimum_compiler_version(self, compiler, build_type):\n min_version = {\n \"gcc\": {\n \"Release\": \"4\",\n \"Debug\": \"8\",\n },\n \"clang\": {\n \"Release\": \"3\",\n \"Debug\": \"3\",\n },\n \"apple-clang\": {\n \"Release\": 
\"5\",\n \"Debug\": \"5\",\n },\n }\n return min_version.get(str(compiler), {}).get(str(build_type), \"0\")\n\n def validate(self):\n min_version = self._minimum_compiler_version(self.settings.compiler, self.settings.build_type)\n if Version(self.settings.compiler.version) < min_version:\n raise ConanInvalidConfiguration(f\"{self.settings.compiler} {self.settings.compiler.version} does not meet the minimum version requirement of version {min_version}\")\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(f\"{self.ref} is not supported on Windows\")\n\n def requirements(self):\n if self.settings.os == \"Macos\":\n # Required because libintl.{a,dylib} is not distributed via libc on Macos\n self.requires(\"libgettext/0.21\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.configure_args.append(\"--disable-all-programs\")\n tc.configure_args.append(\"--enable-libuuid\")\n if self._has_sys_file_header:\n tc.extra_defines.append(\"HAVE_SYS_FILE_H\")\n if \"x86\" in self.settings.arch:\n tc.extra_cflags.append(\"-mstackrealign\")\n tc.generate()\n\n deps = AutotoolsDeps(self)\n deps.generate()\n\n def build(self):\n autotools = Autotools(self)\n autotools.configure()\n autotools.make()\n\n def package(self):\n copy(self, \"COPYING.BSD-3-Clause\", src=os.path.join(self.source_folder, \"Documentation\", \"licenses\"), dst=os.path.join(self.package_folder, \"licenses\"))\n autotools = Autotools(self)\n autotools.install()\n rm(self, \"*.la\", os.path.join(self.package_folder, \"lib\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"bin\"))\n rmdir(self, os.path.join(self.package_folder, \"sbin\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n fix_apple_shared_install_name(self)\n\n def package_info(self):\n self.cpp_info.set_property(\"pkg_config_name\", \"uuid\")\n self.cpp_info.set_property(\"cmake_target_name\", \"LibUUID::LibUUID\")\n self.cpp_info.set_property(\"cmake_file_name\", \"LibUUID\")\n self.cpp_info.libs = [\"uuid\"]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "path": "recipes/util-linux-libuuid/all/conanfile.py"}]} | 2,041 | 234 |
gh_patches_debug_959 | rasdani/github-patches | git_diff | getsentry__sentry-52329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix(django): Disable admin on prod
Reported here: https://forum.sentry.io/t/sentry-django-admin-portal/12787?u=byk
</issue>
<code>
[start of src/sentry/conf/urls.py]
1 from __future__ import annotations
2
3 from django.conf import settings
4 from django.urls import URLPattern, URLResolver, re_path
5
6 from sentry.web.frontend import csrf_failure
7 from sentry.web.frontend.error_404 import Error404View
8 from sentry.web.frontend.error_500 import Error500View
9 from sentry.web.urls import urlpatterns as web_urlpatterns
10
11 handler404 = Error404View.as_view()
12 handler500 = Error500View.as_view()
13
14 urlpatterns: list[URLResolver | URLPattern] = [
15 re_path(
16 r"^500/",
17 handler500,
18 name="error-500",
19 ),
20 re_path(
21 r"^404/",
22 handler404,
23 name="error-404",
24 ),
25 re_path(
26 r"^403-csrf-failure/",
27 csrf_failure.view,
28 name="error-403-csrf-failure",
29 ),
30 ]
31
32 if "django.contrib.admin" in settings.INSTALLED_APPS:
33 from sentry import django_admin
34
35 urlpatterns += django_admin.urlpatterns
36
37 urlpatterns += web_urlpatterns
38
[end of src/sentry/conf/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py
--- a/src/sentry/conf/urls.py
+++ b/src/sentry/conf/urls.py
@@ -29,7 +29,7 @@
),
]
-if "django.contrib.admin" in settings.INSTALLED_APPS:
+if "django.contrib.admin" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:
from sentry import django_admin
urlpatterns += django_admin.urlpatterns
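Note that the guard relies on a boolean `ADMIN_ENABLED` being defined in Sentry's settings; without it the check would raise `AttributeError` at import time. A sketch of the kind of default this pairs with (the setting name comes from the patch, the env-var wiring is an assumption):

```
# in the default settings module
import os

# keep the Django admin off unless explicitly opted in
ADMIN_ENABLED = os.environ.get("SENTRY_ADMIN_ENABLED") == "1"
```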
| {"golden_diff": "diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py\n--- a/src/sentry/conf/urls.py\n+++ b/src/sentry/conf/urls.py\n@@ -29,7 +29,7 @@\n ),\n ]\n \n-if \"django.contrib.admin\" in settings.INSTALLED_APPS:\n+if \"django.contrib.admin\" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:\n from sentry import django_admin\n \n urlpatterns += django_admin.urlpatterns\n", "issue": "fix(django): Disable admin on prod\nReported here: https://forum.sentry.io/t/sentry-django-admin-portal/12787?u=byk\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom django.conf import settings\nfrom django.urls import URLPattern, URLResolver, re_path\n\nfrom sentry.web.frontend import csrf_failure\nfrom sentry.web.frontend.error_404 import Error404View\nfrom sentry.web.frontend.error_500 import Error500View\nfrom sentry.web.urls import urlpatterns as web_urlpatterns\n\nhandler404 = Error404View.as_view()\nhandler500 = Error500View.as_view()\n\nurlpatterns: list[URLResolver | URLPattern] = [\n re_path(\n r\"^500/\",\n handler500,\n name=\"error-500\",\n ),\n re_path(\n r\"^404/\",\n handler404,\n name=\"error-404\",\n ),\n re_path(\n r\"^403-csrf-failure/\",\n csrf_failure.view,\n name=\"error-403-csrf-failure\",\n ),\n]\n\nif \"django.contrib.admin\" in settings.INSTALLED_APPS:\n from sentry import django_admin\n\n urlpatterns += django_admin.urlpatterns\n\nurlpatterns += web_urlpatterns\n", "path": "src/sentry/conf/urls.py"}]} | 904 | 104 |
gh_patches_debug_1305 | rasdani/github-patches | git_diff | oppia__oppia-7459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade @typescript-eslint/eslint-plugin
`eslint-utils` is currently out of date. https://github.com/oppia/oppia/pull/7451 provides a temporary fix, but we need to upgrade the main package that requires `eslint-utils` to ensure that we have a long-term fix.
When fixing this, please make sure that the lint tests run successfully.
</issue>
<code>
[start of core/domain/feedback_jobs_one_off.py]
1 # Copyright 2019 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """One-off jobs for feedback models."""
16
17 from core import jobs
18 from core.platform import models
19
20 (feedback_models,) = models.Registry.import_models([models.NAMES.feedback])
21
22
23 class GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):
24 """One-off job for setting user_id and thread_id for all
25 GeneralFeedbackThreadUserModels.
26 """
27 @classmethod
28 def entity_classes_to_map_over(cls):
29 """Return a list of datastore class references to map over."""
30 return [feedback_models.GeneralFeedbackThreadUserModel]
31
32 @staticmethod
33 def map(model_instance):
34 """Implements the map function for this job."""
35 user_id, thread_id = model_instance.id.split('.', 1)
36 if model_instance.user_id is None:
37 model_instance.user_id = user_id
38 if model_instance.thread_id is None:
39 model_instance.thread_id = thread_id
40 model_instance.put(update_last_updated_time=False)
41 yield ('SUCCESS', model_instance.id)
42
43 @staticmethod
44 def reduce(key, values):
45 yield (key, len(values))
46
[end of core/domain/feedback_jobs_one_off.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/domain/feedback_jobs_one_off.py b/core/domain/feedback_jobs_one_off.py
--- a/core/domain/feedback_jobs_one_off.py
+++ b/core/domain/feedback_jobs_one_off.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""One-off jobs for feedback models."""
+from __future__ import absolute_import # pylint: disable=import-only-modules
from core import jobs
from core.platform import models
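For background, `absolute_import` makes Python 2 resolve bare imports the way Python 3 does: from `sys.path` instead of from the current package. A small illustration, assuming a hypothetical sibling module also named `jobs` lived next to this file:

```
from __future__ import absolute_import

# without the future import, Python 2 would silently resolve `import jobs`
# to the sibling core/domain/jobs.py (if one existed) instead of a top-level
# package; with it, both interpreters behave identically
from core import jobs
```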
| {"golden_diff": "diff --git a/core/domain/feedback_jobs_one_off.py b/core/domain/feedback_jobs_one_off.py\n--- a/core/domain/feedback_jobs_one_off.py\n+++ b/core/domain/feedback_jobs_one_off.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n \"\"\"One-off jobs for feedback models.\"\"\"\n+from __future__ import absolute_import # pylint: disable=import-only-modules\n \n from core import jobs\n from core.platform import models\n", "issue": "Upgrade @typescript-eslint/eslint-plugin\n`eslint-utils` is currently out of date, https://github.com/oppia/oppia/pull/7451 provides a temporary fix, but we need to upgrade the main package that requires `eslint-utils` to ensure that we have a long term fix. \r\n\r\nWhen fixing this, please make sure that the lint tests run successfully.\n", "before_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"One-off jobs for feedback models.\"\"\"\n\nfrom core import jobs\nfrom core.platform import models\n\n(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])\n\n\nclass GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n \"\"\"One-off job for setting user_id and thread_id for all\n GeneralFeedbackThreadUserModels.\n \"\"\"\n @classmethod\n def entity_classes_to_map_over(cls):\n \"\"\"Return a list of datastore class references to map over.\"\"\"\n return [feedback_models.GeneralFeedbackThreadUserModel]\n\n @staticmethod\n def map(model_instance):\n \"\"\"Implements the map function for this job.\"\"\"\n user_id, thread_id = model_instance.id.split('.', 1)\n if model_instance.user_id is None:\n model_instance.user_id = user_id\n if model_instance.thread_id is None:\n model_instance.thread_id = thread_id\n model_instance.put(update_last_updated_time=False)\n yield ('SUCCESS', model_instance.id)\n\n @staticmethod\n def reduce(key, values):\n yield (key, len(values))\n", "path": "core/domain/feedback_jobs_one_off.py"}]} | 1,070 | 100 |
gh_patches_debug_21117 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-614 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CCA failure when enabled
**Describe the bug**
The CCA (Largest Connected Component Analysis) function was implemented as a standalone function, which causes it to fail when called in the segmentation pipeline with post-processing enabled. As a result, running the pipeline with CCA enabled is expected to fail.
**To Reproduce**
Run a segmentation pipeline with CCA enabled for the post-processing.
**Expected behavior**
The CCA function should be corrected so that it integrates with the segmentation pipeline, works correctly, and is covered by tests.
**GaNDLF Version**
0.0.16-dev (output of `python -c 'import GANDLF as g;print(g.__version__)'`)
**Desktop (please complete the following information):**
- OS: Linux, Ubuntu
- Version (including Build information, if any): 22.04
**Additional context**
None
</issue>
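The failure mode follows from the hook signature: the other post-processing hooks in the file below (for example `fill_holes`) accept `(input_image, params)`, while `cca` takes only `input_image`, so calling it through the common dispatch breaks; the accompanying patch also swaps torch's `.dim()` for `.ndim`, since the image is a plain array at that point. A minimal reproduction of the mismatch (the dispatch loop is a simplification of the real pipeline):

```
# every registered hook is invoked the same way by the pipeline
for fn in (fill_holes, cca):
    output = fn(output, params)  # cca(output, params) -> TypeError before the fix
```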
<code>
[start of GANDLF/data/post_process/morphology.py]
1 import torch
2 import torch.nn.functional as F
3 from skimage.measure import label
4 import numpy as np
5 from scipy.ndimage import binary_fill_holes, binary_closing
6 from GANDLF.utils.generic import get_array_from_image_or_tensor
7
8
9 def torch_morphological(input_image, kernel_size=1, mode="dilation"):
10 """
11 This function enables morphological operations using torch. Adapted from https://github.com/DIVA-DIA/Generating-Synthetic-Handwritten-Historical-Documents/blob/e6a798dc2b374f338804222747c56cb44869af5b/HTR_ctc/utils/auxilary_functions.py#L10.
12
13 Args:
14 input_image (torch.Tensor): The input image.
15 kernel_size (list): The size of the window to take a max over.
16 mode (str): The type of morphological operation to perform.
17
18 Returns:
19 torch.Tensor: The output image after morphological operations.
20 """
21
22 if len(input_image.shape) == 4:
23 max_pool = F.max_pool2d
24 elif len(input_image.shape) == 5:
25 max_pool = F.max_pool3d
26 else:
27 raise ValueError("Input image has invalid shape for morphological operations.")
28
29 if mode == "dilation":
30 output_image = max_pool(
31 input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
32 )
33 elif mode == "erosion":
34 output_image = -max_pool(
35 -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
36 )
37 elif mode == "closing":
38 output_image = max_pool(
39 input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
40 )
41 output_image = -max_pool(
42 -output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
43 )
44 elif mode == "opening":
45 output_image = -max_pool(
46 -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
47 )
48 output_image = max_pool(
49 output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
50 )
51
52 return output_image
53
54
55 def fill_holes(input_image, params=None):
56 """
57 This function fills holes in masks.
58
59 Args:
60 input_image (torch.Tensor): The input image.
61 params (dict): The parameters dict; unused.
62
63 Returns:
64 torch.Tensor: The output image after morphological operations.
65 """
66 input_image_array = get_array_from_image_or_tensor(input_image).astype(int)
67 input_image_array_closed = binary_closing(input_image_array)
68 # Fill the holes in binary objects
69 output_array = binary_fill_holes(input_image_array_closed).astype(int)
70
71 return torch.from_numpy(output_array)
72
73
74 def cca(input_image):
75 """
76 This function performs connected component analysis on the input image.
77
78 Args:
79 input_image (torch.Tensor): The input image.
80 params (dict): The parameters dict;
81
82 Returns:
83 torch.Tensor: The output image after morphological operations.
84 """
85 seg = get_array_from_image_or_tensor(input_image)
86 mask = seg != 0
87
88 connectivity = input_image.dim() - 1
89 labels_connected = label(mask, connectivity=connectivity)
90 labels_connected_sizes = [
91 np.sum(labels_connected == i) for i in np.unique(labels_connected)
92 ]
93 largest_region = np.argmax(labels_connected_sizes[1:]) + 1
94 seg[labels_connected != largest_region] = 0
95 return seg
96
[end of GANDLF/data/post_process/morphology.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/GANDLF/data/post_process/morphology.py b/GANDLF/data/post_process/morphology.py
--- a/GANDLF/data/post_process/morphology.py
+++ b/GANDLF/data/post_process/morphology.py
@@ -71,7 +71,7 @@
return torch.from_numpy(output_array)
-def cca(input_image):
+def cca(input_image, params=None):
"""
This function performs connected component analysis on the input image.
@@ -85,11 +85,15 @@
seg = get_array_from_image_or_tensor(input_image)
mask = seg != 0
- connectivity = input_image.dim() - 1
+ connectivity = input_image.ndim - 1
labels_connected = label(mask, connectivity=connectivity)
labels_connected_sizes = [
np.sum(labels_connected == i) for i in np.unique(labels_connected)
]
- largest_region = np.argmax(labels_connected_sizes[1:]) + 1
+ largest_region = 0
+ if len(labels_connected_sizes) > 1:
+ largest_region = np.argmax(labels_connected_sizes[1:]) + 1
seg[labels_connected != largest_region] = 0
+
return seg
+
| {"golden_diff": "diff --git a/GANDLF/data/post_process/morphology.py b/GANDLF/data/post_process/morphology.py\n--- a/GANDLF/data/post_process/morphology.py\n+++ b/GANDLF/data/post_process/morphology.py\n@@ -71,7 +71,7 @@\n return torch.from_numpy(output_array)\n \n \n-def cca(input_image):\n+def cca(input_image, params=None):\n \"\"\"\n This function performs connected component analysis on the input image.\n \n@@ -85,11 +85,15 @@\n seg = get_array_from_image_or_tensor(input_image)\n mask = seg != 0\n \n- connectivity = input_image.dim() - 1\n+ connectivity = input_image.ndim - 1\n labels_connected = label(mask, connectivity=connectivity)\n labels_connected_sizes = [\n np.sum(labels_connected == i) for i in np.unique(labels_connected)\n ]\n- largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n+ largest_region = 0\n+ if len(labels_connected_sizes) > 1:\n+ largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n seg[labels_connected != largest_region] = 0\n+\n return seg\n+\n", "issue": "CCA failure when enabled\n**Describe the bug**\r\nThe CCA (Largest Connected Component Analysis) function was implemented as a standalone function, which causes it to fail when called in the segmentation pipeline with post-processing enabled. The expected behavior is a likely failure due to this issue.\r\n\r\n**To Reproduce**\r\nRun a segmentation pipeline with CCA enabled for the post-processing.\r\n\r\n**Expected behavior**\r\nThe CCA function should be corrected and integrated with the segmentation pipeline to work correctly and tested\r\n\r\n**GaNDLF Version**\r\n<!-- Put the output of the following command:\r\npython -c 'import GANDLF as g;print(g.__version__)'\r\n-->\r\nVersion information of the GaNDLF package in the virtual environment. 0.0.16-dev\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux, Ubuntu\r\n - Version (including Build information, if any): 22.04\r\n\r\n**Additional context**\r\nNone\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom skimage.measure import label\nimport numpy as np\nfrom scipy.ndimage import binary_fill_holes, binary_closing\nfrom GANDLF.utils.generic import get_array_from_image_or_tensor\n\n\ndef torch_morphological(input_image, kernel_size=1, mode=\"dilation\"):\n \"\"\"\n This function enables morphological operations using torch. 
Adapted from https://github.com/DIVA-DIA/Generating-Synthetic-Handwritten-Historical-Documents/blob/e6a798dc2b374f338804222747c56cb44869af5b/HTR_ctc/utils/auxilary_functions.py#L10.\n\n Args:\n input_image (torch.Tensor): The input image.\n kernel_size (list): The size of the window to take a max over.\n mode (str): The type of morphological operation to perform.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n\n if len(input_image.shape) == 4:\n max_pool = F.max_pool2d\n elif len(input_image.shape) == 5:\n max_pool = F.max_pool3d\n else:\n raise ValueError(\"Input image has invalid shape for morphological operations.\")\n\n if mode == \"dilation\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"erosion\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"closing\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = -max_pool(\n -output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"opening\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = max_pool(\n output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n\n return output_image\n\n\ndef fill_holes(input_image, params=None):\n \"\"\"\n This function fills holes in masks.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict; unused.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n input_image_array = get_array_from_image_or_tensor(input_image).astype(int)\n input_image_array_closed = binary_closing(input_image_array)\n # Fill the holes in binary objects\n output_array = binary_fill_holes(input_image_array_closed).astype(int)\n\n return torch.from_numpy(output_array)\n\n\ndef cca(input_image):\n \"\"\"\n This function performs connected component analysis on the input image.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict;\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n seg = get_array_from_image_or_tensor(input_image)\n mask = seg != 0\n\n connectivity = input_image.dim() - 1\n labels_connected = label(mask, connectivity=connectivity)\n labels_connected_sizes = [\n np.sum(labels_connected == i) for i in np.unique(labels_connected)\n ]\n largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n seg[labels_connected != largest_region] = 0\n return seg\n", "path": "GANDLF/data/post_process/morphology.py"}]} | 1,729 | 271 |
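
The decisive details in the golden diff above are the signature and the dimensionality call: the diff adds `params=None` so the pipeline can invoke `cca` the same way it invokes `fill_holes`, and it swaps `input_image.dim()` for `input_image.ndim`, which is available on both NumPy arrays and torch tensors, whereas `.dim()` exists only on tensors. A condensed sketch of the fixed function (illustrative only; `np.asarray` stands in for the project's `get_array_from_image_or_tensor` helper):

```python
import numpy as np
from skimage.measure import label

def cca(input_image, params=None):  # params accepted so the pipeline can call it like fill_holes
    seg = np.asarray(input_image)
    mask = seg != 0
    labels_connected = label(mask, connectivity=seg.ndim - 1)  # ndim works on arrays; .dim() is torch-only
    sizes = [np.sum(labels_connected == i) for i in np.unique(labels_connected)]
    largest_region = 0
    if len(sizes) > 1:  # an all-background mask has no foreground component to keep
        largest_region = np.argmax(sizes[1:]) + 1
    seg[labels_connected != largest_region] = 0
    return seg
```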
gh_patches_debug_14052 | rasdani/github-patches | git_diff | openai__gym-1149 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Why is there a restriction on shape in multi discrete?
https://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/multi_discrete.py#L10
Why is this imposed?
Say one may need a high dimensional space like a (3,3,3) grid with discretised values for each element in the grid.
It can be very easily circumvented by using np.random.random_sample and passing the shape
Is there some specific reason for doing this?
Also note this example provided in dict_space doesn't work currently.
https://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/dict_space.py#L22
</issue>
<code>
[start of gym/spaces/multi_discrete.py]
1 import gym
2 import numpy as np
3
4 class MultiDiscrete(gym.Space):
5 def __init__(self, nvec):
6 """
7 nvec: vector of counts of each categorical variable
8 """
9 self.nvec = np.asarray(nvec, dtype=np.int32)
10 assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'
11 gym.Space.__init__(self, (self.nvec.size,), np.int8)
12 def sample(self):
13 return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)
14 def contains(self, x):
15 return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'
16
17 __contains__ = contains
18
19 def to_jsonable(self, sample_n):
20 return [sample.tolist() for sample in sample_n]
21 def from_jsonable(self, sample_n):
22 return np.array(sample_n)
23
[end of gym/spaces/multi_discrete.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -7,10 +7,9 @@
nvec: vector of counts of each categorical variable
"""
self.nvec = np.asarray(nvec, dtype=np.int32)
- assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'
- gym.Space.__init__(self, (self.nvec.size,), np.int8)
+ gym.Space.__init__(self, (self.nvec.shape,), np.int8)
def sample(self):
- return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)
+ return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)
def contains(self, x):
return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'
| {"golden_diff": "diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py\n--- a/gym/spaces/multi_discrete.py\n+++ b/gym/spaces/multi_discrete.py\n@@ -7,10 +7,9 @@\n nvec: vector of counts of each categorical variable\n \"\"\"\n self.nvec = np.asarray(nvec, dtype=np.int32)\n- assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'\n- gym.Space.__init__(self, (self.nvec.size,), np.int8)\n+ gym.Space.__init__(self, (self.nvec.shape,), np.int8)\n def sample(self):\n- return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)\n+ return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n def contains(self, x):\n return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'\n", "issue": "Why is there a restiction on shape in multi discrete?\nhttps://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/multi_discrete.py#L10\r\n\r\nWhy is this imposed?\r\nSay one may need a high dimensional space like a (3,3,3) grid with discretised values for each element in the grid. \r\nIt can be very easily circumvented by using np.random.random_sample and passing the shape\r\nIs there some specific reason for doing this?\r\n\r\nAlso note this example provided in dict_space doesn't work currently.\r\nhttps://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/dict_space.py#L22\r\n\r\n\n", "before_files": [{"content": "import gym\nimport numpy as np\n\nclass MultiDiscrete(gym.Space):\n def __init__(self, nvec):\n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n self.nvec = np.asarray(nvec, dtype=np.int32)\n assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'\n gym.Space.__init__(self, (self.nvec.size,), np.int8)\n def sample(self):\n return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)\n def contains(self, x):\n return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'\n \n __contains__ = contains\n \n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n", "path": "gym/spaces/multi_discrete.py"}]} | 1,004 | 248 |
gh_patches_debug_29594 | rasdani/github-patches | git_diff | fossasia__open-event-server-6739 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove version model
**Describe the bug**
The version model is not used currently and should be removed
https://github.com/fossasia/open-event-server/blob/development/app/models/version.py
**Additional context**
@iamareebjamal Taking this
</issue>
<code>
[start of app/models/version.py]
1 from sqlalchemy.orm import backref
2
3 from app.models import db
4
5
6 class Version(db.Model):
7 """Version model class"""
8 __tablename__ = 'versions'
9 id = db.Column(db.Integer, primary_key=True)
10 event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
11 events = db.relationship("Event", backref=backref('version', uselist=False))
12
13 event_ver = db.Column(db.Integer, nullable=False, default=0)
14 sessions_ver = db.Column(db.Integer, nullable=False, default=0)
15 speakers_ver = db.Column(db.Integer, nullable=False, default=0)
16 tracks_ver = db.Column(db.Integer, nullable=False, default=0)
17 sponsors_ver = db.Column(db.Integer, nullable=False, default=0)
18 microlocations_ver = db.Column(db.Integer, nullable=False, default=0)
19
20 def __init__(self,
21 event_id=None,
22 event_ver=None,
23 sessions_ver=None,
24 speakers_ver=None,
25 tracks_ver=None,
26 sponsors_ver=None,
27 microlocations_ver=None):
28 self.event_id = event_id
29 self.event_ver = event_ver
30 self.sessions_ver = sessions_ver
31 self.speakers_ver = speakers_ver
32 self.tracks_ver = tracks_ver
33 self.sponsors_ver = sponsors_ver
34 self.microlocations_ver = microlocations_ver
35
36 def __repr__(self):
37 return '<Version %r>' % self.id
38
39 def __str__(self):
40 return self.__repr__()
41
42 @property
43 def serialize(self):
44 """Return object data in easily serializable format"""
45 return {
46 'version': [
47 {'id': self.id,
48 'event_id': self.event_id,
49 'event_ver': self.event_ver,
50 'sessions_ver': self.sessions_ver,
51 'speakers_ver': self.speakers_ver,
52 'tracks_ver': self.tracks_ver,
53 'sponsors_ver': self.sponsors_ver,
54 'microlocations_ver': self.microlocations_ver}
55 ]
56 }
57
[end of app/models/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/models/version.py b/app/models/version.py
deleted file mode 100644
--- a/app/models/version.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from sqlalchemy.orm import backref
-
-from app.models import db
-
-
-class Version(db.Model):
- """Version model class"""
- __tablename__ = 'versions'
- id = db.Column(db.Integer, primary_key=True)
- event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
- events = db.relationship("Event", backref=backref('version', uselist=False))
-
- event_ver = db.Column(db.Integer, nullable=False, default=0)
- sessions_ver = db.Column(db.Integer, nullable=False, default=0)
- speakers_ver = db.Column(db.Integer, nullable=False, default=0)
- tracks_ver = db.Column(db.Integer, nullable=False, default=0)
- sponsors_ver = db.Column(db.Integer, nullable=False, default=0)
- microlocations_ver = db.Column(db.Integer, nullable=False, default=0)
-
- def __init__(self,
- event_id=None,
- event_ver=None,
- sessions_ver=None,
- speakers_ver=None,
- tracks_ver=None,
- sponsors_ver=None,
- microlocations_ver=None):
- self.event_id = event_id
- self.event_ver = event_ver
- self.sessions_ver = sessions_ver
- self.speakers_ver = speakers_ver
- self.tracks_ver = tracks_ver
- self.sponsors_ver = sponsors_ver
- self.microlocations_ver = microlocations_ver
-
- def __repr__(self):
- return '<Version %r>' % self.id
-
- def __str__(self):
- return self.__repr__()
-
- @property
- def serialize(self):
- """Return object data in easily serializable format"""
- return {
- 'version': [
- {'id': self.id,
- 'event_id': self.event_id,
- 'event_ver': self.event_ver,
- 'sessions_ver': self.sessions_ver,
- 'speakers_ver': self.speakers_ver,
- 'tracks_ver': self.tracks_ver,
- 'sponsors_ver': self.sponsors_ver,
- 'microlocations_ver': self.microlocations_ver}
- ]
- }
| {"golden_diff": "diff --git a/app/models/version.py b/app/models/version.py\ndeleted file mode 100644\n--- a/app/models/version.py\n+++ /dev/null\n@@ -1,56 +0,0 @@\n-from sqlalchemy.orm import backref\n-\n-from app.models import db\n-\n-\n-class Version(db.Model):\n- \"\"\"Version model class\"\"\"\n- __tablename__ = 'versions'\n- id = db.Column(db.Integer, primary_key=True)\n- event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n- events = db.relationship(\"Event\", backref=backref('version', uselist=False))\n-\n- event_ver = db.Column(db.Integer, nullable=False, default=0)\n- sessions_ver = db.Column(db.Integer, nullable=False, default=0)\n- speakers_ver = db.Column(db.Integer, nullable=False, default=0)\n- tracks_ver = db.Column(db.Integer, nullable=False, default=0)\n- sponsors_ver = db.Column(db.Integer, nullable=False, default=0)\n- microlocations_ver = db.Column(db.Integer, nullable=False, default=0)\n-\n- def __init__(self,\n- event_id=None,\n- event_ver=None,\n- sessions_ver=None,\n- speakers_ver=None,\n- tracks_ver=None,\n- sponsors_ver=None,\n- microlocations_ver=None):\n- self.event_id = event_id\n- self.event_ver = event_ver\n- self.sessions_ver = sessions_ver\n- self.speakers_ver = speakers_ver\n- self.tracks_ver = tracks_ver\n- self.sponsors_ver = sponsors_ver\n- self.microlocations_ver = microlocations_ver\n-\n- def __repr__(self):\n- return '<Version %r>' % self.id\n-\n- def __str__(self):\n- return self.__repr__()\n-\n- @property\n- def serialize(self):\n- \"\"\"Return object data in easily serializable format\"\"\"\n- return {\n- 'version': [\n- {'id': self.id,\n- 'event_id': self.event_id,\n- 'event_ver': self.event_ver,\n- 'sessions_ver': self.sessions_ver,\n- 'speakers_ver': self.speakers_ver,\n- 'tracks_ver': self.tracks_ver,\n- 'sponsors_ver': self.sponsors_ver,\n- 'microlocations_ver': self.microlocations_ver}\n- ]\n- }\n", "issue": "Remove version model\n**Describe the bug**\r\nThe version model is not used currently and should be removed\r\n\r\nhttps://github.com/fossasia/open-event-server/blob/development/app/models/version.py\r\n\r\n\r\n**Additional context**\r\n@iamareebjamal Taking this\r\n\n", "before_files": [{"content": "from sqlalchemy.orm import backref\n\nfrom app.models import db\n\n\nclass Version(db.Model):\n \"\"\"Version model class\"\"\"\n __tablename__ = 'versions'\n id = db.Column(db.Integer, primary_key=True)\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n events = db.relationship(\"Event\", backref=backref('version', uselist=False))\n\n event_ver = db.Column(db.Integer, nullable=False, default=0)\n sessions_ver = db.Column(db.Integer, nullable=False, default=0)\n speakers_ver = db.Column(db.Integer, nullable=False, default=0)\n tracks_ver = db.Column(db.Integer, nullable=False, default=0)\n sponsors_ver = db.Column(db.Integer, nullable=False, default=0)\n microlocations_ver = db.Column(db.Integer, nullable=False, default=0)\n\n def __init__(self,\n event_id=None,\n event_ver=None,\n sessions_ver=None,\n speakers_ver=None,\n tracks_ver=None,\n sponsors_ver=None,\n microlocations_ver=None):\n self.event_id = event_id\n self.event_ver = event_ver\n self.sessions_ver = sessions_ver\n self.speakers_ver = speakers_ver\n self.tracks_ver = tracks_ver\n self.sponsors_ver = sponsors_ver\n self.microlocations_ver = microlocations_ver\n\n def __repr__(self):\n return '<Version %r>' % self.id\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def serialize(self):\n 
\"\"\"Return object data in easily serializable format\"\"\"\n return {\n 'version': [\n {'id': self.id,\n 'event_id': self.event_id,\n 'event_ver': self.event_ver,\n 'sessions_ver': self.sessions_ver,\n 'speakers_ver': self.speakers_ver,\n 'tracks_ver': self.tracks_ver,\n 'sponsors_ver': self.sponsors_ver,\n 'microlocations_ver': self.microlocations_ver}\n ]\n }\n", "path": "app/models/version.py"}]} | 1,124 | 526 |
gh_patches_debug_2721 | rasdani/github-patches | git_diff | benoitc__gunicorn-1708 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gunicorn crashed on start with --reload flag
Setup: Vagrant, virtualenv, gunicorn 19.3.0:
The following command produces this stack:
`gunicorn -c /data/shared/api/gunicorn_config.py -b unix:/tmp/api-dev-gunicorn.sock --log-level INFO --reload wsgi:app`
```
Exception in thread Thread-1:
Traceback (most recent call last):
File "/home/vagrant/.pyenv/versions/2.7.6/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py", line 41, in run
for filename in self.get_files():
File "/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py", line 30, in get_files
if hasattr(module, '__file__')
File "/data/virtualenv/default/lib/python2.7/re.py", line 151, in sub
return _compile(pattern, flags).sub(repl, string, count)
TypeError: expected string or buffer
```
If I remove --reload it boots up fine.
</issue>
<code>
[start of gunicorn/reloader.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import os
7 import os.path
8 import re
9 import sys
10 import time
11 import threading
12
13
14 class Reloader(threading.Thread):
15 def __init__(self, extra_files=None, interval=1, callback=None):
16 super(Reloader, self).__init__()
17 self.setDaemon(True)
18 self._extra_files = set(extra_files or ())
19 self._extra_files_lock = threading.RLock()
20 self._interval = interval
21 self._callback = callback
22
23 def add_extra_file(self, filename):
24 with self._extra_files_lock:
25 self._extra_files.add(filename)
26
27 def get_files(self):
28 fnames = [
29 re.sub('py[co]$', 'py', module.__file__)
30 for module in list(sys.modules.values())
31 if hasattr(module, '__file__')
32 ]
33
34 with self._extra_files_lock:
35 fnames.extend(self._extra_files)
36
37 return fnames
38
39 def run(self):
40 mtimes = {}
41 while True:
42 for filename in self.get_files():
43 try:
44 mtime = os.stat(filename).st_mtime
45 except OSError:
46 continue
47 old_time = mtimes.get(filename)
48 if old_time is None:
49 mtimes[filename] = mtime
50 continue
51 elif mtime > old_time:
52 if self._callback:
53 self._callback(filename)
54 time.sleep(self._interval)
55
56 has_inotify = False
57 if sys.platform.startswith('linux'):
58 try:
59 from inotify.adapters import Inotify
60 import inotify.constants
61 has_inotify = True
62 except ImportError:
63 pass
64
65
66 if has_inotify:
67
68 class InotifyReloader(threading.Thread):
69 event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE
70 | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY
71 | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM
72 | inotify.constants.IN_MOVED_TO)
73
74 def __init__(self, extra_files=None, callback=None):
75 super(InotifyReloader, self).__init__()
76 self.setDaemon(True)
77 self._callback = callback
78 self._dirs = set()
79 self._watcher = Inotify()
80
81 for extra_file in extra_files:
82 self.add_extra_file(extra_file)
83
84 def add_extra_file(self, filename):
85 dirname = os.path.dirname(filename)
86
87 if dirname in self._dirs:
88 return
89
90 self._watcher.add_watch(dirname, mask=self.event_mask)
91 self._dirs.add(dirname)
92
93 def get_dirs(self):
94 fnames = [
95 os.path.dirname(re.sub('py[co]$', 'py', module.__file__))
96 for module in list(sys.modules.values())
97 if hasattr(module, '__file__')
98 ]
99
100 return set(fnames)
101
102 def run(self):
103 self._dirs = self.get_dirs()
104
105 for dirname in self._dirs:
106 self._watcher.add_watch(dirname, mask=self.event_mask)
107
108 for event in self._watcher.event_gen():
109 if event is None:
110 continue
111
112 filename = event[3]
113
114 self._callback(filename)
115
116 else:
117
118 class InotifyReloader(object):
119 def __init__(self, callback=None):
120 raise ImportError('You must have the inotify module installed to '
121 'use the inotify reloader')
122
123
124 preferred_reloader = InotifyReloader if has_inotify else Reloader
125
126 reloader_engines = {
127 'auto': preferred_reloader,
128 'poll': Reloader,
129 'inotify': InotifyReloader,
130 }
131
[end of gunicorn/reloader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gunicorn/reloader.py b/gunicorn/reloader.py
--- a/gunicorn/reloader.py
+++ b/gunicorn/reloader.py
@@ -28,7 +28,7 @@
fnames = [
re.sub('py[co]$', 'py', module.__file__)
for module in list(sys.modules.values())
- if hasattr(module, '__file__')
+ if getattr(module, '__file__', None)
]
with self._extra_files_lock:
| {"golden_diff": "diff --git a/gunicorn/reloader.py b/gunicorn/reloader.py\n--- a/gunicorn/reloader.py\n+++ b/gunicorn/reloader.py\n@@ -28,7 +28,7 @@\n fnames = [\n re.sub('py[co]$', 'py', module.__file__)\n for module in list(sys.modules.values())\n- if hasattr(module, '__file__')\n+ if getattr(module, '__file__', None)\n ]\n \n with self._extra_files_lock:\n", "issue": "gunicorn crashed on start with --reload flag\nSetup: Vagrant, virtualenv, gunicorn 19.3.0:\n\nThe following command produces this stack:\n\n`gunicorn -c /data/shared/api/gunicorn_config.py -b unix:/tmp/api-dev-gunicorn.sock --log-level INFO --reload wsgi:app`\n\n```\nException in thread Thread-1:\nTraceback (most recent call last):\n File \"/home/vagrant/.pyenv/versions/2.7.6/lib/python2.7/threading.py\", line 810, in __bootstrap_inner\n self.run()\n File \"/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py\", line 41, in run\n for filename in self.get_files():\n File \"/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py\", line 30, in get_files\n if hasattr(module, '__file__')\n File \"/data/virtualenv/default/lib/python2.7/re.py\", line 151, in sub\n return _compile(pattern, flags).sub(repl, string, count)\nTypeError: expected string or buffer\n```\n\nIf I remove --reload it boots up fine.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport os.path\nimport re\nimport sys\nimport time\nimport threading\n\n\nclass Reloader(threading.Thread):\n def __init__(self, extra_files=None, interval=1, callback=None):\n super(Reloader, self).__init__()\n self.setDaemon(True)\n self._extra_files = set(extra_files or ())\n self._extra_files_lock = threading.RLock()\n self._interval = interval\n self._callback = callback\n\n def add_extra_file(self, filename):\n with self._extra_files_lock:\n self._extra_files.add(filename)\n\n def get_files(self):\n fnames = [\n re.sub('py[co]$', 'py', module.__file__)\n for module in list(sys.modules.values())\n if hasattr(module, '__file__')\n ]\n\n with self._extra_files_lock:\n fnames.extend(self._extra_files)\n\n return fnames\n\n def run(self):\n mtimes = {}\n while True:\n for filename in self.get_files():\n try:\n mtime = os.stat(filename).st_mtime\n except OSError:\n continue\n old_time = mtimes.get(filename)\n if old_time is None:\n mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n if self._callback:\n self._callback(filename)\n time.sleep(self._interval)\n\nhas_inotify = False\nif sys.platform.startswith('linux'):\n try:\n from inotify.adapters import Inotify\n import inotify.constants\n has_inotify = True\n except ImportError:\n pass\n\n\nif has_inotify:\n\n class InotifyReloader(threading.Thread):\n event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE\n | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY\n | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM\n | inotify.constants.IN_MOVED_TO)\n\n def __init__(self, extra_files=None, callback=None):\n super(InotifyReloader, self).__init__()\n self.setDaemon(True)\n self._callback = callback\n self._dirs = set()\n self._watcher = Inotify()\n\n for extra_file in extra_files:\n self.add_extra_file(extra_file)\n\n def add_extra_file(self, filename):\n dirname = os.path.dirname(filename)\n\n if dirname in self._dirs:\n return\n\n self._watcher.add_watch(dirname, mask=self.event_mask)\n 
self._dirs.add(dirname)\n\n def get_dirs(self):\n fnames = [\n os.path.dirname(re.sub('py[co]$', 'py', module.__file__))\n for module in list(sys.modules.values())\n if hasattr(module, '__file__')\n ]\n\n return set(fnames)\n\n def run(self):\n self._dirs = self.get_dirs()\n\n for dirname in self._dirs:\n self._watcher.add_watch(dirname, mask=self.event_mask)\n\n for event in self._watcher.event_gen():\n if event is None:\n continue\n\n filename = event[3]\n\n self._callback(filename)\n\nelse:\n\n class InotifyReloader(object):\n def __init__(self, callback=None):\n raise ImportError('You must have the inotify module installed to '\n 'use the inotify reloader')\n\n\npreferred_reloader = InotifyReloader if has_inotify else Reloader\n\nreloader_engines = {\n 'auto': preferred_reloader,\n 'poll': Reloader,\n 'inotify': InotifyReloader,\n}\n", "path": "gunicorn/reloader.py"}]} | 1,889 | 107 |
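
The traceback in the issue bottoms out in `re.sub(..., module.__file__)` with `TypeError: expected string or buffer`: some module passed the `hasattr` check but carried `__file__ = None`, which is typical for namespace packages and some C extensions. The one-line guard in the golden diff handles both the missing and the `None` case. A minimal reproduction, using an illustrative stand-in module:

```python
import re

class FakeModule:
    __file__ = None  # e.g. a namespace package seen under --reload

mod = FakeModule()
assert hasattr(mod, '__file__')          # old check passes...
# re.sub('py[co]$', 'py', mod.__file__)  # ...then raises TypeError: expected string or buffer
if getattr(mod, '__file__', None):       # fixed check skips None and missing alike
    re.sub('py[co]$', 'py', mod.__file__)
```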
gh_patches_debug_21397 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing 5024: order of filter
**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list
**user:** any
**expected behaviour:**
**behaviour:**
**important screensize:**
**device & browser:**
**Comment/Question:** the order of the filters was 1) category, 2) archive, and it should stay this way. Since we will touch the filters again later we could also fix it then, but we would have to remember it; so if it is easy, maybe change it now?
Screenshot?
<img width="490" alt="Bildschirmfoto 2021-12-21 um 16 25 15" src="https://user-images.githubusercontent.com/35491681/146955180-11799600-c739-4d17-8f84-7581b57a861b.png">
</issue>
<code>
[start of meinberlin/apps/budgeting/api.py]
1 from django.utils.translation import get_language
2 from django.utils.translation import gettext_lazy as _
3 from django_filters.rest_framework import DjangoFilterBackend
4 from rest_framework import mixins
5 from rest_framework import viewsets
6 from rest_framework.filters import OrderingFilter
7 from rest_framework.pagination import PageNumberPagination
8
9 from adhocracy4.api.mixins import ModuleMixin
10 from adhocracy4.api.permissions import ViewSetRulesPermission
11 from adhocracy4.categories import get_category_icon_url
12 from adhocracy4.categories import has_icons
13 from adhocracy4.categories.models import Category
14 from meinberlin.apps.contrib.filters import IdeaCategoryFilterBackend
15 from meinberlin.apps.votes.api import VotingTokenInfoMixin
16
17 from .models import Proposal
18 from .serializers import ProposalSerializer
19
20
21 # To be changed to a more general IdeaPagination, when using
22 # pagination via rest api for more idea lists
23 class ProposalPagination(PageNumberPagination):
24 page_size = 15
25
26 def get_paginated_response(self, data):
27 response = super(ProposalPagination, self).get_paginated_response(data)
28 response.data['page_size'] = self.page_size
29 response.data['page_count'] = self.page.paginator.num_pages
30 return response
31
32
33 class LocaleInfoMixin:
34 def list(self, request, *args, **kwargs):
35 response = super().list(request, args, kwargs)
36 response.data['locale'] = get_language()
37 return response
38
39
40 class ProposalFilterInfoMixin(ModuleMixin):
41 def list(self, request, *args, **kwargs):
42 """Add the filter information to the data of the Proposal API.
43
44 Needs to be used with rest_framework.mixins.ListModelMixin
45 """
46 filters = {}
47
48 ordering_choices = [('-created', _('Most recent')), ]
49 if self.module.has_feature('rate', Proposal):
50 ordering_choices += ('-positive_rating_count', _('Most popular')),
51 ordering_choices += ('-comment_count', _('Most commented')),
52
53 filters['ordering'] = {
54 'label': _('Ordering'),
55 'choices': ordering_choices,
56 'default': '-created',
57 }
58
59 filters['is_archived'] = {
60 'label': _('Archived'),
61 'choices': [
62 ('', _('All')),
63 ('false', _('No')),
64 ('true', _('Yes')),
65 ],
66 'default': 'false',
67 }
68
69 categories = Category.objects.filter(
70 module=self.module
71 )
72 if categories:
73 category_choices = [('', _('All')), ]
74 if has_icons(self.module):
75 category_icons = []
76 for category in categories:
77 category_choices += (str(category.pk), category.name),
78 if has_icons(self.module):
79 icon_name = getattr(category, 'icon', None)
80 icon_url = get_category_icon_url(icon_name)
81 category_icons += (str(category.pk), icon_url),
82
83 filters['category'] = {
84 'label': _('Category'),
85 'choices': category_choices,
86 }
87 if has_icons(self.module):
88 filters['category']['icons'] = category_icons
89
90 response = super().list(request, args, kwargs)
91 response.data['filters'] = filters
92 return response
93
94
95 class ProposalViewSet(ProposalFilterInfoMixin,
96 LocaleInfoMixin,
97 VotingTokenInfoMixin,
98 mixins.ListModelMixin,
99 viewsets.GenericViewSet,
100 ):
101
102 pagination_class = ProposalPagination
103 serializer_class = ProposalSerializer
104 permission_classes = (ViewSetRulesPermission,)
105 filter_backends = (DjangoFilterBackend,
106 OrderingFilter,
107 IdeaCategoryFilterBackend,)
108 filter_fields = ('is_archived', 'category',)
109 ordering_fields = ('created',
110 'comment_count',
111 'positive_rating_count',)
112
113 def get_permission_object(self):
114 return self.module
115
116 def get_queryset(self):
117 proposals = Proposal.objects\
118 .filter(module=self.module) \
119 .annotate_comment_count() \
120 .annotate_positive_rating_count() \
121 .annotate_negative_rating_count() \
122 .order_by('-created')
123 return proposals
124
[end of meinberlin/apps/budgeting/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/budgeting/api.py b/meinberlin/apps/budgeting/api.py
--- a/meinberlin/apps/budgeting/api.py
+++ b/meinberlin/apps/budgeting/api.py
@@ -56,16 +56,6 @@
'default': '-created',
}
- filters['is_archived'] = {
- 'label': _('Archived'),
- 'choices': [
- ('', _('All')),
- ('false', _('No')),
- ('true', _('Yes')),
- ],
- 'default': 'false',
- }
-
categories = Category.objects.filter(
module=self.module
)
@@ -87,6 +77,16 @@
if has_icons(self.module):
filters['category']['icons'] = category_icons
+ filters['is_archived'] = {
+ 'label': _('Archived'),
+ 'choices': [
+ ('', _('All')),
+ ('false', _('No')),
+ ('true', _('Yes')),
+ ],
+ 'default': 'false',
+ }
+
response = super().list(request, args, kwargs)
response.data['filters'] = filters
return response
| {"golden_diff": "diff --git a/meinberlin/apps/budgeting/api.py b/meinberlin/apps/budgeting/api.py\n--- a/meinberlin/apps/budgeting/api.py\n+++ b/meinberlin/apps/budgeting/api.py\n@@ -56,16 +56,6 @@\n 'default': '-created',\n }\n \n- filters['is_archived'] = {\n- 'label': _('Archived'),\n- 'choices': [\n- ('', _('All')),\n- ('false', _('No')),\n- ('true', _('Yes')),\n- ],\n- 'default': 'false',\n- }\n-\n categories = Category.objects.filter(\n module=self.module\n )\n@@ -87,6 +77,16 @@\n if has_icons(self.module):\n filters['category']['icons'] = category_icons\n \n+ filters['is_archived'] = {\n+ 'label': _('Archived'),\n+ 'choices': [\n+ ('', _('All')),\n+ ('false', _('No')),\n+ ('true', _('Yes')),\n+ ],\n+ 'default': 'false',\n+ }\n+\n response = super().list(request, args, kwargs)\n response.data['filters'] = filters\n return response\n", "issue": "testing 5024: order of filter \n**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list\r\n**user:** any\r\n**expected behaviour:** \r\n**behaviour:** \r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** the order of the filter was 1) category 2) archive. It should stay this way but as we will touch the filter again later we can also do it then but need to remember. So if easy maybe change now?\r\n\r\nScreenshot?\r\n<img width=\"490\" alt=\"Bildschirmfoto 2021-12-21 um 16 25 15\" src=\"https://user-images.githubusercontent.com/35491681/146955180-11799600-c739-4d17-8f84-7581b57a861b.png\">\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import get_language\nfrom django.utils.translation import gettext_lazy as _\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import mixins\nfrom rest_framework import viewsets\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.pagination import PageNumberPagination\n\nfrom adhocracy4.api.mixins import ModuleMixin\nfrom adhocracy4.api.permissions import ViewSetRulesPermission\nfrom adhocracy4.categories import get_category_icon_url\nfrom adhocracy4.categories import has_icons\nfrom adhocracy4.categories.models import Category\nfrom meinberlin.apps.contrib.filters import IdeaCategoryFilterBackend\nfrom meinberlin.apps.votes.api import VotingTokenInfoMixin\n\nfrom .models import Proposal\nfrom .serializers import ProposalSerializer\n\n\n# To be changed to a more general IdeaPagination, when using\n# pagination via rest api for more idea lists\nclass ProposalPagination(PageNumberPagination):\n page_size = 15\n\n def get_paginated_response(self, data):\n response = super(ProposalPagination, self).get_paginated_response(data)\n response.data['page_size'] = self.page_size\n response.data['page_count'] = self.page.paginator.num_pages\n return response\n\n\nclass LocaleInfoMixin:\n def list(self, request, *args, **kwargs):\n response = super().list(request, args, kwargs)\n response.data['locale'] = get_language()\n return response\n\n\nclass ProposalFilterInfoMixin(ModuleMixin):\n def list(self, request, *args, **kwargs):\n \"\"\"Add the filter information to the data of the Proposal API.\n\n Needs to be used with rest_framework.mixins.ListModelMixin\n \"\"\"\n filters = {}\n\n ordering_choices = [('-created', _('Most recent')), ]\n if self.module.has_feature('rate', Proposal):\n ordering_choices += ('-positive_rating_count', _('Most popular')),\n ordering_choices += ('-comment_count', _('Most commented')),\n\n filters['ordering'] = {\n 'label': _('Ordering'),\n 'choices': 
ordering_choices,\n 'default': '-created',\n }\n\n filters['is_archived'] = {\n 'label': _('Archived'),\n 'choices': [\n ('', _('All')),\n ('false', _('No')),\n ('true', _('Yes')),\n ],\n 'default': 'false',\n }\n\n categories = Category.objects.filter(\n module=self.module\n )\n if categories:\n category_choices = [('', _('All')), ]\n if has_icons(self.module):\n category_icons = []\n for category in categories:\n category_choices += (str(category.pk), category.name),\n if has_icons(self.module):\n icon_name = getattr(category, 'icon', None)\n icon_url = get_category_icon_url(icon_name)\n category_icons += (str(category.pk), icon_url),\n\n filters['category'] = {\n 'label': _('Category'),\n 'choices': category_choices,\n }\n if has_icons(self.module):\n filters['category']['icons'] = category_icons\n\n response = super().list(request, args, kwargs)\n response.data['filters'] = filters\n return response\n\n\nclass ProposalViewSet(ProposalFilterInfoMixin,\n LocaleInfoMixin,\n VotingTokenInfoMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n ):\n\n pagination_class = ProposalPagination\n serializer_class = ProposalSerializer\n permission_classes = (ViewSetRulesPermission,)\n filter_backends = (DjangoFilterBackend,\n OrderingFilter,\n IdeaCategoryFilterBackend,)\n filter_fields = ('is_archived', 'category',)\n ordering_fields = ('created',\n 'comment_count',\n 'positive_rating_count',)\n\n def get_permission_object(self):\n return self.module\n\n def get_queryset(self):\n proposals = Proposal.objects\\\n .filter(module=self.module) \\\n .annotate_comment_count() \\\n .annotate_positive_rating_count() \\\n .annotate_negative_rating_count() \\\n .order_by('-created')\n return proposals\n", "path": "meinberlin/apps/budgeting/api.py"}]} | 1,877 | 272 |
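
The whole fix above is a move: the `is_archived` block is relocated below the `category` block inside the same method, so nothing changes except dict insertion order, and that order is what the filter UI reflects, because Python dicts preserve insertion order (guaranteed since 3.7). Schematically:

```python
filters = {}
filters['ordering'] = {}      # 1st
filters['category'] = {}      # 2nd: now added before the archived filter
filters['is_archived'] = {}   # 3rd: previously this was inserted before 'category'
assert list(filters) == ['ordering', 'category', 'is_archived']
```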
gh_patches_debug_6641 | rasdani/github-patches | git_diff | ivy-llc__ivy-14502 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
generalized_normal
</issue>
<code>
[start of ivy/functional/frontends/jax/random.py]
1 # local
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from ivy.functional.frontends.jax.func_wrapper import (
5 to_ivy_arrays_and_back,
6 handle_jax_dtype,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def PRNGKey(seed):
12 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)
13
14
15 @handle_jax_dtype
16 @to_ivy_arrays_and_back
17 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):
18 return ivy.random_uniform(
19 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])
20 )
21
22
23 @handle_jax_dtype
24 @to_ivy_arrays_and_back
25 def normal(key, shape=(), dtype=None):
26 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))
27
28
29 def _get_seed(key):
30 key1, key2 = int(key[0]), int(key[1])
31 return ivy.to_scalar(int("".join(map(str, [key1, key2]))))
32
33
34 @handle_jax_dtype
35 @to_ivy_arrays_and_back
36 @with_unsupported_dtypes(
37 {
38 "0.3.14 and below": (
39 "float16",
40 "bfloat16",
41 )
42 },
43 "jax",
44 )
45 def beta(key, a, b, shape=None, dtype=None):
46 seed = _get_seed(key)
47 return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)
48
49
50 @handle_jax_dtype
51 @to_ivy_arrays_and_back
52 @with_unsupported_dtypes(
53 {
54 "0.3.14 and below": (
55 "float16",
56 "bfloat16",
57 )
58 },
59 "jax",
60 )
61 def dirichlet(key, alpha, shape=None, dtype="float32"):
62 seed = _get_seed(key)
63 alpha = ivy.astype(alpha, dtype)
64 return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)
65
66
67 @handle_jax_dtype
68 @to_ivy_arrays_and_back
69 @with_unsupported_dtypes(
70 {"0.3.14 and below": ("unsigned", "int8", "int16")},
71 "jax",
72 )
73 def poisson(key, lam, shape=None, dtype=None):
74 seed = _get_seed(key)
75 return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)
76
77
78 @handle_jax_dtype
79 @to_ivy_arrays_and_back
80 @with_unsupported_dtypes(
81 {
82 "0.3.14 and below": (
83 "float16",
84 "bfloat16",
85 )
86 },
87 "jax",
88 )
89 def gamma(key, a, shape=None, dtype="float64"):
90 seed = _get_seed(key)
91 return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed)
92
93
94 @handle_jax_dtype
95 @to_ivy_arrays_and_back
96 @with_unsupported_dtypes(
97 {
98 "0.3.14 and below": (
99 "float16",
100 "bfloat16",
101 )
102 },
103 "jax",
104 )
105 def gumbel(key, shape=(), dtype="float64"):
106 seed = _get_seed(key)
107 uniform_x = ivy.random_uniform(
108 low=0.0,
109 high=1.0,
110 shape=shape,
111 dtype=dtype,
112 seed=seed,
113 )
114 return -ivy.log(-ivy.log(uniform_x))
115
116
117 @handle_jax_dtype
118 @to_ivy_arrays_and_back
119 @with_unsupported_dtypes(
120 {
121 "0.3.14 and below": (
122 "float16",
123 "bfloat16",
124 )
125 },
126 "jax",
127 )
128 def t(key, df, shape=(), dtype="float64"):
129 seed = _get_seed(key)
130 n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)
131 half_df = df / 2.0
132 g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed)
133 return n * ivy.sqrt(ivy.divide(half_df, g))
134
[end of ivy/functional/frontends/jax/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py
--- a/ivy/functional/frontends/jax/random.py
+++ b/ivy/functional/frontends/jax/random.py
@@ -125,6 +125,14 @@
},
"jax",
)
+def generalized_normal(key, p, shape=(), dtype="float64"):
+ seed = _get_seed(key)
+ g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)
+ b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)
+ r = 2 * b - 1
+ return r * g ** (1 / p)
+
+
def t(key, df, shape=(), dtype="float64"):
seed = _get_seed(key)
n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -125,6 +125,14 @@\n },\n \"jax\",\n )\n+def generalized_normal(key, p, shape=(), dtype=\"float64\"):\n+ seed = _get_seed(key)\n+ g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)\n+ b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)\n+ r = 2 * b - 1\n+ return r * g ** (1 / p)\n+\n+\n def t(key, df, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)\n", "issue": "generalized_normal\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef poisson(key, lam, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gamma(key, a, shape=None, dtype=\"float64\"):\n seed = _get_seed(key)\n return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gumbel(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n uniform_x = ivy.random_uniform(\n low=0.0,\n high=1.0,\n shape=shape,\n dtype=dtype,\n seed=seed,\n )\n return -ivy.log(-ivy.log(uniform_x))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef t(key, df, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)\n half_df = df / 2.0\n g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed)\n return n * 
ivy.sqrt(ivy.divide(half_df, g))\n", "path": "ivy/functional/frontends/jax/random.py"}]} | 1,826 | 228 |
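
The added `generalized_normal` in the golden diff uses the standard sampling identity for the density p * exp(-|x|^p) / (2 * Gamma(1/p)): if G ~ Gamma(1/p, 1) and R is an independent random sign, then R * G^(1/p) has that distribution. A NumPy rendering of the same recipe (a sketch, not the ivy API):

```python
import numpy as np

def generalized_normal_sample(p, shape=(), rng=None):
    rng = rng or np.random.default_rng()
    g = rng.gamma(1.0 / p, 1.0, size=shape)        # |X|**p ~ Gamma(1/p, 1)
    r = 2 * rng.binomial(1, 0.5, size=shape) - 1   # random sign, mirroring ivy.bernoulli in the diff
    return r * g ** (1.0 / p)
```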
gh_patches_debug_9245 | rasdani/github-patches | git_diff | translate__pootle-6371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fulah locale exposes _XXX_LAST_SUBMISSION_
When in the Fulah (ff) locale, all pages expose the `_XXX_LAST_SUBMISSION_` text used to mangle timesince messages.

</issue>
<code>
[start of pootle/i18n/dates.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from datetime import datetime
10
11
12 from .formatter import get_locale_formats
13
14
15 def timesince(timestamp, locale=None):
16 timedelta = datetime.now() - datetime.fromtimestamp(timestamp)
17 return get_locale_formats(locale).timedelta(timedelta, format='long')
18
[end of pootle/i18n/dates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py
--- a/pootle/i18n/dates.py
+++ b/pootle/i18n/dates.py
@@ -8,10 +8,15 @@
from datetime import datetime
+from django.conf import settings
from .formatter import get_locale_formats
def timesince(timestamp, locale=None):
timedelta = datetime.now() - datetime.fromtimestamp(timestamp)
- return get_locale_formats(locale).timedelta(timedelta, format='long')
+ formatted = get_locale_formats(locale).timedelta(timedelta, format='long')
+ if formatted:
+ return formatted
+ return get_locale_formats(
+ settings.LANGUAGE_CODE).timedelta(timedelta, format='long')
| {"golden_diff": "diff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py\n--- a/pootle/i18n/dates.py\n+++ b/pootle/i18n/dates.py\n@@ -8,10 +8,15 @@\n \n from datetime import datetime\n \n+from django.conf import settings\n \n from .formatter import get_locale_formats\n \n \n def timesince(timestamp, locale=None):\n timedelta = datetime.now() - datetime.fromtimestamp(timestamp)\n- return get_locale_formats(locale).timedelta(timedelta, format='long')\n+ formatted = get_locale_formats(locale).timedelta(timedelta, format='long')\n+ if formatted:\n+ return formatted\n+ return get_locale_formats(\n+ settings.LANGUAGE_CODE).timedelta(timedelta, format='long')\n", "issue": "Fulah locale exposes _XXX_LAST_SUBMISSION_\nWhen in the Fulah (ff) locale, all pages expose the `_XXX_LAST_SUBMISSION_` text used to mangle timesince messages.\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom datetime import datetime\n\n\nfrom .formatter import get_locale_formats\n\n\ndef timesince(timestamp, locale=None):\n timedelta = datetime.now() - datetime.fromtimestamp(timestamp)\n return get_locale_formats(locale).timedelta(timedelta, format='long')\n", "path": "pootle/i18n/dates.py"}]} | 822 | 178 |
gh_patches_debug_19940 | rasdani/github-patches | git_diff | mirumee__ariadne-153 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise ValueError when `field` or `source` decorator was called incorrectly
Currently there's no error when the developer forgets to follow the `field` or `source` decorator with `("name")`, tricking them into thinking that decorated function has been registered while in fact it wasn't.
We could update implementation for those functions to raise ValueError when `name` attr is not `str`.
</issue>
<code>
[start of ariadne/objects.py]
1 from typing import Callable, Dict, Optional, cast
2
3 from graphql.type import GraphQLNamedType, GraphQLObjectType, GraphQLSchema
4
5 from .resolvers import resolve_to
6 from .types import Resolver, SchemaBindable
7
8
9 class ObjectType(SchemaBindable):
10 _resolvers: Dict[str, Resolver]
11
12 def __init__(self, name: str) -> None:
13 self.name = name
14 self._resolvers = {}
15
16 def field(self, name: str) -> Callable[[Resolver], Resolver]:
17 return self.create_register_resolver(name)
18
19 def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:
20 def register_resolver(f: Resolver) -> Resolver:
21 self._resolvers[name] = f
22 return f
23
24 return register_resolver
25
26 def set_field(self, name, resolver: Resolver) -> Resolver:
27 self._resolvers[name] = resolver
28 return resolver
29
30 def set_alias(self, name: str, to: str) -> None:
31 self._resolvers[name] = resolve_to(to)
32
33 def bind_to_schema(self, schema: GraphQLSchema) -> None:
34 graphql_type = schema.type_map.get(self.name)
35 self.validate_graphql_type(graphql_type)
36 graphql_type = cast(GraphQLObjectType, graphql_type)
37 self.bind_resolvers_to_graphql_type(graphql_type)
38
39 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:
40 if not graphql_type:
41 raise ValueError("Type %s is not defined in the schema" % self.name)
42 if not isinstance(graphql_type, GraphQLObjectType):
43 raise ValueError(
44 "%s is defined in the schema, but it is instance of %s (expected %s)"
45 % (self.name, type(graphql_type).__name__, GraphQLObjectType.__name__)
46 )
47
48 def bind_resolvers_to_graphql_type(self, graphql_type, replace_existing=True):
49 for field, resolver in self._resolvers.items():
50 if field not in graphql_type.fields:
51 raise ValueError(
52 "Field %s is not defined on type %s" % (field, self.name)
53 )
54 if graphql_type.fields[field].resolve is None or replace_existing:
55 graphql_type.fields[field].resolve = resolver
56
57
58 class QueryType(ObjectType):
59 """Convenience class for defining Query type"""
60
61 def __init__(self):
62 super().__init__("Query")
63
64
65 class MutationType(ObjectType):
66 """Convenience class for defining Mutation type"""
67
68 def __init__(self):
69 super().__init__("Mutation")
70
[end of ariadne/objects.py]
[start of ariadne/subscriptions.py]
1 from typing import Callable, Dict
2
3 from graphql.type import GraphQLSchema
4
5 from .objects import ObjectType
6 from .types import Subscriber
7
8
9 class SubscriptionType(ObjectType):
10 _subscribers: Dict[str, Subscriber]
11
12 def __init__(self) -> None:
13 super().__init__("Subscription")
14 self._subscribers = {}
15
16 def source(self, name: str) -> Callable[[Subscriber], Subscriber]:
17 return self.create_register_subscriber(name)
18
19 def create_register_subscriber(
20 self, name: str
21 ) -> Callable[[Subscriber], Subscriber]:
22 def register_subscriber(generator: Subscriber) -> Subscriber:
23 self._subscribers[name] = generator
24 return generator
25
26 return register_subscriber
27
28 def set_source(self, name, generator: Subscriber) -> Subscriber:
29 self._subscribers[name] = generator
30 return generator
31
32 def bind_to_schema(self, schema: GraphQLSchema) -> None:
33 graphql_type = schema.type_map.get(self.name)
34 self.validate_graphql_type(graphql_type)
35 self.bind_resolvers_to_graphql_type(graphql_type)
36 self.bind_subscribers_to_graphql_type(graphql_type)
37
38 def bind_subscribers_to_graphql_type(self, graphql_type):
39 for field, subscriber in self._subscribers.items():
40 if field not in graphql_type.fields:
41 raise ValueError(
42 "Field %s is not defined on type %s" % (field, self.name)
43 )
44
45 graphql_type.fields[field].subscribe = subscriber
46
[end of ariadne/subscriptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/objects.py b/ariadne/objects.py
--- a/ariadne/objects.py
+++ b/ariadne/objects.py
@@ -14,6 +14,10 @@
self._resolvers = {}
def field(self, name: str) -> Callable[[Resolver], Resolver]:
+ if not isinstance(name, str):
+ raise ValueError(
+ 'field decorator should be passed a field name: @foo.field("name")'
+ )
return self.create_register_resolver(name)
def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:
diff --git a/ariadne/subscriptions.py b/ariadne/subscriptions.py
--- a/ariadne/subscriptions.py
+++ b/ariadne/subscriptions.py
@@ -14,6 +14,10 @@
self._subscribers = {}
def source(self, name: str) -> Callable[[Subscriber], Subscriber]:
+ if not isinstance(name, str):
+ raise ValueError(
+ 'source decorator should be passed a field name: @foo.source("name")'
+ )
return self.create_register_subscriber(name)
def create_register_subscriber(
| {"golden_diff": "diff --git a/ariadne/objects.py b/ariadne/objects.py\n--- a/ariadne/objects.py\n+++ b/ariadne/objects.py\n@@ -14,6 +14,10 @@\n self._resolvers = {}\n \n def field(self, name: str) -> Callable[[Resolver], Resolver]:\n+ if not isinstance(name, str):\n+ raise ValueError(\n+ 'field decorator should be passed a field name: @foo.field(\"name\")'\n+ )\n return self.create_register_resolver(name)\n \n def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:\ndiff --git a/ariadne/subscriptions.py b/ariadne/subscriptions.py\n--- a/ariadne/subscriptions.py\n+++ b/ariadne/subscriptions.py\n@@ -14,6 +14,10 @@\n self._subscribers = {}\n \n def source(self, name: str) -> Callable[[Subscriber], Subscriber]:\n+ if not isinstance(name, str):\n+ raise ValueError(\n+ 'source decorator should be passed a field name: @foo.source(\"name\")'\n+ )\n return self.create_register_subscriber(name)\n \n def create_register_subscriber(\n", "issue": "Raise ValueError when `field` or `source` decorator was called incorrectly\nCurrently there's no error when the developer forgets to follow the `field` or `source` decorator with `(\"name\")`, tricking them into thinking that decorated function has been registered while in fact it wasn't.\r\n\r\nWe could update implementation for those functions to raise ValueError when `name` attr is not `str`.\n", "before_files": [{"content": "from typing import Callable, Dict, Optional, cast\n\nfrom graphql.type import GraphQLNamedType, GraphQLObjectType, GraphQLSchema\n\nfrom .resolvers import resolve_to\nfrom .types import Resolver, SchemaBindable\n\n\nclass ObjectType(SchemaBindable):\n _resolvers: Dict[str, Resolver]\n\n def __init__(self, name: str) -> None:\n self.name = name\n self._resolvers = {}\n\n def field(self, name: str) -> Callable[[Resolver], Resolver]:\n return self.create_register_resolver(name)\n\n def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:\n def register_resolver(f: Resolver) -> Resolver:\n self._resolvers[name] = f\n return f\n\n return register_resolver\n\n def set_field(self, name, resolver: Resolver) -> Resolver:\n self._resolvers[name] = resolver\n return resolver\n\n def set_alias(self, name: str, to: str) -> None:\n self._resolvers[name] = resolve_to(to)\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLObjectType, graphql_type)\n self.bind_resolvers_to_graphql_type(graphql_type)\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Type %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLObjectType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLObjectType.__name__)\n )\n\n def bind_resolvers_to_graphql_type(self, graphql_type, replace_existing=True):\n for field, resolver in self._resolvers.items():\n if field not in graphql_type.fields:\n raise ValueError(\n \"Field %s is not defined on type %s\" % (field, self.name)\n )\n if graphql_type.fields[field].resolve is None or replace_existing:\n graphql_type.fields[field].resolve = resolver\n\n\nclass QueryType(ObjectType):\n \"\"\"Convenience class for defining Query type\"\"\"\n\n def __init__(self):\n super().__init__(\"Query\")\n\n\nclass MutationType(ObjectType):\n \"\"\"Convenience class for 
defining Mutation type\"\"\"\n\n def __init__(self):\n super().__init__(\"Mutation\")\n", "path": "ariadne/objects.py"}, {"content": "from typing import Callable, Dict\n\nfrom graphql.type import GraphQLSchema\n\nfrom .objects import ObjectType\nfrom .types import Subscriber\n\n\nclass SubscriptionType(ObjectType):\n _subscribers: Dict[str, Subscriber]\n\n def __init__(self) -> None:\n super().__init__(\"Subscription\")\n self._subscribers = {}\n\n def source(self, name: str) -> Callable[[Subscriber], Subscriber]:\n return self.create_register_subscriber(name)\n\n def create_register_subscriber(\n self, name: str\n ) -> Callable[[Subscriber], Subscriber]:\n def register_subscriber(generator: Subscriber) -> Subscriber:\n self._subscribers[name] = generator\n return generator\n\n return register_subscriber\n\n def set_source(self, name, generator: Subscriber) -> Subscriber:\n self._subscribers[name] = generator\n return generator\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n self.bind_resolvers_to_graphql_type(graphql_type)\n self.bind_subscribers_to_graphql_type(graphql_type)\n\n def bind_subscribers_to_graphql_type(self, graphql_type):\n for field, subscriber in self._subscribers.items():\n if field not in graphql_type.fields:\n raise ValueError(\n \"Field %s is not defined on type %s\" % (field, self.name)\n )\n\n graphql_type.fields[field].subscribe = subscriber\n", "path": "ariadne/subscriptions.py"}]} | 1,715 | 271 |
gh_patches_debug_1362 | rasdani/github-patches | git_diff | UTNkar__moore-59 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login is per-subdomain
</issue>
<code>
[start of website/website/settings/production.py]
1 """
2 Django settings for the production environment of Project Moore.
3
4 For more information regarding running in production see,
5 See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
6
7 For more information on this file, see
8 https://docs.djangoproject.com/en/1.10/topics/settings/
9
10 For the full list of settings and their values, see
11 https://docs.djangoproject.com/en/1.10/ref/settings/
12 """
13 from __future__ import absolute_import, unicode_literals
14
15 from .base import *
16
17 # SECURITY WARNING: don't run with debug turned on in production!
18 DEBUG = False
19
20 # SECURITY WARNING: keep the secret key used in production secret!
21 SECRET_KEY = os.environ.get(
22 'DJANGO_SECRET',
23 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'
24 )
25
26 # Database
27 # https://docs.djangoproject.com/en/1.10/ref/settings/#databases
28
29 DATABASES = {
30 'default': {
31 'ENGINE': 'django.db.backends.postgresql',
32 'NAME': os.environ.get('DJANGO_DB'),
33 'USER': os.environ.get('DJANGO_DB_USER'),
34 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),
35 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),
36 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),
37 }
38 }
39
40 # CONN_MAX_AGE = 0
41
42 # Base URL to use when referring to full URLs within the Wagtail admin
43 # backend - e.g. in notification emails. Don't include '/admin' or a
44 # trailing slash
45 BASE_URL = 'https://dev.utn.se'
46
47 ALLOWED_HOSTS = ['.utn.se']
48
49 # Email settings
50 DEFAULT_FROM_EMAIL = '[email protected]'
51
52 EMAIL_SUBJECT_PREFIX = '[UTN] '
53
54 # Admins - will be sent error messages
55 ADMINS = [('UTN System Administrator', '[email protected]')]
56
57 LOGGING_CONFIG = None
58
59 # TODO: HTTPS security
60 # CSRF_COOKIE_SECURE = True
61 #
62 # SESSION_COOKIE_SECURE = True
63
64 # Membership API
65 MEMBERSHIP_API_USER = 'moore'
66 MEMBERSHIP_API_PASSWORD = os.environ.get('MEMBERSHIP_API_PASSWORD')
67
68 try:
69 from .local import *
70 except ImportError:
71 pass
72
[end of website/website/settings/production.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/website/settings/production.py b/website/website/settings/production.py
--- a/website/website/settings/production.py
+++ b/website/website/settings/production.py
@@ -56,10 +56,11 @@
LOGGING_CONFIG = None
-# TODO: HTTPS security
-# CSRF_COOKIE_SECURE = True
-#
-# SESSION_COOKIE_SECURE = True
+CSRF_COOKIE_SECURE = True
+
+SESSION_COOKIE_DOMAIN = '.utn.se'
+
+SESSION_COOKIE_SECURE = True
# Membership API
MEMBERSHIP_API_USER = 'moore'
| {"golden_diff": "diff --git a/website/website/settings/production.py b/website/website/settings/production.py\n--- a/website/website/settings/production.py\n+++ b/website/website/settings/production.py\n@@ -56,10 +56,11 @@\n \n LOGGING_CONFIG = None\n \n-# TODO: HTTPS security\n-# CSRF_COOKIE_SECURE = True\n-#\n-# SESSION_COOKIE_SECURE = True\n+CSRF_COOKIE_SECURE = True\n+\n+SESSION_COOKIE_DOMAIN = '.utn.se'\n+\n+SESSION_COOKIE_SECURE = True\n \n # Membership API\n MEMBERSHIP_API_USER = 'moore'\n", "issue": "Login is per-subdomain\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB'),\n 'USER': os.environ.get('DJANGO_DB_USER'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://dev.utn.se'\n\nALLOWED_HOSTS = ['.utn.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Admins - will be sent error messages\nADMINS = [('UTN System Administrator', '[email protected]')]\n\nLOGGING_CONFIG = None\n\n# TODO: HTTPS security\n# CSRF_COOKIE_SECURE = True\n#\n# SESSION_COOKIE_SECURE = True\n\n# Membership API\nMEMBERSHIP_API_USER = 'moore'\nMEMBERSHIP_API_PASSWORD = os.environ.get('MEMBERSHIP_API_PASSWORD')\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n", "path": "website/website/settings/production.py"}]} | 1,207 | 132 |
gh_patches_debug_3121 | rasdani/github-patches | git_diff | mirumee__ariadne-961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Starlette 0.21.0
Starlette 0.21.0 fix important issues on the BaseHttpMiddleware side.
https://github.com/encode/starlette/pull/1715
https://github.com/tiangolo/fastapi/issues/4544
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.7",
12 "Programming Language :: Python :: 3.8",
13 "Programming Language :: Python :: 3.9",
14 "Programming Language :: Python :: 3.10",
15 "Topic :: Software Development :: Libraries :: Python Modules",
16 ]
17
18 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
19 with open(README_PATH, "r", encoding="utf8") as f:
20 README = f.read()
21
22 setup(
23 name="ariadne",
24 author="Mirumee Software",
25 author_email="[email protected]",
26 description="Ariadne is a Python library for implementing GraphQL servers.",
27 long_description=README,
28 long_description_content_type="text/markdown",
29 license="BSD",
30 version="0.16.1",
31 url="https://github.com/mirumee/ariadne",
32 packages=["ariadne"],
33 include_package_data=True,
34 install_requires=[
35 "graphql-core>=3.2.0,<3.3",
36 "starlette>0.17,<0.21",
37 "typing_extensions>=3.6.0",
38 ],
39 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
40 classifiers=CLASSIFIERS,
41 platforms=["any"],
42 zip_safe=False,
43 )
44
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
include_package_data=True,
install_requires=[
"graphql-core>=3.2.0,<3.3",
- "starlette>0.17,<0.21",
+ "starlette>0.17,<1.0",
"typing_extensions>=3.6.0",
],
extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n- \"starlette>0.17,<0.21\",\n+ \"starlette>0.17,<1.0\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n", "issue": "Support Starlette 0.21.0\nStarlette 0.21.0 fix important issues on the BaseHttpMiddleware side. \r\n\r\nhttps://github.com/encode/starlette/pull/1715\r\nhttps://github.com/tiangolo/fastapi/issues/4544\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.16.1\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette>0.17,<0.21\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,034 | 124 |
gh_patches_debug_20924 | rasdani/github-patches | git_diff | OCA__server-tools-211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tree_view_record_id: causes a warning in the logs
in the runbot logs I get:
2015-07-17 13:09:05,793 27838 WARNING 3110977-7-0-458127-all openerp.modules.loading: The model module.tree.view.record.id.installed has no access rules, consider adding one. E.g. access_module_tree_view_record_id_installed,access_module_tree_view_record_id_installed,model_module_tree_view_record_id_installed,,1,1,1,1
I tracked down module.tree.view.record.id.installed to tree_view_record_id
I totally don't understand why the pseudo dynamic a weird name generated that way, but an ACL is missing
</issue>
<code>
[start of sentry_logger/__init__.py]
1 # -*- coding: utf-8 -*-
2 ###############################################################################
3 #
4 # OpenERP, Open Source Management Solution
5 # This module copyright (C) 2010 - 2014 Savoir-faire Linux
6 # (<http://www.savoirfairelinux.com>).
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Affero General Public License as
10 # published by the Free Software Foundation, either version 3 of the
11 # License, or (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Affero General Public License for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 ###############################################################################
22
23 import logging
24 import cgitb
25
26 from openerp.tools import config
27 from openerp.addons.web.controllers.main import Session
28
29 _DEFAULT_LOGGING_LEVEL = logging.ERROR
30
31 try:
32 from .odoo_sentry_client import OdooClient
33 from .odoo_sentry_handler import OdooSentryHandler
34
35 root_logger = logging.root
36
37 processors = (
38 'raven.processors.SanitizePasswordsProcessor',
39 'raven_sanitize_openerp.OpenerpPasswordsProcessor'
40 )
41 if config.get(u'sentry_dsn'):
42 cgitb.enable()
43 # Get DSN info from config file or ~/.openerp_serverrc (recommended)
44 dsn = config.get('sentry_dsn')
45 try:
46 level = getattr(logging, config.get('sentry_logging_level'))
47 except (AttributeError, TypeError):
48 level = _DEFAULT_LOGGING_LEVEL
49 # Create Client
50 client = OdooClient(
51 dsn=dsn,
52 processors=processors,
53 )
54 handler = OdooSentryHandler(client, level=level)
55 root_logger.addHandler(handler)
56 else:
57 root_logger.warn(u"Sentry DSN not defined in config file")
58 client = None
59
60 # Inject sentry_activated to session to display error message or not
61 old_session_info = Session.session_info
62
63 def session_info(self, req):
64 res = old_session_info(self, req)
65 res['sentry_activated'] = bool(client)
66 return res
67
68 Session.session_info = session_info
69 except ImportError:
70 pass
71
[end of sentry_logger/__init__.py]
[start of tree_view_record_id/__openerp__.py]
1 # -*- coding: utf-8 -*-
2 ###############################################################################
3 #
4 # Copyright (C) 2012-TODAY Akretion <http://www.akretion.com>.
5 # All Rights Reserved
6 # @author David BEAL <[email protected]>
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #
20 ###############################################################################
21
22 {
23 'name': 'Tree View Record Id',
24 'version': '0.1',
25 'category': 'Other modules',
26 'sequence': 10,
27 'author': "Akretion,Odoo Community Association (OCA)",
28 'summary': "Adds id field to tree views",
29 'description': """
30 Adds Id field in all tree views of any modules/models, except:
31
32 * Arborescent tree views like 'Products by Category', 'Chart of accounts', etc.
33 * Tree views (like in wizard 'Change password') built on transient models
34 which don't have this column in their table.
35
36 Id field is the primary key of standard sql tables
37 defined by the orm (Odoo model).
38 """,
39 'website': 'http://www.akretion.com',
40 'depends': [
41 'base',
42 ],
43 'data': [
44 ],
45 'demo': [
46 ],
47 'installable': True,
48 'auto_install': False,
49 'application': False,
50 'images': [
51 ],
52 'css': [
53 ],
54 'js': [
55 ],
56 'qweb': [
57 ],
58 }
59
[end of tree_view_record_id/__openerp__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_logger/__init__.py b/sentry_logger/__init__.py
--- a/sentry_logger/__init__.py
+++ b/sentry_logger/__init__.py
@@ -20,6 +20,7 @@
#
###############################################################################
+import os
import logging
import cgitb
@@ -54,7 +55,12 @@
handler = OdooSentryHandler(client, level=level)
root_logger.addHandler(handler)
else:
- root_logger.warn(u"Sentry DSN not defined in config file")
+ msg = u"Sentry DSN not defined in config file"
+ if os.environ.get('OCA_CI'):
+ # don't fail the build on runbot for this
+ root_logger.info(msg)
+ else:
+ root_logger.warn(msg)
client = None
# Inject sentry_activated to session to display error message or not
diff --git a/tree_view_record_id/__openerp__.py b/tree_view_record_id/__openerp__.py
--- a/tree_view_record_id/__openerp__.py
+++ b/tree_view_record_id/__openerp__.py
@@ -41,6 +41,7 @@
'base',
],
'data': [
+ 'security/ir.model.access.csv',
],
'demo': [
],
| {"golden_diff": "diff --git a/sentry_logger/__init__.py b/sentry_logger/__init__.py\n--- a/sentry_logger/__init__.py\n+++ b/sentry_logger/__init__.py\n@@ -20,6 +20,7 @@\n #\n ###############################################################################\n \n+import os\n import logging\n import cgitb\n \n@@ -54,7 +55,12 @@\n handler = OdooSentryHandler(client, level=level)\n root_logger.addHandler(handler)\n else:\n- root_logger.warn(u\"Sentry DSN not defined in config file\")\n+ msg = u\"Sentry DSN not defined in config file\"\n+ if os.environ.get('OCA_CI'):\n+ # don't fail the build on runbot for this\n+ root_logger.info(msg)\n+ else:\n+ root_logger.warn(msg)\n client = None\n \n # Inject sentry_activated to session to display error message or not\ndiff --git a/tree_view_record_id/__openerp__.py b/tree_view_record_id/__openerp__.py\n--- a/tree_view_record_id/__openerp__.py\n+++ b/tree_view_record_id/__openerp__.py\n@@ -41,6 +41,7 @@\n 'base',\n ],\n 'data': [\n+ 'security/ir.model.access.csv',\n ],\n 'demo': [\n ],\n", "issue": "tree_view_record_id: causes a warning in the logs\nin the runbot logs I get:\n\n2015-07-17 13:09:05,793 27838 WARNING 3110977-7-0-458127-all openerp.modules.loading: The model module.tree.view.record.id.installed has no access rules, consider adding one. E.g. access_module_tree_view_record_id_installed,access_module_tree_view_record_id_installed,model_module_tree_view_record_id_installed,,1,1,1,1\n\nI tracked down module.tree.view.record.id.installed to tree_view_record_id\n\nI totally don't understand why the pseudo dynamic a weird name generated that way, but an ACL is missing\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n###############################################################################\n#\n# OpenERP, Open Source Management Solution\n# This module copyright (C) 2010 - 2014 Savoir-faire Linux\n# (<http://www.savoirfairelinux.com>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n###############################################################################\n\nimport logging\nimport cgitb\n\nfrom openerp.tools import config\nfrom openerp.addons.web.controllers.main import Session\n\n_DEFAULT_LOGGING_LEVEL = logging.ERROR\n\ntry:\n from .odoo_sentry_client import OdooClient\n from .odoo_sentry_handler import OdooSentryHandler\n\n root_logger = logging.root\n\n processors = (\n 'raven.processors.SanitizePasswordsProcessor',\n 'raven_sanitize_openerp.OpenerpPasswordsProcessor'\n )\n if config.get(u'sentry_dsn'):\n cgitb.enable()\n # Get DSN info from config file or ~/.openerp_serverrc (recommended)\n dsn = config.get('sentry_dsn')\n try:\n level = getattr(logging, config.get('sentry_logging_level'))\n except (AttributeError, TypeError):\n level = _DEFAULT_LOGGING_LEVEL\n # Create Client\n client = OdooClient(\n dsn=dsn,\n processors=processors,\n )\n handler = OdooSentryHandler(client, level=level)\n root_logger.addHandler(handler)\n else:\n root_logger.warn(u\"Sentry DSN not defined in config file\")\n client = None\n\n # Inject sentry_activated to session to display error message or not\n old_session_info = Session.session_info\n\n def session_info(self, req):\n res = old_session_info(self, req)\n res['sentry_activated'] = bool(client)\n return res\n\n Session.session_info = session_info\nexcept ImportError:\n pass\n", "path": "sentry_logger/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Copyright (C) 2012-TODAY Akretion <http://www.akretion.com>.\n# All Rights Reserved\n# @author David BEAL <[email protected]>\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n###############################################################################\n\n{\n 'name': 'Tree View Record Id',\n 'version': '0.1',\n 'category': 'Other modules',\n 'sequence': 10,\n 'author': \"Akretion,Odoo Community Association (OCA)\",\n 'summary': \"Adds id field to tree views\",\n 'description': \"\"\"\nAdds Id field in all tree views of any modules/models, except:\n\n* Arborescent tree views like 'Products by Category', 'Chart of accounts', etc.\n* Tree views (like in wizard 'Change password') built on transient models\n which don't have this column in their table.\n\nId field is the primary key of standard sql tables\ndefined by the orm (Odoo model).\n \"\"\",\n 'website': 'http://www.akretion.com',\n 'depends': [\n 'base',\n ],\n 'data': [\n ],\n 'demo': [\n ],\n 'installable': True,\n 'auto_install': False,\n 'application': False,\n 'images': [\n ],\n 'css': [\n ],\n 'js': [\n ],\n 'qweb': [\n ],\n}\n", "path": "tree_view_record_id/__openerp__.py"}]} | 1,988 | 294 |
gh_patches_debug_5906 | rasdani/github-patches | git_diff | mesonbuild__meson-2743 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
run_target Permission Denied error should be clearer
Minimal example -
[folder.zip](https://github.com/mesonbuild/meson/files/1530489/folder.zip)
I ran - `mkdir build && cd build && meson .. && ninja` and everything works. Now I run - `ninja myscript` and it throws errors -
```
[0/1] Running external command myscript.
Traceback (most recent call last):
File "/usr/bin/meson", line 37, in <module>
sys.exit(main())
File "/usr/bin/meson", line 34, in main
return mesonmain.run(sys.argv[1:], launcher)
File "/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py", line 311, in run
sys.exit(run_script_command(args[1:]))
File "/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py", line 278, in run_script_command
return cmdfunc(cmdargs)
File "/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py", line 60, in run
pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)
File "/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py", line 39, in run_command
return subprocess.Popen(command_array, env=child_env, cwd=cwd)
File "/usr/lib/python3.6/subprocess.py", line 709, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.6/subprocess.py", line 1344, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
PermissionError: [Errno 13] Permission denied: '/home/agauniyal/temp/scripts/script.sh'
FAILED: meson-myscript
/usr/bin/python /usr/bin/meson --internal commandrunner /home/agauniyal/temp/ /home/agauniyal/temp/build '' /usr/bin/python /usr/bin/meson /home/agauniyal/temp/scripts/script.sh
ninja: build stopped: subcommand failed.
```
</issue>
<code>
[start of mesonbuild/scripts/commandrunner.py]
1 # Copyright 2014 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This program is a wrapper to run external commands. It determines
16 what to run, sets up the environment and executes the command."""
17
18 import sys, os, subprocess, shutil, shlex
19
20 def run_command(source_dir, build_dir, subdir, meson_command, command, arguments):
21 env = {'MESON_SOURCE_ROOT': source_dir,
22 'MESON_BUILD_ROOT': build_dir,
23 'MESON_SUBDIR': subdir,
24 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in meson_command + ['introspect']]),
25 }
26 cwd = os.path.join(source_dir, subdir)
27 child_env = os.environ.copy()
28 child_env.update(env)
29
30 # Is the command an executable in path?
31 exe = shutil.which(command)
32 if exe is not None:
33 command_array = [exe] + arguments
34 return subprocess.Popen(command_array, env=child_env, cwd=cwd)
35 # No? Maybe it is a script in the source tree.
36 fullpath = os.path.join(source_dir, subdir, command)
37 command_array = [fullpath] + arguments
38 try:
39 return subprocess.Popen(command_array, env=child_env, cwd=cwd)
40 except FileNotFoundError:
41 print('Could not execute command "%s".' % command)
42 sys.exit(1)
43
44 def run(args):
45 if len(args) < 4:
46 print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')
47 return 1
48 src_dir = args[0]
49 build_dir = args[1]
50 subdir = args[2]
51 meson_command = args[3]
52 if 'python' in meson_command: # Hack.
53 meson_command = [meson_command, args[4]]
54 command = args[5]
55 arguments = args[6:]
56 else:
57 meson_command = [meson_command]
58 command = args[4]
59 arguments = args[5:]
60 pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)
61 pc.wait()
62 return pc.returncode
63
64 if __name__ == '__main__':
65 sys.exit(run(sys.argv[1:]))
66
[end of mesonbuild/scripts/commandrunner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesonbuild/scripts/commandrunner.py b/mesonbuild/scripts/commandrunner.py
--- a/mesonbuild/scripts/commandrunner.py
+++ b/mesonbuild/scripts/commandrunner.py
@@ -38,7 +38,10 @@
try:
return subprocess.Popen(command_array, env=child_env, cwd=cwd)
except FileNotFoundError:
- print('Could not execute command "%s".' % command)
+ print('Could not execute command "%s". File not found.' % command)
+ sys.exit(1)
+ except PermissionError:
+ print('Could not execute command "%s". File not executable.' % command)
sys.exit(1)
def run(args):
| {"golden_diff": "diff --git a/mesonbuild/scripts/commandrunner.py b/mesonbuild/scripts/commandrunner.py\n--- a/mesonbuild/scripts/commandrunner.py\n+++ b/mesonbuild/scripts/commandrunner.py\n@@ -38,7 +38,10 @@\n try:\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\n except FileNotFoundError:\n- print('Could not execute command \"%s\".' % command)\n+ print('Could not execute command \"%s\". File not found.' % command)\n+ sys.exit(1)\n+ except PermissionError:\n+ print('Could not execute command \"%s\". File not executable.' % command)\n sys.exit(1)\n \n def run(args):\n", "issue": "run_target Permission Denied error should be clearer\nMinimal example - \r\n[folder.zip](https://github.com/mesonbuild/meson/files/1530489/folder.zip)\r\n\r\nI ran - `mkdir build && cd build && meson .. && ninja` and everything works. Now I run - `ninja myscript` and it throws errors -\r\n\r\n```\r\n[0/1] Running external command myscript.\r\nTraceback (most recent call last):\r\n File \"/usr/bin/meson\", line 37, in <module>\r\n sys.exit(main())\r\n File \"/usr/bin/meson\", line 34, in main\r\n return mesonmain.run(sys.argv[1:], launcher)\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py\", line 311, in run\r\n sys.exit(run_script_command(args[1:]))\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py\", line 278, in run_script_command\r\n return cmdfunc(cmdargs)\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py\", line 60, in run\r\n pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py\", line 39, in run_command\r\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 709, in __init__\r\n restore_signals, start_new_session)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 1344, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nPermissionError: [Errno 13] Permission denied: '/home/agauniyal/temp/scripts/script.sh'\r\nFAILED: meson-myscript \r\n/usr/bin/python /usr/bin/meson --internal commandrunner /home/agauniyal/temp/ /home/agauniyal/temp/build '' /usr/bin/python /usr/bin/meson /home/agauniyal/temp/scripts/script.sh\r\nninja: build stopped: subcommand failed.\r\n```\n", "before_files": [{"content": "# Copyright 2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This program is a wrapper to run external commands. 
It determines\nwhat to run, sets up the environment and executes the command.\"\"\"\n\nimport sys, os, subprocess, shutil, shlex\n\ndef run_command(source_dir, build_dir, subdir, meson_command, command, arguments):\n env = {'MESON_SOURCE_ROOT': source_dir,\n 'MESON_BUILD_ROOT': build_dir,\n 'MESON_SUBDIR': subdir,\n 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in meson_command + ['introspect']]),\n }\n cwd = os.path.join(source_dir, subdir)\n child_env = os.environ.copy()\n child_env.update(env)\n\n # Is the command an executable in path?\n exe = shutil.which(command)\n if exe is not None:\n command_array = [exe] + arguments\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\n # No? Maybe it is a script in the source tree.\n fullpath = os.path.join(source_dir, subdir, command)\n command_array = [fullpath] + arguments\n try:\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\n except FileNotFoundError:\n print('Could not execute command \"%s\".' % command)\n sys.exit(1)\n\ndef run(args):\n if len(args) < 4:\n print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')\n return 1\n src_dir = args[0]\n build_dir = args[1]\n subdir = args[2]\n meson_command = args[3]\n if 'python' in meson_command: # Hack.\n meson_command = [meson_command, args[4]]\n command = args[5]\n arguments = args[6:]\n else:\n meson_command = [meson_command]\n command = args[4]\n arguments = args[5:]\n pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)\n pc.wait()\n return pc.returncode\n\nif __name__ == '__main__':\n sys.exit(run(sys.argv[1:]))\n", "path": "mesonbuild/scripts/commandrunner.py"}]} | 1,748 | 152 |
gh_patches_debug_23694 | rasdani/github-patches | git_diff | pre-commit__pre-commit-718 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle when `core.hooksPath` is set?
As we found in https://github.com/pre-commit/pre-commit-hooks/issues/250, pre-commit (despite being installed) will be silently skipped if `core.hooksPath` is set.
A few options:
- during `pre-commit install`, check this variable and warn
- "" but error
- install into the directory at `core.hooksPath` (but it may be outside the working dir? probably not the best idea to write to it)
</issue>
<code>
[start of pre_commit/commands/install_uninstall.py]
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import io
5 import os.path
6 import sys
7
8 from pre_commit import output
9 from pre_commit.util import make_executable
10 from pre_commit.util import mkdirp
11 from pre_commit.util import resource_filename
12
13
14 # This is used to identify the hook file we install
15 PRIOR_HASHES = (
16 '4d9958c90bc262f47553e2c073f14cfe',
17 'd8ee923c46731b42cd95cc869add4062',
18 '49fd668cb42069aa1b6048464be5d395',
19 '79f09a650522a87b0da915d0d983b2de',
20 'e358c9dae00eac5d06b38dfdb1e33a8c',
21 )
22 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
23 TEMPLATE_START = '# start templated\n'
24 TEMPLATE_END = '# end templated\n'
25
26
27 def is_our_script(filename):
28 if not os.path.exists(filename):
29 return False
30 contents = io.open(filename).read()
31 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
32
33
34 def install(
35 runner, overwrite=False, hooks=False, hook_type='pre-commit',
36 skip_on_missing_conf=False,
37 ):
38 """Install the pre-commit hooks."""
39 hook_path = runner.get_hook_path(hook_type)
40 legacy_path = hook_path + '.legacy'
41
42 mkdirp(os.path.dirname(hook_path))
43
44 # If we have an existing hook, move it to pre-commit.legacy
45 if os.path.lexists(hook_path) and not is_our_script(hook_path):
46 os.rename(hook_path, legacy_path)
47
48 # If we specify overwrite, we simply delete the legacy file
49 if overwrite and os.path.exists(legacy_path):
50 os.remove(legacy_path)
51 elif os.path.exists(legacy_path):
52 output.write_line(
53 'Running in migration mode with existing hooks at {}\n'
54 'Use -f to use only pre-commit.'.format(legacy_path),
55 )
56
57 params = {
58 'CONFIG': runner.config_file,
59 'HOOK_TYPE': hook_type,
60 'INSTALL_PYTHON': sys.executable,
61 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,
62 }
63
64 with io.open(hook_path, 'w') as hook_file:
65 with io.open(resource_filename('hook-tmpl')) as f:
66 contents = f.read()
67 before, rest = contents.split(TEMPLATE_START)
68 to_template, after = rest.split(TEMPLATE_END)
69
70 hook_file.write(before + TEMPLATE_START)
71 for line in to_template.splitlines():
72 var = line.split()[0]
73 hook_file.write('{} = {!r}\n'.format(var, params[var]))
74 hook_file.write(TEMPLATE_END + after)
75 make_executable(hook_path)
76
77 output.write_line('pre-commit installed at {}'.format(hook_path))
78
79 # If they requested we install all of the hooks, do so.
80 if hooks:
81 install_hooks(runner)
82
83 return 0
84
85
86 def install_hooks(runner):
87 for repository in runner.repositories:
88 repository.require_installed()
89
90
91 def uninstall(runner, hook_type='pre-commit'):
92 """Uninstall the pre-commit hooks."""
93 hook_path = runner.get_hook_path(hook_type)
94 legacy_path = hook_path + '.legacy'
95 # If our file doesn't exist or it isn't ours, gtfo.
96 if not os.path.exists(hook_path) or not is_our_script(hook_path):
97 return 0
98
99 os.remove(hook_path)
100 output.write_line('{} uninstalled'.format(hook_type))
101
102 if os.path.exists(legacy_path):
103 os.rename(legacy_path, hook_path)
104 output.write_line('Restored previous hooks to {}'.format(hook_path))
105
106 return 0
107
[end of pre_commit/commands/install_uninstall.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -2,15 +2,19 @@
from __future__ import unicode_literals
import io
+import logging
import os.path
import sys
from pre_commit import output
+from pre_commit.util import cmd_output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
+logger = logging.getLogger(__name__)
+
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
@@ -36,6 +40,13 @@
skip_on_missing_conf=False,
):
"""Install the pre-commit hooks."""
+ if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
+ logger.error(
+ 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
+ 'hint: `git config --unset-all core.hooksPath`',
+ )
+ return 1
+
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -2,15 +2,19 @@\n from __future__ import unicode_literals\n \n import io\n+import logging\n import os.path\n import sys\n \n from pre_commit import output\n+from pre_commit.util import cmd_output\n from pre_commit.util import make_executable\n from pre_commit.util import mkdirp\n from pre_commit.util import resource_filename\n \n \n+logger = logging.getLogger(__name__)\n+\n # This is used to identify the hook file we install\n PRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n@@ -36,6 +40,13 @@\n skip_on_missing_conf=False,\n ):\n \"\"\"Install the pre-commit hooks.\"\"\"\n+ if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():\n+ logger.error(\n+ 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n+ 'hint: `git config --unset-all core.hooksPath`',\n+ )\n+ return 1\n+\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n", "issue": "Handle when `core.hooksPath` is set?\nAs we found in https://github.com/pre-commit/pre-commit-hooks/issues/250, pre-commit (despite being installed) will be silently skipped if `core.hooksPath` is set.\r\n\r\nA few options:\r\n- during `pre-commit install`, check this variable and warn\r\n- \"\" but error\r\n- install into the directory at `core.hooksPath` (but it may be outside the working dir? probably not the best idea to write to it)\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport os.path\nimport sys\n\nfrom pre_commit import output\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import mkdirp\nfrom pre_commit.util import resource_filename\n\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef is_our_script(filename):\n if not os.path.exists(filename):\n return False\n contents = io.open(filename).read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef install(\n runner, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n):\n \"\"\"Install the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n\n mkdirp(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n 'Running in migration mode with existing hooks at {}\\n'\n 'Use -f to use only pre-commit.'.format(legacy_path),\n )\n\n params = {\n 'CONFIG': runner.config_file,\n 'HOOK_TYPE': hook_type,\n 'INSTALL_PYTHON': sys.executable,\n 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,\n }\n\n with io.open(hook_path, 'w') as hook_file:\n with io.open(resource_filename('hook-tmpl')) as f:\n contents = f.read()\n before, rest = 
contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write('{} = {!r}\\n'.format(var, params[var]))\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line('pre-commit installed at {}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n install_hooks(runner)\n\n return 0\n\n\ndef install_hooks(runner):\n for repository in runner.repositories:\n repository.require_installed()\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 1,775 | 309 |
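The guard added by the golden diff above is worth isolating: `git` ignores `.git/hooks` entirely whenever `core.hooksPath` is configured, so a hook installed there can never run. A minimal sketch of the check, lifted from the diff itself (`cmd_output` is pre-commit's subprocess wrapper; the function wrappers around it are illustrative):

```python
import logging

from pre_commit.util import cmd_output

logger = logging.getLogger(__name__)


def hooks_path_is_set():
    # `git config` exits non-zero when the key is unset; retcode=None
    # suppresses that so we can simply test the (possibly empty) stdout.
    return bool(cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip())


def install_guard():
    if hooks_path_is_set():
        logger.error(
            'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
            'hint: `git config --unset-all core.hooksPath`',
        )
        return 1
    return 0
```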
gh_patches_debug_8601 | rasdani/github-patches | git_diff | getmoto__moto-589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
botocore.exceptions.ConnectionClosedError raised when calling change_resource_record_sets (Boto3)
I am not sure whether or not I should expect this to work, but I see there are currently similar tests in moto against boto so I thought I would inquire.
When using the Route53 client from boto3, a call to the change_resource_record_sets method raises a botocore.exceptions.ConnectionClosedError. (botocore.exceptions.ConnectionClosedError: Connection was closed before we received a valid response from endpoint URL: "https://route53.amazonaws.com/2013-04-01/hostedzone/cc11c883/rrset/")
A test case to reproduce is below.
``` python
import boto3
import uuid
from moto import mock_route53
def guid():
return str(uuid.uuid4())
@mock_route53
def test_route53_rrset_fail():
client = boto3.client('route53')
# Create a new zone
zone_name = '{0}.com'.format(guid())
zone = client.create_hosted_zone(
Name=zone_name,
CallerReference=guid(),
HostedZoneConfig={'Comment': guid()}
)
zone_id = zone['HostedZone']['Id']
# Verify the zone is retrievable
z = client.get_hosted_zone(Id=zone_id)
assert z['HostedZone']['Id'] == zone_id
# Try to create a record set
# Raises botocore.exceptions.ConnectionClosedError
client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Comment': guid(),
'Changes': [{
'Action': 'CREATE',
'ResourceRecordSet': {
'Name': 'foo.{0}'.format(zone_name),
'Type': 'A',
'ResourceRecords': [{'Value': '1.2.3.4'}]
}
}]
}
)
```
</issue>
<code>
[start of moto/route53/urls.py]
1 from __future__ import unicode_literals
2 from . import responses
3
4 url_bases = [
5 "https://route53.amazonaws.com/201.-..-../",
6 ]
7
8 url_paths = {
9 '{0}hostedzone$': responses.list_or_create_hostzone_response,
10 '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,
11 '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,
12 '{0}healthcheck': responses.health_check_response,
13 '{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,
14 }
15
[end of moto/route53/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/moto/route53/urls.py b/moto/route53/urls.py
--- a/moto/route53/urls.py
+++ b/moto/route53/urls.py
@@ -8,7 +8,7 @@
url_paths = {
'{0}hostedzone$': responses.list_or_create_hostzone_response,
'{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,
- '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,
+ '{0}hostedzone/[^/]+/rrset/?$': responses.rrset_response,
'{0}healthcheck': responses.health_check_response,
'{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,
}
| {"golden_diff": "diff --git a/moto/route53/urls.py b/moto/route53/urls.py\n--- a/moto/route53/urls.py\n+++ b/moto/route53/urls.py\n@@ -8,7 +8,7 @@\n url_paths = {\n '{0}hostedzone$': responses.list_or_create_hostzone_response,\n '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,\n- '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,\n+ '{0}hostedzone/[^/]+/rrset/?$': responses.rrset_response,\n '{0}healthcheck': responses.health_check_response,\n '{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,\n }\n", "issue": "botocore.exceptions.ConnectionClosedError raised when calling change_resource_record_sets (Boto3)\nI am not sure whether or not I should expect this to work, but I see there are currently similar tests in moto against boto so I thought I would inquire.\n\nWhen using the Route53 client from boto3, a call to the change_resource_record_set method raises a botocore.exceptions.ConnectionClosedError. (botocore.exceptions.ConnectionClosedError: Connection was closed before we received a valid response from endpoint URL: \"https://route53.amazonaws.com/2013-04-01/hostedzone/cc11c883/rrset/\")\n\nA test case to reproduce is below.\n\n``` python\nimport boto3\nimport uuid\nfrom moto import mock_route53\n\ndef guid():\n return str(uuid.uuid4())\n\n@mock_route53\ndef test_route53_rrset_fail():\n\n client = boto3.client('route53')\n\n # Create a new zone\n zone_name = '{0}.com'.format(guid())\n zone = client.create_hosted_zone(\n Name=zone_name,\n CallerReference=guid(),\n HostedZoneConfig={'Comment': guid()}\n )\n zone_id = zone['HostedZone']['Id']\n\n # Verify the zone is retrievable\n z = client.get_hosted_zone(Id=zone_id)\n assert z['HostedZone']['Id'] == zone_id\n\n # Try to create a record set\n # Raises botocore.exceptions.ConnectionClosedError\n client.change_resource_record_sets(\n HostedZoneId=zone_id,\n ChangeBatch={\n 'Comment': guid(),\n 'Changes': [{\n 'Action': 'CREATE',\n 'ResourceRecordSet': {\n 'Name': 'foo.{0}'.format(zone_name),\n 'Type': 'A',\n 'ResourceRecords': [{'Value': '1.2.3.4'}]\n }\n }]\n }\n )\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom . import responses\n\nurl_bases = [\n \"https://route53.amazonaws.com/201.-..-../\",\n]\n\nurl_paths = {\n '{0}hostedzone$': responses.list_or_create_hostzone_response,\n '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,\n '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,\n '{0}healthcheck': responses.health_check_response,\n '{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,\n}\n", "path": "moto/route53/urls.py"}]} | 1,107 | 177 |
gh_patches_debug_9212 | rasdani/github-patches | git_diff | jazzband__pip-tools-956 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add python 3.8 support
#### What's the problem this feature will solve?
<!-- What are you trying to do, that you are unable to achieve with pip-tools as it currently stands? -->
Python 3.8 is released, so it's time to support it.
#### Describe the solution you'd like
<!-- A clear and concise description of what you want to happen. -->
1. add "py37" env to `tox.ini`
1. remove 3.8-dev from `.travis.yml`
1. add "Programming Language :: Python :: 3.8" classifier to `setup.py`
1. add "3.8" dimension to `.travis.yml` (supported, see https://travis-ci.community/t/add-python-3-8-support/5463)
1. add "py37" dimension to `.appveyor.yml` (not supported yet, but will be on the nex image update, tracking issue: https://github.com/appveyor/ci/issues/3142)
1. add "3.8" to python-version list in `.github/workflows/cron.yml` (not supported yet, tracking issue: https://github.com/actions/setup-python/issues/30)
<!-- Provide examples of real-world use cases that this would enable and how it solves the problem described above. -->
#### Alternative Solutions
<!-- Have you tried to workaround the problem using pip-tools or other tools? Or a different approach to solving this issue? Please elaborate here. -->
N/A
#### Additional context
<!-- Add any other context, links, etc. about the feature here. -->
https://discuss.python.org/t/python-3-8-0-is-now-available/2478
</issue>
<code>
[start of setup.py]
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 from os.path import abspath, dirname, join
5
6 from setuptools import find_packages, setup
7
8
9 def read_file(filename):
10 """Read the contents of a file located relative to setup.py"""
11 with open(join(abspath(dirname(__file__)), filename)) as thefile:
12 return thefile.read()
13
14
15 setup(
16 name="pip-tools",
17 use_scm_version=True,
18 url="https://github.com/jazzband/pip-tools/",
19 license="BSD",
20 author="Vincent Driessen",
21 author_email="[email protected]",
22 description=__doc__.strip(),
23 long_description=read_file("README.rst"),
24 long_description_content_type="text/x-rst",
25 packages=find_packages(exclude=["tests"]),
26 package_data={},
27 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
28 setup_requires=["setuptools_scm"],
29 install_requires=["click>=6", "six"],
30 zip_safe=False,
31 entry_points={
32 "console_scripts": [
33 "pip-compile = piptools.scripts.compile:cli",
34 "pip-sync = piptools.scripts.sync:cli",
35 ]
36 },
37 platforms="any",
38 classifiers=[
39 "Development Status :: 5 - Production/Stable",
40 "Intended Audience :: Developers",
41 "Intended Audience :: System Administrators",
42 "License :: OSI Approved :: BSD License",
43 "Operating System :: OS Independent",
44 "Programming Language :: Python",
45 "Programming Language :: Python :: 2",
46 "Programming Language :: Python :: 2.7",
47 "Programming Language :: Python :: 3",
48 "Programming Language :: Python :: 3.5",
49 "Programming Language :: Python :: 3.6",
50 "Programming Language :: Python :: 3.7",
51 "Programming Language :: Python :: Implementation :: CPython",
52 "Programming Language :: Python :: Implementation :: PyPy",
53 "Topic :: System :: Systems Administration",
54 ],
55 )
56
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -48,6 +48,7 @@
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: System :: Systems Administration",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -48,6 +48,7 @@\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: System :: Systems Administration\",\n", "issue": "Add python 3.8 support\n#### What's the problem this feature will solve?\r\n<!-- What are you trying to do, that you are unable to achieve with pip-tools as it currently stands? -->\r\n\r\nPython 3.8 is released, so it's time to support it. \r\n\r\n#### Describe the solution you'd like\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n1. add \"py37\" env to `tox.ini`\r\n1. remove 3.8-dev from `.travis.yml`\r\n1. add \"Programming Language :: Python :: 3.8\" classifier to `setup.py`\r\n1. add \"3.8\" dimension to `.travis.yml` (supported, see https://travis-ci.community/t/add-python-3-8-support/5463)\r\n1. add \"py37\" dimension to `.appveyor.yml` (not supported yet, but will be on the nex image update, tracking issue: https://github.com/appveyor/ci/issues/3142)\r\n1. add \"3.8\" to python-version list in `.github/workflows/cron.yml` (not supported yet, tracking issue: https://github.com/actions/setup-python/issues/30)\r\n\r\n<!-- Provide examples of real-world use cases that this would enable and how it solves the problem described above. -->\r\n\r\n#### Alternative Solutions\r\n<!-- Have you tried to workaround the problem using pip-tools or other tools? Or a different approach to solving this issue? Please elaborate here. -->\r\n\r\nN/A\r\n\r\n#### Additional context\r\n<!-- Add any other context, links, etc. about the feature here. 
-->\r\n\r\nhttps://discuss.python.org/t/python-3-8-0-is-now-available/2478\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\n\nfrom setuptools import find_packages, setup\n\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\n\nsetup(\n name=\"pip-tools\",\n use_scm_version=True,\n url=\"https://github.com/jazzband/pip-tools/\",\n license=\"BSD\",\n author=\"Vincent Driessen\",\n author_email=\"[email protected]\",\n description=__doc__.strip(),\n long_description=read_file(\"README.rst\"),\n long_description_content_type=\"text/x-rst\",\n packages=find_packages(exclude=[\"tests\"]),\n package_data={},\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n setup_requires=[\"setuptools_scm\"],\n install_requires=[\"click>=6\", \"six\"],\n zip_safe=False,\n entry_points={\n \"console_scripts\": [\n \"pip-compile = piptools.scripts.compile:cli\",\n \"pip-sync = piptools.scripts.sync:cli\",\n ]\n },\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: System :: Systems Administration\",\n ],\n)\n", "path": "setup.py"}]} | 1,431 | 115 |
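Trove classifiers are pure metadata, so the change above cannot affect runtime behaviour; the only thing worth checking is that the advertised versions stay consistent with `python_requires`. A small consistency-check sketch (the version list is an assumption written out here, not parsed from `setup.py`):

```python
# CPython versions allowed by ">=2.7, !=3.0.*, !=3.1.*, ..., !=3.4.*"
# at the time of the issue, with 3.8 newly added.
supported = ['2.7', '3.5', '3.6', '3.7', '3.8']

classifiers = [
    'Programming Language :: Python :: {}'.format(version)
    for version in supported
]

# After the patch, the 3.8 classifier must be among them.
assert 'Programming Language :: Python :: 3.8' in classifiers
```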
gh_patches_debug_23998 | rasdani/github-patches | git_diff | dotkom__onlineweb4-203 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Auth templates do not use crispy forms
https://github.com/dotKom/onlineweb4/commit/26ae7847c2907895e6842061a848a2c0f47090a0
Håvard did some weird shit. Undo this and test that it still works.
</issue>
<code>
[start of apps/authentication/urls.py]
1 # -*- coding: utf-8 -*-
2
3 from django.conf.urls import patterns, url
4
5 urlpatterns = patterns('apps.authentication.views',
6 url(r'^login/$', 'login', name='auth_login'),
7 url(r'^logout/$', 'logout', name='auth_logout'),
8 url(r'^register/$', 'register', name='auth_register'),
9 url(r'^verify/(\w+)/$', 'verify', name='auth_verify'),
10 url(r'^recover/$', 'recover', name='auth_recover'),
11 url(r'^set_password/(\w+)/$', 'set_password', name='auth_set_password'),
12 )
13
[end of apps/authentication/urls.py]
[start of apps/authentication/forms.py]
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import re
5
6 from django import forms
7 from django.contrib import auth
8
9 from apps.authentication.models import OnlineUser as User
10
11 class LoginForm(forms.Form):
12 username = forms.CharField(widget=forms.TextInput(), label="Username", max_length=50)
13 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="Password")
14 user = None
15
16 def clean(self):
17 if self._errors:
18 return
19
20 user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])
21
22 if user:
23 if user.is_active:
24 self.user = user
25 else:
26 self._errors['username'] = self.error_class(["Your account is inactive, try to recover it."])
27 else:
28 self._errors['username'] = self.error_class(["The account does not exist, or username/password combination is incorrect."])
29 return self.cleaned_data
30
31 def login(self, request):
32 try:
33 User.objects.get(username=request.POST['username'])
34 except:
35 return False
36 if self.is_valid():
37 auth.login(request, self.user)
38 request.session.set_expiry(0)
39 return True
40 return False
41
42 class RegisterForm(forms.Form):
43 username = forms.CharField(label="Username", max_length=20)
44 first_name = forms.CharField(label="First name", max_length=50)
45 last_name = forms.CharField(label="Last name", max_length=50)
46 email = forms.EmailField(label="Email", max_length=50)
47 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="Password")
48 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="Repeat password")
49 address = forms.CharField(label="Address", max_length=50)
50 zip_code = forms.CharField(label="ZIP code", max_length=4)
51 phone = forms.CharField(label="Phone number", max_length=20)
52
53 def clean(self):
54 super(RegisterForm, self).clean()
55 if self.is_valid():
56 cleaned_data = self.cleaned_data
57
58 # Check passwords
59 if cleaned_data['password'] != cleaned_data['repeat_password']:
60 self._errors['repeat_password'] = self.error_class(["Passwords did not match."])
61
62 # Check username
63 username = cleaned_data['username']
64 if User.objects.filter(username=username).count() > 0:
65 self._errors['username'] = self.error_class(["There is already a user with that username."])
66 if not re.match("^[a-zA-Z0-9_-]+$", username):
67 self._errors['username'] = self.error_class(["Your desired username contains illegal characters. Valid: a-Z 0-9 - _"])
68
69 # Check email
70 email = cleaned_data['email']
71 if User.objects.filter(email=email).count() > 0:
72 self._errors['email'] = self.error_class(["There is already a user with that email."])
73
74 # ZIP code digits only
75 zip_code = cleaned_data['zip_code']
76 if len(zip_code) != 4 or not zip_code.isdigit():
77 self._errors['zip_code'] = self.error_class(["The ZIP code must be 4 digit number."])
78
79 return cleaned_data
80
81 class RecoveryForm(forms.Form):
82 email = forms.EmailField(label="Email", max_length=50)
83
84 class ChangePasswordForm(forms.Form):
85 old_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="Old password", required=False)
86 new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="New password")
87 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="Repeat new password")
88
89 def clean(self):
90 super(ChangePasswordForm, self).clean()
91 if self.is_valid():
92 cleaned_data = self.cleaned_data
93
94 # Check passwords
95 if cleaned_data['new_password'] != cleaned_data['repeat_password']:
96 self._errors['repeat_password'] = self.error_class(["Passwords did not match."])
97
98 return cleaned_data
99
[end of apps/authentication/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py
--- a/apps/authentication/forms.py
+++ b/apps/authentication/forms.py
@@ -82,7 +82,6 @@
email = forms.EmailField(label="Email", max_length=50)
class ChangePasswordForm(forms.Form):
- old_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="Old password", required=False)
new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="New password")
repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label="Repeat new password")
diff --git a/apps/authentication/urls.py b/apps/authentication/urls.py
--- a/apps/authentication/urls.py
+++ b/apps/authentication/urls.py
@@ -6,7 +6,7 @@
url(r'^login/$', 'login', name='auth_login'),
url(r'^logout/$', 'logout', name='auth_logout'),
url(r'^register/$', 'register', name='auth_register'),
- url(r'^verify/(\w+)/$', 'verify', name='auth_verify'),
+ url(r'^verify/(?P<token>\w+)/$', 'verify', name='auth_verify'),
url(r'^recover/$', 'recover', name='auth_recover'),
- url(r'^set_password/(\w+)/$', 'set_password', name='auth_set_password'),
+ url(r'^set_password/(?P<token>\w+)/$', 'set_password', name='auth_set_password'),
)
| {"golden_diff": "diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py\n--- a/apps/authentication/forms.py\n+++ b/apps/authentication/forms.py\n@@ -82,7 +82,6 @@\n email = forms.EmailField(label=\"Email\", max_length=50)\n \n class ChangePasswordForm(forms.Form):\n- old_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"Old password\", required=False)\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"New password\")\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"Repeat new password\")\n \ndiff --git a/apps/authentication/urls.py b/apps/authentication/urls.py\n--- a/apps/authentication/urls.py\n+++ b/apps/authentication/urls.py\n@@ -6,7 +6,7 @@\n url(r'^login/$', 'login', name='auth_login'),\n url(r'^logout/$', 'logout', name='auth_logout'),\n url(r'^register/$', 'register', name='auth_register'),\n- url(r'^verify/(\\w+)/$', 'verify', name='auth_verify'),\n+ url(r'^verify/(?P<token>\\w+)/$', 'verify', name='auth_verify'),\n url(r'^recover/$', 'recover', name='auth_recover'),\n- url(r'^set_password/(\\w+)/$', 'set_password', name='auth_set_password'),\n+ url(r'^set_password/(?P<token>\\w+)/$', 'set_password', name='auth_set_password'),\n )\n", "issue": "Auth templates does not use crispy forms\nhttps://github.com/dotKom/onlineweb4/commit/26ae7847c2907895e6842061a848a2c0f47090a0\n\nH\u00e5vard did some weird shit. Undo this and test that it still works.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('apps.authentication.views',\n url(r'^login/$', 'login', name='auth_login'),\n url(r'^logout/$', 'logout', name='auth_logout'),\n url(r'^register/$', 'register', name='auth_register'),\n url(r'^verify/(\\w+)/$', 'verify', name='auth_verify'),\n url(r'^recover/$', 'recover', name='auth_recover'),\n url(r'^set_password/(\\w+)/$', 'set_password', name='auth_set_password'),\n)\n", "path": "apps/authentication/urls.py"}, {"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\n\nfrom apps.authentication.models import OnlineUser as User\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=\"Username\", max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"Password\")\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([\"Your account is inactive, try to recover it.\"])\n else:\n self._errors['username'] = self.error_class([\"The account does not exist, or username/password combination is incorrect.\"])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n request.session.set_expiry(0)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=\"Username\", max_length=20)\n first_name = forms.CharField(label=\"First name\", max_length=50)\n last_name = forms.CharField(label=\"Last name\", max_length=50)\n email = forms.EmailField(label=\"Email\", max_length=50)\n password = 
forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"Password\")\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"Repeat password\")\n address = forms.CharField(label=\"Address\", max_length=50)\n zip_code = forms.CharField(label=\"ZIP code\", max_length=4)\n phone = forms.CharField(label=\"Phone number\", max_length=20)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([\"Passwords did not match.\"])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([\"There is already a user with that username.\"])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([\"Your desired username contains illegal characters. Valid: a-Z 0-9 - _\"])\n\n # Check email\n email = cleaned_data['email']\n if User.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([\"There is already a user with that email.\"])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([\"The ZIP code must be 4 digit number.\"])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n old_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"Old password\", required=False)\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"New password\")\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=\"Repeat new password\")\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([\"Passwords did not match.\"])\n\n return cleaned_data\n", "path": "apps/authentication/forms.py"}]} | 1,820 | 316 |
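The golden diff above does two things: removes the unused `old_password` field and switches the token URLs to named capture groups. The second change matters whenever the view takes the parameter by keyword, because Django passes unnamed groups positionally and named groups by name. A sketch of the patched pattern (only the URL lines mirror the diff; a view signature such as `def verify(request, token)` is assumed):

```python
from django.conf.urls import patterns, url

urlpatterns = patterns(
    'apps.authentication.views',
    # (?P<token>...) hands the captured value to the view as token=...
    url(r'^verify/(?P<token>\w+)/$', 'verify', name='auth_verify'),
    url(r'^set_password/(?P<token>\w+)/$', 'set_password',
        name='auth_set_password'),
)
```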
gh_patches_debug_60844 | rasdani/github-patches | git_diff | uclapi__uclapi-128 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Search People should return HTTP status 400 when query is missing
Currently, the `/search/people` endpoint returns an HTTP 200 code even for an incorrect API request. For example, if you leave out the `query` param it returns the following body:
```json
{ "error": "No query provided", "ok": false}
```
Yet, the HTTP status code is 200, while it should be 400.
</issue>
<code>
[start of backend/uclapi/search/views.py]
1 from rest_framework.decorators import api_view
2 from django.http import JsonResponse
3
4 from roombookings.decorators import does_token_exist, log_api_call, throttle
5
6 import os
7 import requests
8
9
10 @api_view(['GET'])
11 @does_token_exist
12 @throttle
13 @log_api_call
14 def people(request):
15 if "query" not in request.GET:
16 return JsonResponse({
17 "ok": False,
18 "error": "No query provided"
19 })
20
21 query = request.GET["query"]
22
23 url = (
24 "{}?{}={}"
25 .format(
26 os.environ["SEARCH_API_URL"],
27 os.environ["SEARCH_API_QUERY_PARAMS"],
28 query,
29 )
30 )
31
32 r = requests.get(url)
33
34 results = r.json()["response"]["resultPacket"]["results"][:20]
35
36 def serialize_person(person):
37 return {
38 "name": person["title"],
39 "department": person["metaData"].get("7", ""),
40 "email": person["metaData"].get("E", ""),
41 "status": person["metaData"].get("g", ""),
42 }
43
44 people = [serialize_person(person) for person in results]
45
46 return JsonResponse({
47 "ok": True,
48 "people": people
49 })
50
[end of backend/uclapi/search/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/search/views.py b/backend/uclapi/search/views.py
--- a/backend/uclapi/search/views.py
+++ b/backend/uclapi/search/views.py
@@ -13,10 +13,12 @@
@log_api_call
def people(request):
if "query" not in request.GET:
- return JsonResponse({
+ response = JsonResponse({
"ok": False,
- "error": "No query provided"
+ "error": "No query provided."
})
+ response.status_code = 400
+ return response
query = request.GET["query"]
| {"golden_diff": "diff --git a/backend/uclapi/search/views.py b/backend/uclapi/search/views.py\n--- a/backend/uclapi/search/views.py\n+++ b/backend/uclapi/search/views.py\n@@ -13,10 +13,12 @@\n @log_api_call\n def people(request):\n if \"query\" not in request.GET:\n- return JsonResponse({\n+ response = JsonResponse({\n \"ok\": False,\n- \"error\": \"No query provided\"\n+ \"error\": \"No query provided.\"\n })\n+ response.status_code = 400\n+ return response\n \n query = request.GET[\"query\"]\n", "issue": "[Bug] Search People should return HTTP status 400 when query is missing\nCurrently, the `/search/people` returns a HTTP 200 code when even for an incorrect API request. For example, if you leave out the `query` param it returns the following body:\r\n\r\n```json\r\n{ \"error\": \"No query provided\", \"ok\": false}\r\n```\r\n\r\nYet, the HTTP status code is 200, while it should be 400.\r\n\n", "before_files": [{"content": "from rest_framework.decorators import api_view\nfrom django.http import JsonResponse\n\nfrom roombookings.decorators import does_token_exist, log_api_call, throttle\n\nimport os\nimport requests\n\n\n@api_view(['GET'])\n@does_token_exist\n@throttle\n@log_api_call\ndef people(request):\n if \"query\" not in request.GET:\n return JsonResponse({\n \"ok\": False,\n \"error\": \"No query provided\"\n })\n\n query = request.GET[\"query\"]\n\n url = (\n \"{}?{}={}\"\n .format(\n os.environ[\"SEARCH_API_URL\"],\n os.environ[\"SEARCH_API_QUERY_PARAMS\"],\n query,\n )\n )\n\n r = requests.get(url)\n\n results = r.json()[\"response\"][\"resultPacket\"][\"results\"][:20]\n\n def serialize_person(person):\n return {\n \"name\": person[\"title\"],\n \"department\": person[\"metaData\"].get(\"7\", \"\"),\n \"email\": person[\"metaData\"].get(\"E\", \"\"),\n \"status\": person[\"metaData\"].get(\"g\", \"\"),\n }\n\n people = [serialize_person(person) for person in results]\n\n return JsonResponse({\n \"ok\": True,\n \"people\": people\n })\n", "path": "backend/uclapi/search/views.py"}]} | 996 | 137 |
gh_patches_debug_14186 | rasdani/github-patches | git_diff | bokeh__bokeh-4129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs for styling selection overlays
There is currently no way to style the box or poly overlays that various selection tools use.
</issue>
<code>
[start of sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py]
1 import numpy as np
2
3 from bokeh.models import BoxSelectTool, BoxZoomTool, LassoSelectTool
4 from bokeh.plotting import figure, output_file, show
5
6 output_file("styling_tool_overlays.html")
7
8 x = np.random.random(size=200)
9 y = np.random.random(size=200)
10
11 # Basic plot setup
12 plot = figure(width=400, height=400, title='Select and Zoom',
13 tools="box_select,box_zoom,lasso_select,reset")
14
15 plot.circle(x, y, size=5)
16
17 plot.select_one(BoxSelectTool).overlay.fill_color = "firebrick"
18 plot.select_one(BoxSelectTool).overlay.line_color = None
19
20 plot.select_one(BoxZoomTool).overlay.line_color = "olive"
21 plot.select_one(BoxZoomTool).overlay.line_width = 8
22 plot.select_one(BoxZoomTool).overlay.line_dash = "solid"
23 plot.select_one(BoxZoomTool).overlay.fill_color = None
24
25 plot.select_one(LassoSelectTool).overlay.line_dash = [10, 10]
26
27 show(plot)
[end of sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py b/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py
--- a/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py
+++ b/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py
@@ -14,14 +14,18 @@
plot.circle(x, y, size=5)
-plot.select_one(BoxSelectTool).overlay.fill_color = "firebrick"
-plot.select_one(BoxSelectTool).overlay.line_color = None
+select_overlay = plot.select_one(BoxSelectTool).overlay
-plot.select_one(BoxZoomTool).overlay.line_color = "olive"
-plot.select_one(BoxZoomTool).overlay.line_width = 8
-plot.select_one(BoxZoomTool).overlay.line_dash = "solid"
-plot.select_one(BoxZoomTool).overlay.fill_color = None
+select_overlay.fill_color = "firebrick"
+select_overlay.line_color = None
+
+zoom_overlay = plot.select_one(BoxZoomTool).overlay
+
+zoom_overlay.line_color = "olive"
+zoom_overlay.line_width = 8
+zoom_overlay.line_dash = "solid"
+zoom_overlay.fill_color = None
plot.select_one(LassoSelectTool).overlay.line_dash = [10, 10]
-show(plot)
\ No newline at end of file
+show(plot)
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py b/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py\n--- a/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py\n+++ b/sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py\n@@ -14,14 +14,18 @@\n \n plot.circle(x, y, size=5)\n \n-plot.select_one(BoxSelectTool).overlay.fill_color = \"firebrick\"\n-plot.select_one(BoxSelectTool).overlay.line_color = None\n+select_overlay = plot.select_one(BoxSelectTool).overlay\n \n-plot.select_one(BoxZoomTool).overlay.line_color = \"olive\"\n-plot.select_one(BoxZoomTool).overlay.line_width = 8\n-plot.select_one(BoxZoomTool).overlay.line_dash = \"solid\"\n-plot.select_one(BoxZoomTool).overlay.fill_color = None\n+select_overlay.fill_color = \"firebrick\"\n+select_overlay.line_color = None\n+\n+zoom_overlay = plot.select_one(BoxZoomTool).overlay\n+\n+zoom_overlay.line_color = \"olive\"\n+zoom_overlay.line_width = 8\n+zoom_overlay.line_dash = \"solid\"\n+zoom_overlay.fill_color = None\n \n plot.select_one(LassoSelectTool).overlay.line_dash = [10, 10]\n \n-show(plot)\n\\ No newline at end of file\n+show(plot)\n", "issue": "Docs for styling selection overlays\nThere is currently no way to style the box or poly overlays that various selection tools use. \n\n", "before_files": [{"content": "import numpy as np\n\nfrom bokeh.models import BoxSelectTool, BoxZoomTool, LassoSelectTool\nfrom bokeh.plotting import figure, output_file, show\n\noutput_file(\"styling_tool_overlays.html\")\n\nx = np.random.random(size=200)\ny = np.random.random(size=200)\n\n# Basic plot setup\nplot = figure(width=400, height=400, title='Select and Zoom',\n tools=\"box_select,box_zoom,lasso_select,reset\")\n\nplot.circle(x, y, size=5)\n\nplot.select_one(BoxSelectTool).overlay.fill_color = \"firebrick\"\nplot.select_one(BoxSelectTool).overlay.line_color = None\n\nplot.select_one(BoxZoomTool).overlay.line_color = \"olive\"\nplot.select_one(BoxZoomTool).overlay.line_width = 8\nplot.select_one(BoxZoomTool).overlay.line_dash = \"solid\"\nplot.select_one(BoxZoomTool).overlay.fill_color = None\n\nplot.select_one(LassoSelectTool).overlay.line_dash = [10, 10]\n\nshow(plot)", "path": "sphinx/source/docs/user_guide/source_examples/styling_tool_overlays.py"}]} | 858 | 303 |
gh_patches_debug_8429 | rasdani/github-patches | git_diff | scrapy__scrapy-1644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_monkeypatches.py: 'NoneType' object has no attribute 'startswith'
I have not yet tried to come up with a minimal example to demonstrate this issue, but it is reproducible for me:
```
$> datalad --dbg crawl
Traceback (most recent call last):
File "/home/yoh/proj/datalad/datalad/venv-tests/bin/datalad", line 9, in <module>
load_entry_point('datalad==0.1.dev0', 'console_scripts', 'datalad')()
File "/home/yoh/proj/datalad/datalad/datalad/cmdline/main.py", line 199, in main
cmdlineargs.func(cmdlineargs)
File "/home/yoh/proj/datalad/datalad/datalad/interface/base.py", line 151, in call_from_parser
return self(**kwargs)
File "/home/yoh/proj/datalad/datalad/datalad/interface/crawl.py", line 44, in __call__
from datalad.crawler.pipeline import load_pipeline_from_config, get_pipeline_config_path
File "/home/yoh/proj/datalad/datalad/datalad/crawler/pipeline.py", line 21, in <module>
from .newmain import lgr
File "/home/yoh/proj/datalad/datalad/datalad/crawler/newmain.py", line 21, in <module>
from .nodes.matches import *
File "/home/yoh/proj/datalad/datalad/datalad/crawler/nodes/matches.py", line 18, in <module>
from scrapy.selector import Selector
File "/home/yoh/proj/datalad/datalad/venv-tests/local/lib/python2.7/site-packages/scrapy/__init__.py", line 27, in <module>
from . import _monkeypatches
File "/home/yoh/proj/datalad/datalad/venv-tests/local/lib/python2.7/site-packages/scrapy/_monkeypatches.py", line 24, in <module>
and getattr(v, '__module__', '').startswith('twisted'):
AttributeError: 'NoneType' object has no attribute 'startswith'
()
> /home/yoh/proj/datalad/datalad/venv-tests/local/lib/python2.7/site-packages/scrapy/_monkeypatches.py(24)<module>()
-> and getattr(v, '__module__', '').startswith('twisted'):
(Pdb) l
19 # to prevent bugs like Twisted#7989 while serializing requests
20 import twisted.persisted.styles # NOQA
21 # Remove only entries with twisted serializers for non-twisted types.
22 for k, v in frozenset(copyreg.dispatch_table.items()):
23 if not getattr(k, '__module__', '').startswith('twisted') \
24 -> and getattr(v, '__module__', '').startswith('twisted'):
25 copyreg.dispatch_table.pop(k)
[EOF]
(Pdb) p k
None
(Pdb) p v
None
(Pdb) p copyreg
None
```
not sure how it came to this, but the issue is (if I drop into pdb before this madness happens):
```
(Pdb) p getattr(k, '__module__', '')
'__builtin__'
(Pdb) p getattr(v, '__module__', '')
None
(Pdb) p v
<function mpq_reducer at 0x7f474bb4ab90>
(Pdb) p v.__module__
None
(Pdb) p k, v
(<type 'mpq'>, <function mpq_reducer at 0x7f474bb4ab90>)
```
so the assigned `__module__` is None. As a quick resolution, I wrapped it in a str() call to ensure a str there:
```
and str(getattr(v, '__module__', '')).startswith('twisted'):
```
</issue>
<code>
[start of scrapy/_monkeypatches.py]
1 import sys
2 from six.moves import copyreg
3
4 if sys.version_info[0] == 2:
5 from urlparse import urlparse
6
7 # workaround for http://bugs.python.org/issue7904 - Python < 2.7
8 if urlparse('s3://bucket/key').netloc != 'bucket':
9 from urlparse import uses_netloc
10 uses_netloc.append('s3')
11
12 # workaround for http://bugs.python.org/issue9374 - Python < 2.7.4
13 if urlparse('s3://bucket/key?key=value').query != 'key=value':
14 from urlparse import uses_query
15 uses_query.append('s3')
16
17
18 # Undo what Twisted's perspective broker adds to pickle register
19 # to prevent bugs like Twisted#7989 while serializing requests
20 import twisted.persisted.styles # NOQA
21 # Remove only entries with twisted serializers for non-twisted types.
22 for k, v in frozenset(copyreg.dispatch_table.items()):
23 if not getattr(k, '__module__', '').startswith('twisted') \
24 and getattr(v, '__module__', '').startswith('twisted'):
25 copyreg.dispatch_table.pop(k)
26
[end of scrapy/_monkeypatches.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/_monkeypatches.py b/scrapy/_monkeypatches.py
--- a/scrapy/_monkeypatches.py
+++ b/scrapy/_monkeypatches.py
@@ -20,6 +20,6 @@
import twisted.persisted.styles # NOQA
# Remove only entries with twisted serializers for non-twisted types.
for k, v in frozenset(copyreg.dispatch_table.items()):
- if not getattr(k, '__module__', '').startswith('twisted') \
- and getattr(v, '__module__', '').startswith('twisted'):
+ if not str(getattr(k, '__module__', '')).startswith('twisted') \
+ and str(getattr(v, '__module__', '')).startswith('twisted'):
copyreg.dispatch_table.pop(k)
| {"golden_diff": "diff --git a/scrapy/_monkeypatches.py b/scrapy/_monkeypatches.py\n--- a/scrapy/_monkeypatches.py\n+++ b/scrapy/_monkeypatches.py\n@@ -20,6 +20,6 @@\n import twisted.persisted.styles # NOQA\n # Remove only entries with twisted serializers for non-twisted types.\n for k, v in frozenset(copyreg.dispatch_table.items()):\n- if not getattr(k, '__module__', '').startswith('twisted') \\\n- and getattr(v, '__module__', '').startswith('twisted'):\n+ if not str(getattr(k, '__module__', '')).startswith('twisted') \\\n+ and str(getattr(v, '__module__', '')).startswith('twisted'):\n copyreg.dispatch_table.pop(k)\n", "issue": "_monkeypatches.py: 'NoneType' object has no attribute 'startswith'\nDid not try yet to come up with minimal example to demonstrate this issue but it is reproducible for me:\n\n```\n$> datalad --dbg crawl\nTraceback (most recent call last):\n File \"/home/yoh/proj/datalad/datalad/venv-tests/bin/datalad\", line 9, in <module>\n load_entry_point('datalad==0.1.dev0', 'console_scripts', 'datalad')()\n File \"/home/yoh/proj/datalad/datalad/datalad/cmdline/main.py\", line 199, in main\n cmdlineargs.func(cmdlineargs)\n File \"/home/yoh/proj/datalad/datalad/datalad/interface/base.py\", line 151, in call_from_parser\n return self(**kwargs)\n File \"/home/yoh/proj/datalad/datalad/datalad/interface/crawl.py\", line 44, in __call__\n from datalad.crawler.pipeline import load_pipeline_from_config, get_pipeline_config_path\n File \"/home/yoh/proj/datalad/datalad/datalad/crawler/pipeline.py\", line 21, in <module>\n from .newmain import lgr\n File \"/home/yoh/proj/datalad/datalad/datalad/crawler/newmain.py\", line 21, in <module>\n from .nodes.matches import *\n File \"/home/yoh/proj/datalad/datalad/datalad/crawler/nodes/matches.py\", line 18, in <module>\n from scrapy.selector import Selector\n File \"/home/yoh/proj/datalad/datalad/venv-tests/local/lib/python2.7/site-packages/scrapy/__init__.py\", line 27, in <module>\n from . import _monkeypatches\n File \"/home/yoh/proj/datalad/datalad/venv-tests/local/lib/python2.7/site-packages/scrapy/_monkeypatches.py\", line 24, in <module>\n and getattr(v, '__module__', '').startswith('twisted'):\nAttributeError: 'NoneType' object has no attribute 'startswith'\n()\n> /home/yoh/proj/datalad/datalad/venv-tests/local/lib/python2.7/site-packages/scrapy/_monkeypatches.py(24)<module>()\n-> and getattr(v, '__module__', '').startswith('twisted'):\n(Pdb) l\n 19 # to prevent bugs like Twisted#7989 while serializing requests\n 20 import twisted.persisted.styles # NOQA\n 21 # Remove only entries with twisted serializers for non-twisted types.\n 22 for k, v in frozenset(copyreg.dispatch_table.items()):\n 23 if not getattr(k, '__module__', '').startswith('twisted') \\\n 24 -> and getattr(v, '__module__', '').startswith('twisted'):\n 25 copyreg.dispatch_table.pop(k)\n[EOF]\n(Pdb) p k\nNone\n(Pdb) p v\nNone\n(Pdb) p copyreg\nNone\n```\n\nnot sure it came to it but the issue is (if I pdb before this madness happens):\n\n```\n(Pdb) p getattr(k, '__module__', '')\n'__builtin__'\n(Pdb) p getattr(v, '__module__', '')\nNone\n(Pdb) p v\n<function mpq_reducer at 0x7f474bb4ab90>\n(Pdb) p v.__module__\nNone\n(Pdb) p k, v\n(<type 'mpq'>, <function mpq_reducer at 0x7f474bb4ab90>)\n```\n\nso assigned `__module__` is None. 
As a quick resolution wrapped into str() call to assure str there\n\n```\nand str(getattr(v, '__module__', '')).startswith('twisted'):\n```\n\n", "before_files": [{"content": "import sys\nfrom six.moves import copyreg\n\nif sys.version_info[0] == 2:\n from urlparse import urlparse\n\n # workaround for http://bugs.python.org/issue7904 - Python < 2.7\n if urlparse('s3://bucket/key').netloc != 'bucket':\n from urlparse import uses_netloc\n uses_netloc.append('s3')\n\n # workaround for http://bugs.python.org/issue9374 - Python < 2.7.4\n if urlparse('s3://bucket/key?key=value').query != 'key=value':\n from urlparse import uses_query\n uses_query.append('s3')\n\n\n# Undo what Twisted's perspective broker adds to pickle register\n# to prevent bugs like Twisted#7989 while serializing requests\nimport twisted.persisted.styles # NOQA\n# Remove only entries with twisted serializers for non-twisted types.\nfor k, v in frozenset(copyreg.dispatch_table.items()):\n if not getattr(k, '__module__', '').startswith('twisted') \\\n and getattr(v, '__module__', '').startswith('twisted'):\n copyreg.dispatch_table.pop(k)\n", "path": "scrapy/_monkeypatches.py"}]} | 1,686 | 164 |
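The root cause in the record above deserves a standalone repro: `getattr`'s default applies only when the attribute is missing, not when it exists and is `None`, so gmpy's `mpq_reducer` (whose `__module__` is `None`) bypasses the `''` fallback and `.startswith` blows up. A self-contained illustration (the local function stands in for `mpq_reducer`):

```python
def fake_reducer():
    pass


fake_reducer.__module__ = None  # what gmpy's mpq_reducer reports

value = getattr(fake_reducer, '__module__', '')
print(value)  # None -- the '' default is NOT used for a present attribute
# value.startswith('twisted')  would raise AttributeError, as in the issue

safe = str(getattr(fake_reducer, '__module__', ''))
print(safe.startswith('twisted'))  # False -- the patched, safe form
```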
gh_patches_debug_41775 | rasdani/github-patches | git_diff | kymatio__kymatio-185 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sphinx-gallery: 2d/plot_filters
the wavelets do not display.
Please close this issue only when you're happy with the sphinx-gallery.
</issue>
<code>
[start of examples/2d/plot_filters.py]
1 """
2 Plot the 2D wavelet filters
3 ===========================
4 See :meth:`scattering.scattering1d.filter_bank` for more informations about the used wavelets.
5 """
6
7 import numpy as np
8 import matplotlib.pyplot as plt
9 from kymatio.scattering2d.filter_bank import filter_bank
10 from kymatio.scattering2d.utils import fft2
11
12
13 ###############################################################################
14 # Initial parameters of the filter bank
15 # -------------------------------------
16 M = 32
17 J = 3
18 L = 8
19 filters_set = filter_bank(M, M, J, L=L)
20
21
22 ###############################################################################
23 # Imshow complex images
24 # -------------------------------------
25 # Thanks to https://stackoverflow.com/questions/17044052/mathplotlib-imshow-complex-2d-array
26 from colorsys import hls_to_rgb
27 def colorize(z):
28 n, m = z.shape
29 c = np.zeros((n, m, 3))
30 c[np.isinf(z)] = (1.0, 1.0, 1.0)
31 c[np.isnan(z)] = (0.5, 0.5, 0.5)
32
33 idx = ~(np.isinf(z) + np.isnan(z))
34 A = (np.angle(z[idx]) + np.pi) / (2*np.pi)
35 A = (A + 0.5) % 1.0
36 B = 1.0/(1.0+abs(z[idx])**0.3)
37 c[idx] = [hls_to_rgb(a, b, 0.8) for a,b in zip(A,B)]
38 return c
39
40 fig, axs = plt.subplots(J+1, L, sharex=True, sharey=True)
41 plt.rc('text', usetex=True)
42 plt.rc('font', family='serif')
43
44 ###############################################################################
45 # Bandpass filters
46 # ----------------
47 # First, we display each wavelets according to each scale and orientation.
48 i=0
49 for filter in filters_set['psi']:
50 f_r = filter[0][...,0].numpy()
51 f_i = filter[0][..., 1].numpy()
52 f = f_r + 1j*f_i
53 filter_c = fft2(f)
54 filter_c = np.fft.fftshift(filter_c)
55 axs[i // L, i % L].imshow(colorize(filter_c))
56 axs[i // L, i % L].axis('off')
57 axs[i // L, i % L].set_title("$j = {}$ \n $\\theta={}".format(i // L, i % L))
58 i = i+1
59
60
61 # Add blanks for pretty display
62 for z in range(L):
63 axs[i // L, i % L].axis('off')
64 i = i+1
65
66 ###############################################################################
67 # Lowpass filter
68 # ----------------
69 # We finally display the Gaussian filter.
70 f_r = filters_set['phi'][0][...,0].numpy()
71 f_i = filters_set['phi'][0][..., 1].numpy()
72 f = f_r + 1j*f_i
73 filter_c = fft2(f)
74 filter_c = np.fft.fftshift(filter_c)
75 axs[J, L // 2].imshow(colorize(filter_c))
76
77 # Final caption.
78 fig.suptitle("Wavelets for each scales $j$ and angles $\\theta$ used, with the corresponding low-pass filter."
79 "\n The contrast corresponds to the amplitude and the color to the phase.", fontsize=13)
80
81
82 plt.show()
83
[end of examples/2d/plot_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/2d/plot_filters.py b/examples/2d/plot_filters.py
--- a/examples/2d/plot_filters.py
+++ b/examples/2d/plot_filters.py
@@ -1,11 +1,11 @@
"""
Plot the 2D wavelet filters
===========================
-See :meth:`scattering.scattering1d.filter_bank` for more informations about the used wavelets.
+See :meth:`kymatio.scattering2d.filter_bank` for more informations about the used wavelets.
"""
-import numpy as np
import matplotlib.pyplot as plt
+import numpy as np
from kymatio.scattering2d.filter_bank import filter_bank
from kymatio.scattering2d.utils import fft2
@@ -18,10 +18,9 @@
L = 8
filters_set = filter_bank(M, M, J, L=L)
-
###############################################################################
# Imshow complex images
-# -------------------------------------
+# ---------------------
# Thanks to https://stackoverflow.com/questions/17044052/mathplotlib-imshow-complex-2d-array
from colorsys import hls_to_rgb
def colorize(z):
@@ -37,14 +36,15 @@
c[idx] = [hls_to_rgb(a, b, 0.8) for a,b in zip(A,B)]
return c
-fig, axs = plt.subplots(J+1, L, sharex=True, sharey=True)
-plt.rc('text', usetex=True)
-plt.rc('font', family='serif')
-
###############################################################################
# Bandpass filters
# ----------------
-# First, we display each wavelets according to each scale and orientation.
+# First, we display each wavelet according to its scale and orientation.
+fig, axs = plt.subplots(J, L, sharex=True, sharey=True)
+fig.set_figheight(6)
+fig.set_figwidth(6)
+plt.rc('text', usetex=True)
+plt.rc('font', family='serif')
i=0
for filter in filters_set['psi']:
f_r = filter[0][...,0].numpy()
@@ -54,29 +54,30 @@
filter_c = np.fft.fftshift(filter_c)
axs[i // L, i % L].imshow(colorize(filter_c))
axs[i // L, i % L].axis('off')
- axs[i // L, i % L].set_title("$j = {}$ \n $\\theta={}".format(i // L, i % L))
+ axs[i // L, i % L].set_title("$j = {}$ \n $\\theta={}$".format(i // L, i % L))
i = i+1
-
-# Add blanks for pretty display
-for z in range(L):
- axs[i // L, i % L].axis('off')
- i = i+1
+fig.suptitle("Wavelets for each scales $j$ and angles $\\theta$ used."
+"\n Color saturation and color hue respectively denote complex magnitude and complex phase.", fontsize=13)
+fig.show()
###############################################################################
# Lowpass filter
-# ----------------
-# We finally display the Gaussian filter.
-f_r = filters_set['phi'][0][...,0].numpy()
+# --------------
+# We finally display the low-pass filter.
+plt.figure()
+plt.rc('text', usetex=True)
+plt.rc('font', family='serif')
+plt.axis('off')
+plt.set_cmap('gray_r')
+
+f_r = filters_set['phi'][0][..., 0].numpy()
f_i = filters_set['phi'][0][..., 1].numpy()
f = f_r + 1j*f_i
+
filter_c = fft2(f)
filter_c = np.fft.fftshift(filter_c)
-axs[J, L // 2].imshow(colorize(filter_c))
-
-# Final caption.
-fig.suptitle("Wavelets for each scales $j$ and angles $\\theta$ used, with the corresponding low-pass filter."
- "\n The contrast corresponds to the amplitude and the color to the phase.", fontsize=13)
-
-
-plt.show()
+plt.suptitle("The corresponding low-pass filter, also known as scaling function."
+"Color saturation and color hue respectively denote complex magnitude and complex phase", fontsize=13)
+filter_c = np.abs(filter_c)
+plt.imshow(filter_c)
| {"golden_diff": "diff --git a/examples/2d/plot_filters.py b/examples/2d/plot_filters.py\n--- a/examples/2d/plot_filters.py\n+++ b/examples/2d/plot_filters.py\n@@ -1,11 +1,11 @@\n \"\"\"\n Plot the 2D wavelet filters\n ===========================\n-See :meth:`scattering.scattering1d.filter_bank` for more informations about the used wavelets.\n+See :meth:`kymatio.scattering2d.filter_bank` for more informations about the used wavelets.\n \"\"\"\n \n-import numpy as np\n import matplotlib.pyplot as plt\n+import numpy as np\n from kymatio.scattering2d.filter_bank import filter_bank\n from kymatio.scattering2d.utils import fft2\n \n@@ -18,10 +18,9 @@\n L = 8\n filters_set = filter_bank(M, M, J, L=L)\n \n-\n ###############################################################################\n # Imshow complex images\n-# -------------------------------------\n+# ---------------------\n # Thanks to https://stackoverflow.com/questions/17044052/mathplotlib-imshow-complex-2d-array\n from colorsys import hls_to_rgb\n def colorize(z):\n@@ -37,14 +36,15 @@\n c[idx] = [hls_to_rgb(a, b, 0.8) for a,b in zip(A,B)]\n return c\n \n-fig, axs = plt.subplots(J+1, L, sharex=True, sharey=True)\n-plt.rc('text', usetex=True)\n-plt.rc('font', family='serif')\n-\n ###############################################################################\n # Bandpass filters\n # ----------------\n-# First, we display each wavelets according to each scale and orientation.\n+# First, we display each wavelet according to its scale and orientation.\n+fig, axs = plt.subplots(J, L, sharex=True, sharey=True)\n+fig.set_figheight(6)\n+fig.set_figwidth(6)\n+plt.rc('text', usetex=True)\n+plt.rc('font', family='serif')\n i=0\n for filter in filters_set['psi']:\n f_r = filter[0][...,0].numpy()\n@@ -54,29 +54,30 @@\n filter_c = np.fft.fftshift(filter_c)\n axs[i // L, i % L].imshow(colorize(filter_c))\n axs[i // L, i % L].axis('off')\n- axs[i // L, i % L].set_title(\"$j = {}$ \\n $\\\\theta={}\".format(i // L, i % L))\n+ axs[i // L, i % L].set_title(\"$j = {}$ \\n $\\\\theta={}$\".format(i // L, i % L))\n i = i+1\n \n-\n-# Add blanks for pretty display\n-for z in range(L):\n- axs[i // L, i % L].axis('off')\n- i = i+1\n+fig.suptitle(\"Wavelets for each scales $j$ and angles $\\\\theta$ used.\"\n+\"\\n Color saturation and color hue respectively denote complex magnitude and complex phase.\", fontsize=13)\n+fig.show()\n \n ###############################################################################\n # Lowpass filter\n-# ----------------\n-# We finally display the Gaussian filter.\n-f_r = filters_set['phi'][0][...,0].numpy()\n+# --------------\n+# We finally display the low-pass filter.\n+plt.figure()\n+plt.rc('text', usetex=True)\n+plt.rc('font', family='serif')\n+plt.axis('off')\n+plt.set_cmap('gray_r')\n+\n+f_r = filters_set['phi'][0][..., 0].numpy()\n f_i = filters_set['phi'][0][..., 1].numpy()\n f = f_r + 1j*f_i\n+\n filter_c = fft2(f)\n filter_c = np.fft.fftshift(filter_c)\n-axs[J, L // 2].imshow(colorize(filter_c))\n-\n-# Final caption.\n-fig.suptitle(\"Wavelets for each scales $j$ and angles $\\\\theta$ used, with the corresponding low-pass filter.\"\n- \"\\n The contrast corresponds to the amplitude and the color to the phase.\", fontsize=13)\n-\n-\n-plt.show()\n+plt.suptitle(\"The corresponding low-pass filter, also known as scaling function.\"\n+\"Color saturation and color hue respectively denote complex magnitude and complex phase\", fontsize=13)\n+filter_c = np.abs(filter_c)\n+plt.imshow(filter_c)\n", "issue": "sphinx-gallery: 
2d/plot_filters\nthe wavelets does not display.\r\n\r\nPlease close this issue only when you're happy with the sphinx-gallery.\n", "before_files": [{"content": "\"\"\"\nPlot the 2D wavelet filters\n===========================\nSee :meth:`scattering.scattering1d.filter_bank` for more informations about the used wavelets.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom kymatio.scattering2d.filter_bank import filter_bank\nfrom kymatio.scattering2d.utils import fft2\n\n\n###############################################################################\n# Initial parameters of the filter bank\n# -------------------------------------\nM = 32\nJ = 3\nL = 8\nfilters_set = filter_bank(M, M, J, L=L)\n\n\n###############################################################################\n# Imshow complex images\n# -------------------------------------\n# Thanks to https://stackoverflow.com/questions/17044052/mathplotlib-imshow-complex-2d-array\nfrom colorsys import hls_to_rgb\ndef colorize(z):\n n, m = z.shape\n c = np.zeros((n, m, 3))\n c[np.isinf(z)] = (1.0, 1.0, 1.0)\n c[np.isnan(z)] = (0.5, 0.5, 0.5)\n\n idx = ~(np.isinf(z) + np.isnan(z))\n A = (np.angle(z[idx]) + np.pi) / (2*np.pi)\n A = (A + 0.5) % 1.0\n B = 1.0/(1.0+abs(z[idx])**0.3)\n c[idx] = [hls_to_rgb(a, b, 0.8) for a,b in zip(A,B)]\n return c\n\nfig, axs = plt.subplots(J+1, L, sharex=True, sharey=True)\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\n###############################################################################\n# Bandpass filters\n# ----------------\n# First, we display each wavelets according to each scale and orientation.\ni=0\nfor filter in filters_set['psi']:\n f_r = filter[0][...,0].numpy()\n f_i = filter[0][..., 1].numpy()\n f = f_r + 1j*f_i\n filter_c = fft2(f)\n filter_c = np.fft.fftshift(filter_c)\n axs[i // L, i % L].imshow(colorize(filter_c))\n axs[i // L, i % L].axis('off')\n axs[i // L, i % L].set_title(\"$j = {}$ \\n $\\\\theta={}\".format(i // L, i % L))\n i = i+1\n\n\n# Add blanks for pretty display\nfor z in range(L):\n axs[i // L, i % L].axis('off')\n i = i+1\n\n###############################################################################\n# Lowpass filter\n# ----------------\n# We finally display the Gaussian filter.\nf_r = filters_set['phi'][0][...,0].numpy()\nf_i = filters_set['phi'][0][..., 1].numpy()\nf = f_r + 1j*f_i\nfilter_c = fft2(f)\nfilter_c = np.fft.fftshift(filter_c)\naxs[J, L // 2].imshow(colorize(filter_c))\n\n# Final caption.\nfig.suptitle(\"Wavelets for each scales $j$ and angles $\\\\theta$ used, with the corresponding low-pass filter.\"\n \"\\n The contrast corresponds to the amplitude and the color to the phase.\", fontsize=13)\n\n\nplt.show()\n", "path": "examples/2d/plot_filters.py"}]} | 1,469 | 962 |
gh_patches_debug_31257 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Corriger et refactoriser remove_url_scheme et ses tests
On a une fonction utilitaire qui s'appelle [remove_url_scheme](https://github.com/zestedesavoir/zds-site/blob/03c8f316c46e51d42afb8b8d2d9553cdd8fb0f08/zds/utils/templatetags/remove_url_scheme.py#L9), qui permet d'enlever le schéma des urls (http ou https) et le nom du domaine, afin de toujours servir les ressources locales au site avec le bon protocole (http ou https).
**Description du problème**
Le problème actuellement, c'est qu'elle gère mal le nom du domaine spécifié dans l'environnement de dev, à savoir `ZDS_APP['site']['dns'] = 127.0.0.1:8000`, ce qui a pour effet de faire rater un des tests (zds.utils.tests.test_misc.Misc.test_remove_url_scheme), mais en **local seulement**, pas sur Travis. Il s'agit donc d'un faux positif sur l'environnement de dev, ce qui est pénible.
**Comportement attendu**
On devrait avoir le bon fonctionnement sur l'environnement de dev en gérant correctement le numéro de port à la fin de l'url. Au passage, on devrait aussi :
* utiliser `urllib.parse` au lieu du module de `six` ;
* réunir tous les tests de `remove_url_scheme` dans le bon fichier (actuellement c'est réparti dans `test_misc` et un fichier `test_remove_url_scheme`).
</issue>
<code>
[start of zds/utils/templatetags/remove_url_scheme.py]
1 from django import template
2 from django.conf import settings
3 from six.moves import urllib_parse as urlparse
4
5 register = template.Library()
6
7
8 @register.filter('remove_url_scheme')
9 def remove_url_scheme(input_url):
10 """
11 make every image url pointing to this website protocol independant so that if we use https, we are sure
12 that all our media are served with this protocol.
13
14 .. notice::
15
16 this also removes the ``settings.ZDS_APP['site']['dns']`` from the url.
17
18 :return: the url without its scheme, e.g. ``http://zestedesavoir.com/media/gallery/1/1.png`` becomes
19 ``/media/gallery/1/1.png``
20
21 """
22
23 schemeless_url = input_url[len(urlparse.urlparse(input_url).scheme):]
24 schemeless_url = schemeless_url[len('://'):] if schemeless_url.startswith('://') else schemeless_url
25 if schemeless_url.startswith(settings.ZDS_APP['site']['dns']):
26 return schemeless_url[len(settings.ZDS_APP['site']['dns']):]
27 return input_url
28
[end of zds/utils/templatetags/remove_url_scheme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/templatetags/remove_url_scheme.py b/zds/utils/templatetags/remove_url_scheme.py
--- a/zds/utils/templatetags/remove_url_scheme.py
+++ b/zds/utils/templatetags/remove_url_scheme.py
@@ -1,27 +1,37 @@
+import urllib.parse
+
from django import template
from django.conf import settings
-from six.moves import urllib_parse as urlparse
register = template.Library()
@register.filter('remove_url_scheme')
-def remove_url_scheme(input_url):
+def remove_url_scheme(url):
"""
- make every image url pointing to this website protocol independant so that if we use https, we are sure
- that all our media are served with this protocol.
-
- .. notice::
+ Remove the scheme and hostname from a URL if it is internal, but leave it unchanged otherwise.
- this also removes the ``settings.ZDS_APP['site']['dns']`` from the url.
+ The internal hostname is determined using the value of ``ZDS_APP['site']['dns']``.
+ URLs with no scheme are accepted. URLs with no hostname are treated as internal.
- :return: the url without its scheme, e.g. ``http://zestedesavoir.com/media/gallery/1/1.png`` becomes
- ``/media/gallery/1/1.png``
+ For example, ``http://zestedesavoir.com/media/gallery/1/1.png`` becomes ``/media/gallery/1/1.png``,
+ whereas ``/media/gallery/1/1.png`` and ``example.com/media/gallery/1/1.png`` stay the same.
+ :return: the url without its scheme and hostname.
"""
- schemeless_url = input_url[len(urlparse.urlparse(input_url).scheme):]
- schemeless_url = schemeless_url[len('://'):] if schemeless_url.startswith('://') else schemeless_url
- if schemeless_url.startswith(settings.ZDS_APP['site']['dns']):
- return schemeless_url[len(settings.ZDS_APP['site']['dns']):]
- return input_url
+ # Parse URLs after adding a prefix if necessary (e.g 'zestedesavoir.com' becomes '//zestedesavoir.com')
+ url_normalized = url
+ if '//' not in url:
+ url_normalized = '//' + url
+ url_parsed = urllib.parse.urlsplit(url_normalized)
+
+ # Return external URLs unchanged
+ if url_parsed.netloc != settings.ZDS_APP['site']['dns']:
+ return url
+
+ # Clean internal URLs
+ url_noscheme = urllib.parse.urlunsplit(['', '', url_parsed.path, url_parsed.query, url_parsed.fragment])
+ url_cleaned = url_noscheme[0:] # remove first "/"
+
+ return url_cleaned
| {"golden_diff": "diff --git a/zds/utils/templatetags/remove_url_scheme.py b/zds/utils/templatetags/remove_url_scheme.py\n--- a/zds/utils/templatetags/remove_url_scheme.py\n+++ b/zds/utils/templatetags/remove_url_scheme.py\n@@ -1,27 +1,37 @@\n+import urllib.parse\n+\n from django import template\n from django.conf import settings\n-from six.moves import urllib_parse as urlparse\n \n register = template.Library()\n \n \n @register.filter('remove_url_scheme')\n-def remove_url_scheme(input_url):\n+def remove_url_scheme(url):\n \"\"\"\n- make every image url pointing to this website protocol independant so that if we use https, we are sure\n- that all our media are served with this protocol.\n-\n- .. notice::\n+ Remove the scheme and hostname from a URL if it is internal, but leave it unchanged otherwise.\n \n- this also removes the ``settings.ZDS_APP['site']['dns']`` from the url.\n+ The internal hostname is determined using the value of ``ZDS_APP['site']['dns']``.\n+ URLs with no scheme are accepted. URLs with no hostname are treated as internal.\n \n- :return: the url without its scheme, e.g. ``http://zestedesavoir.com/media/gallery/1/1.png`` becomes\n- ``/media/gallery/1/1.png``\n+ For example, ``http://zestedesavoir.com/media/gallery/1/1.png`` becomes ``/media/gallery/1/1.png``,\n+ whereas ``/media/gallery/1/1.png`` and ``example.com/media/gallery/1/1.png`` stay the same.\n \n+ :return: the url without its scheme and hostname.\n \"\"\"\n \n- schemeless_url = input_url[len(urlparse.urlparse(input_url).scheme):]\n- schemeless_url = schemeless_url[len('://'):] if schemeless_url.startswith('://') else schemeless_url\n- if schemeless_url.startswith(settings.ZDS_APP['site']['dns']):\n- return schemeless_url[len(settings.ZDS_APP['site']['dns']):]\n- return input_url\n+ # Parse URLs after adding a prefix if necessary (e.g 'zestedesavoir.com' becomes '//zestedesavoir.com')\n+ url_normalized = url\n+ if '//' not in url:\n+ url_normalized = '//' + url\n+ url_parsed = urllib.parse.urlsplit(url_normalized)\n+\n+ # Return external URLs unchanged\n+ if url_parsed.netloc != settings.ZDS_APP['site']['dns']:\n+ return url\n+\n+ # Clean internal URLs\n+ url_noscheme = urllib.parse.urlunsplit(['', '', url_parsed.path, url_parsed.query, url_parsed.fragment])\n+ url_cleaned = url_noscheme[0:] # remove first \"/\"\n+\n+ return url_cleaned\n", "issue": "Corriger et refactoriser remove_url_scheme et ses tests\nOn a une fonction utilitaire qui s'appelle [remove_url_scheme](https://github.com/zestedesavoir/zds-site/blob/03c8f316c46e51d42afb8b8d2d9553cdd8fb0f08/zds/utils/templatetags/remove_url_scheme.py#L9), qui permet d'enlever le sch\u00e9ma des urls (http ou https) et le nom du domaine, afin de toujours servir les ressources locales au site avec le bon protocole (http ou https).\r\n\r\n**Description du probl\u00e8me**\r\n\r\nLe probl\u00e8me actuellement, c'est qu'elle g\u00e8re mal le nom du domaine sp\u00e9cifi\u00e9 dans l'environnement de dev, \u00e0 savoir `ZDS_APP['site']['dns'] = 127.0.0.1:8000`, ce qui a pour effet de faire rater un des tests (zds.utils.tests.test_misc.Misc.test_remove_url_scheme), mais en **local seulement**, pas sur Travis. Il s'agit donc d'un faux positif sur l'environnement de dev, ce qui est p\u00e9nible.\r\n\r\n**Comportement attendu**\r\n\r\nOn devrait avoir le bon fonctionnement sur l'environnement de dev en g\u00e9rant correctement le num\u00e9ro de port \u00e0 la fin de l'url. 
Au passage, on devrait aussi :\r\n\r\n* utiliser `urllib.parse` au lieu du module de `six` ;\r\n* r\u00e9unir tous les tests de `remove_url_scheme` dans le bon fichier (actuellement c'est r\u00e9parti dans `test_misc` et un fichier `test_remove_url_scheme`).\r\n\n", "before_files": [{"content": "from django import template\nfrom django.conf import settings\nfrom six.moves import urllib_parse as urlparse\n\nregister = template.Library()\n\n\[email protected]('remove_url_scheme')\ndef remove_url_scheme(input_url):\n \"\"\"\n make every image url pointing to this website protocol independant so that if we use https, we are sure\n that all our media are served with this protocol.\n\n .. notice::\n\n this also removes the ``settings.ZDS_APP['site']['dns']`` from the url.\n\n :return: the url without its scheme, e.g. ``http://zestedesavoir.com/media/gallery/1/1.png`` becomes\n ``/media/gallery/1/1.png``\n\n \"\"\"\n\n schemeless_url = input_url[len(urlparse.urlparse(input_url).scheme):]\n schemeless_url = schemeless_url[len('://'):] if schemeless_url.startswith('://') else schemeless_url\n if schemeless_url.startswith(settings.ZDS_APP['site']['dns']):\n return schemeless_url[len(settings.ZDS_APP['site']['dns']):]\n return input_url\n", "path": "zds/utils/templatetags/remove_url_scheme.py"}]} | 1,194 | 626 |
gh_patches_debug_10073 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2457 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider sheetz is broken
During the global build at 2021-06-23-14-42-18, spider **sheetz** failed with **526 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/sheetz.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/sheetz.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/sheetz.geojson))
</issue>
<code>
[start of locations/spiders/sheetz.py]
1 import json
2 import re
3 import scrapy
4 from locations.items import GeojsonPointItem
5
6
7 class SheetzSpider(scrapy.Spider):
8 name = "sheetz"
9 item_attributes = {'brand': "Sheetz"}
10 allowed_domains = ["orderz.sheetz.com"]
11 start_urls = (
12 "https://orderz.sheetz.com/sas/store",
13 )
14
15 def parse(self, response):
16 stores = json.loads(response.body_as_unicode())
17
18 for store in stores:
19 properties = {
20 'addr_full': store['address'],
21 'city': store['city'],
22 'state': store['state'],
23 'postcode': store['zip'],
24 'ref': store['storeNumber'],
25 'phone': store['phone'],
26 'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'],
27 'lat': float(store['latitude']),
28 'lon': float(store['longitude']),
29 'opening_hours': '24/7' if store['open24x7'] else None,
30 'extras': {
31 'amenity:chargingstation': store['evCharger'],
32 'amenity:fuel': True,
33 'atm': store['atm'],
34 'car_wash': store['carWash'],
35 'fax': store['fax'] if 'fax' in store else None,
36 'fuel:diesel': store['diesel'],
37 'fuel:e15': store['e15'],
38 'fuel:e85': store['e85'],
39 'fuel:kerosene': store['kerosene'],
40 'fuel:propane': store['propane'],
41 }
42 }
43
44 yield GeojsonPointItem(**properties)
45
[end of locations/spiders/sheetz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/sheetz.py b/locations/spiders/sheetz.py
--- a/locations/spiders/sheetz.py
+++ b/locations/spiders/sheetz.py
@@ -22,7 +22,7 @@
'state': store['state'],
'postcode': store['zip'],
'ref': store['storeNumber'],
- 'phone': store['phone'],
+ 'phone': store.get('phone'),
'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'],
'lat': float(store['latitude']),
'lon': float(store['longitude']),
| {"golden_diff": "diff --git a/locations/spiders/sheetz.py b/locations/spiders/sheetz.py\n--- a/locations/spiders/sheetz.py\n+++ b/locations/spiders/sheetz.py\n@@ -22,7 +22,7 @@\n 'state': store['state'],\n 'postcode': store['zip'],\n 'ref': store['storeNumber'],\n- 'phone': store['phone'],\n+ 'phone': store.get('phone'),\n 'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'],\n 'lat': float(store['latitude']),\n 'lon': float(store['longitude']),\n", "issue": "Spider sheetz is broken\nDuring the global build at 2021-06-23-14-42-18, spider **sheetz** failed with **526 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/sheetz.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/sheetz.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/sheetz.geojson))\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass SheetzSpider(scrapy.Spider):\n name = \"sheetz\"\n item_attributes = {'brand': \"Sheetz\"}\n allowed_domains = [\"orderz.sheetz.com\"]\n start_urls = (\n \"https://orderz.sheetz.com/sas/store\",\n )\n\n def parse(self, response):\n stores = json.loads(response.body_as_unicode())\n\n for store in stores:\n properties = {\n 'addr_full': store['address'],\n 'city': store['city'],\n 'state': store['state'],\n 'postcode': store['zip'],\n 'ref': store['storeNumber'],\n 'phone': store['phone'],\n 'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'],\n 'lat': float(store['latitude']),\n 'lon': float(store['longitude']),\n 'opening_hours': '24/7' if store['open24x7'] else None,\n 'extras': {\n 'amenity:chargingstation': store['evCharger'],\n 'amenity:fuel': True,\n 'atm': store['atm'],\n 'car_wash': store['carWash'],\n 'fax': store['fax'] if 'fax' in store else None,\n 'fuel:diesel': store['diesel'],\n 'fuel:e15': store['e15'],\n 'fuel:e85': store['e85'],\n 'fuel:kerosene': store['kerosene'],\n 'fuel:propane': store['propane'],\n }\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/sheetz.py"}]} | 1,170 | 142 |
gh_patches_debug_8051 | rasdani/github-patches | git_diff | mne-tools__mne-bids-67 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ADD: Configure CircleCI
So that we can check the artifacts tab for builds of the docs
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 from setuptools import setup
3
4 descr = """Experimental code for BIDS using MNE."""
5
6 DISTNAME = 'mne-bids'
7 DESCRIPTION = descr
8 MAINTAINER = 'Alexandre Gramfort'
9 MAINTAINER_EMAIL = '[email protected]'
10 URL = 'http://martinos.org/mne'
11 LICENSE = 'BSD (3-clause)'
12 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'
13 VERSION = '0.1.dev0'
14
15 if __name__ == "__main__":
16 setup(name=DISTNAME,
17 maintainer=MAINTAINER,
18 maintainer_email=MAINTAINER_EMAIL,
19 description=DESCRIPTION,
20 license=LICENSE,
21 url=URL,
22 version=VERSION,
23 download_url=DOWNLOAD_URL,
24 long_description=open('README.md').read(),
25 classifiers=[
26 'Intended Audience :: Science/Research',
27 'Intended Audience :: Developers',
28 'License :: OSI Approved',
29 'Programming Language :: Python',
30 'Topic :: Software Development',
31 'Topic :: Scientific/Engineering',
32 'Operating System :: Microsoft :: Windows',
33 'Operating System :: POSIX',
34 'Operating System :: Unix',
35 'Operating System :: MacOS',
36 ],
37 platforms='any',
38 packages=[
39 'mne_bids'
40 ],
41 scripts=['bin/mne_bids']
42 )
43
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from setuptools import setup
+from setuptools import setup, find_packages
descr = """Experimental code for BIDS using MNE."""
@@ -35,8 +35,6 @@
'Operating System :: MacOS',
],
platforms='any',
- packages=[
- 'mne_bids'
- ],
+ packages=find_packages(),
scripts=['bin/mne_bids']
-)
+ )
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,5 @@\n #! /usr/bin/env python\n-from setuptools import setup\n+from setuptools import setup, find_packages\n \n descr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n \n@@ -35,8 +35,6 @@\n 'Operating System :: MacOS',\n ],\n platforms='any',\n- packages=[\n- 'mne_bids'\n- ],\n+ packages=find_packages(),\n scripts=['bin/mne_bids']\n-)\n+ )\n", "issue": "ADD: Configure CircleCI\nSo that we can check the artifacts tab for builds of the docs\n", "before_files": [{"content": "#! /usr/bin/env python\nfrom setuptools import setup\n\ndescr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Alexandre Gramfort'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://martinos.org/mne'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\nVERSION = '0.1.dev0'\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.md').read(),\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=[\n 'mne_bids'\n ],\n scripts=['bin/mne_bids']\n)\n", "path": "setup.py"}]} | 921 | 127 |
gh_patches_debug_12829 | rasdani/github-patches | git_diff | feast-dev__feast-456 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deduplicate example notebooks
Currently we have two sets of example notebooks for Feast
1. [Examples](https://github.com/gojek/feast/tree/master/examples/basic)
2. [Docker compose](https://github.com/gojek/feast/tree/master/infra/docker-compose/jupyter/notebooks)
The docker compose notebooks can be deduplicated so that all examples are only contained in the root of the project. This would make management easier.
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from setuptools import find_packages, setup
18
19 NAME = "feast"
20 DESCRIPTION = "Python SDK for Feast"
21 URL = "https://github.com/gojek/feast"
22 AUTHOR = "Feast"
23 REQUIRES_PYTHON = ">=3.6.0"
24
25 REQUIRED = [
26 "Click==7.*",
27 "google-api-core==1.14.*",
28 "google-auth==1.6.*",
29 "google-cloud-bigquery==1.18.*",
30 "google-cloud-storage==1.20.*",
31 "google-cloud-core==1.0.*",
32 "googleapis-common-protos==1.*",
33 "google-cloud-bigquery-storage==0.7.*",
34 "grpcio==1.*",
35 "pandas==0.*",
36 "pandavro==1.5.*",
37 "protobuf>=3.10",
38 "PyYAML==5.1.*",
39 "fastavro==0.*",
40 "kafka-python==1.*",
41 "tabulate==0.8.*",
42 "toml==0.10.*",
43 "tqdm==4.*",
44 "pyarrow>=0.15.1",
45 "numpy",
46 "google",
47 "confluent_kafka",
48 ]
49
50 # README file from Feast repo root directory
51 README_FILE = os.path.join(os.path.dirname(__file__), "..", "..", "README.md")
52 with open(os.path.join(README_FILE), "r") as f:
53 LONG_DESCRIPTION = f.read()
54
55 setup(
56 name=NAME,
57 author=AUTHOR,
58 description=DESCRIPTION,
59 long_description=LONG_DESCRIPTION,
60 long_description_content_type="text/markdown",
61 python_requires=REQUIRES_PYTHON,
62 url=URL,
63 packages=find_packages(exclude=("tests",)),
64 install_requires=REQUIRED,
65 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
66 # Install dev requirements with: pip install -e .[dev]
67 extras_require={"dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"]},
68 include_package_data=True,
69 license="Apache",
70 classifiers=[
71 # Trove classifiers
72 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
73 "License :: OSI Approved :: Apache Software License",
74 "Programming Language :: Python",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.6",
77 ],
78 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
79 use_scm_version={"root": "../..", "relative_to": __file__},
80 setup_requires=["setuptools_scm"],
81 )
82
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -13,6 +13,7 @@
# limitations under the License.
import os
+import subprocess
from setuptools import find_packages, setup
@@ -48,7 +49,13 @@
]
# README file from Feast repo root directory
-README_FILE = os.path.join(os.path.dirname(__file__), "..", "..", "README.md")
+repo_root = (
+ subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
+ .communicate()[0]
+ .rstrip()
+ .decode("utf-8")
+)
+README_FILE = os.path.join(repo_root, "README.md")
with open(os.path.join(README_FILE), "r") as f:
LONG_DESCRIPTION = f.read()
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import os\n+import subprocess\n \n from setuptools import find_packages, setup\n \n@@ -48,7 +49,13 @@\n ]\n \n # README file from Feast repo root directory\n-README_FILE = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"README.md\")\n+repo_root = (\n+ subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n+ .communicate()[0]\n+ .rstrip()\n+ .decode(\"utf-8\")\n+)\n+README_FILE = os.path.join(repo_root, \"README.md\")\n with open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n", "issue": "Deduplicate example notebooks\nCurrently we have two sets of example notebooks for Feast\r\n1. [Examples](https://github.com/gojek/feast/tree/master/examples/basic)\r\n2. [Docker compose](https://github.com/gojek/feast/tree/master/infra/docker-compose/jupyter/notebooks)\r\n\r\nThe docker compose notebooks can be deduplicated so that all examples are only contained in the root of the project. This would make management easier.\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/gojek/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"google-api-core==1.14.*\",\n \"google-auth==1.6.*\",\n \"google-cloud-bigquery==1.18.*\",\n \"google-cloud-storage==1.20.*\",\n \"google-cloud-core==1.0.*\",\n \"googleapis-common-protos==1.*\",\n \"google-cloud-bigquery-storage==0.7.*\",\n \"grpcio==1.*\",\n \"pandas==0.*\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"PyYAML==5.1.*\",\n \"fastavro==0.*\",\n \"kafka-python==1.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n \"pyarrow>=0.15.1\",\n \"numpy\",\n \"google\",\n \"confluent_kafka\",\n]\n\n# README file from Feast repo root directory\nREADME_FILE = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"]},\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n 
\"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__},\n setup_requires=[\"setuptools_scm\"],\n)\n", "path": "sdk/python/setup.py"}]} | 1,504 | 193 |
gh_patches_debug_9337 | rasdani/github-patches | git_diff | svthalia__concrexit-2962 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix escaped HTML in promorequest email remarks field
### Describe the bug
</issue>
<code>
[start of website/promotion/emails.py]
1 """The emails defined by the promotion request package."""
2 import logging
3
4 from django.conf import settings
5
6 from promotion.models import PromotionRequest
7 from utils.snippets import send_email
8
9 logger = logging.getLogger(__name__)
10
11
12 def send_weekly_overview():
13 new_requests = PromotionRequest.new_requests.all()
14 upcoming_requests = PromotionRequest.upcoming_requests.all()
15
16 send_email(
17 to=[settings.PROMO_REQUEST_NOTIFICATION_ADDRESS],
18 subject="[PROMO] Weekly request overview",
19 txt_template="requests/weekly_overview.txt",
20 html_template="requests/weekly_overview.html",
21 context={
22 "new_requests": new_requests,
23 "upcoming_requests": upcoming_requests,
24 },
25 )
26
[end of website/promotion/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/promotion/emails.py b/website/promotion/emails.py
--- a/website/promotion/emails.py
+++ b/website/promotion/emails.py
@@ -16,8 +16,8 @@
send_email(
to=[settings.PROMO_REQUEST_NOTIFICATION_ADDRESS],
subject="[PROMO] Weekly request overview",
- txt_template="requests/weekly_overview.txt",
- html_template="requests/weekly_overview.html",
+ txt_template="promotion/email/weekly_overview.txt",
+ html_template="promotion/email/weekly_overview.html",
context={
"new_requests": new_requests,
"upcoming_requests": upcoming_requests,
| {"golden_diff": "diff --git a/website/promotion/emails.py b/website/promotion/emails.py\n--- a/website/promotion/emails.py\n+++ b/website/promotion/emails.py\n@@ -16,8 +16,8 @@\n send_email(\n to=[settings.PROMO_REQUEST_NOTIFICATION_ADDRESS],\n subject=\"[PROMO] Weekly request overview\",\n- txt_template=\"requests/weekly_overview.txt\",\n- html_template=\"requests/weekly_overview.html\",\n+ txt_template=\"promotion/email/weekly_overview.txt\",\n+ html_template=\"promotion/email/weekly_overview.html\",\n context={\n \"new_requests\": new_requests,\n \"upcoming_requests\": upcoming_requests,\n", "issue": "Fix escaped HTML in promorequest email remarks field\n### Describe the bug\n\n", "before_files": [{"content": "\"\"\"The emails defined by the promotion request package.\"\"\"\nimport logging\n\nfrom django.conf import settings\n\nfrom promotion.models import PromotionRequest\nfrom utils.snippets import send_email\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_weekly_overview():\n new_requests = PromotionRequest.new_requests.all()\n upcoming_requests = PromotionRequest.upcoming_requests.all()\n\n send_email(\n to=[settings.PROMO_REQUEST_NOTIFICATION_ADDRESS],\n subject=\"[PROMO] Weekly request overview\",\n txt_template=\"requests/weekly_overview.txt\",\n html_template=\"requests/weekly_overview.html\",\n context={\n \"new_requests\": new_requests,\n \"upcoming_requests\": upcoming_requests,\n },\n )\n", "path": "website/promotion/emails.py"}]} | 808 | 149 |