problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
---|---|---|---|---|---|---|---|---|
stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k |
gh_patches_debug_13543 | rasdani/github-patches | git_diff | pre-commit__pre-commit-33 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pre-commit -i does not install the file with +x
No executable = no run :'(
</issue>
<code>
[start of pre_commit/git.py]
1 import functools
2 import os
3 import os.path
4 import pkg_resources
5 import re
6 from plumbum import local
7
8 from pre_commit.util import memoize_by_cwd
9
10
11 def _get_root_new():
12 path = os.getcwd()
13 while len(path) > 1:
14 if os.path.exists(os.path.join(path, '.git')):
15 return path
16 else:
17 path = os.path.normpath(os.path.join(path, '../'))
18 raise AssertionError('called from outside of the gits')
19
20
21 @memoize_by_cwd
22 def get_root():
23 return _get_root_new()
24
25
26 @memoize_by_cwd
27 def get_pre_commit_path():
28 return os.path.join(get_root(), '.git/hooks/pre-commit')
29
30
31 def create_pre_commit():
32 path = get_pre_commit_path()
33 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
34 local.path(path).write(local.path(pre_commit_file).read())
35
36
37 def remove_pre_commit():
38 local.path(get_pre_commit_path()).delete()
39
40
41 def get_head_sha(git_repo_path):
42 with local.cwd(git_repo_path):
43 return local['git']['rev-parse', 'HEAD']().strip()
44
45
46 @memoize_by_cwd
47 def get_staged_files():
48 return local['git']['diff', '--staged', '--name-only']().splitlines()
49
50
51 @memoize_by_cwd
52 def get_all_files():
53 return local['git']['ls-files']().splitlines()
54
55
56 def get_files_matching(all_file_list_strategy):
57 @functools.wraps(all_file_list_strategy)
58 @memoize_by_cwd
59 def wrapper(expr):
60 regex = re.compile(expr)
61 return set(filter(os.path.exists, (
62 filename
63 for filename in all_file_list_strategy()
64 if regex.search(filename)
65 )))
66 return wrapper
67
68
69 get_staged_files_matching = get_files_matching(get_staged_files)
70 get_all_files_matching = get_files_matching(get_all_files)
71
[end of pre_commit/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -3,6 +3,7 @@
import os.path
import pkg_resources
import re
+import stat
from plumbum import local
from pre_commit.util import memoize_by_cwd
@@ -32,6 +33,8 @@
path = get_pre_commit_path()
pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
local.path(path).write(local.path(pre_commit_file).read())
+ original_mode = os.stat(path).st_mode
+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def remove_pre_commit():
| {"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -3,6 +3,7 @@\n import os.path\n import pkg_resources\n import re\n+import stat\n from plumbum import local\n \n from pre_commit.util import memoize_by_cwd\n@@ -32,6 +33,8 @@\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n+ original_mode = os.stat(path).st_mode\n+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n \n \n def remove_pre_commit():\n", "issue": "pre-commit -i does not install the file with +x\nNo executable = no run :'(\n\n", "before_files": [{"content": "import functools\nimport os\nimport os.path\nimport pkg_resources\nimport re\nfrom plumbum import local\n\nfrom pre_commit.util import memoize_by_cwd\n\n\ndef _get_root_new():\n path = os.getcwd()\n while len(path) > 1:\n if os.path.exists(os.path.join(path, '.git')):\n return path\n else:\n path = os.path.normpath(os.path.join(path, '../'))\n raise AssertionError('called from outside of the gits')\n\n\n@memoize_by_cwd\ndef get_root():\n return _get_root_new()\n\n\n@memoize_by_cwd\ndef get_pre_commit_path():\n return os.path.join(get_root(), '.git/hooks/pre-commit')\n\n\ndef create_pre_commit():\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n\n\ndef remove_pre_commit():\n local.path(get_pre_commit_path()).delete()\n\n\ndef get_head_sha(git_repo_path):\n with local.cwd(git_repo_path):\n return local['git']['rev-parse', 'HEAD']().strip()\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return local['git']['diff', '--staged', '--name-only']().splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return local['git']['ls-files']().splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(expr):\n regex = re.compile(expr)\n return set(filter(os.path.exists, (\n filename\n for filename in all_file_list_strategy()\n if regex.search(filename)\n )))\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\n", "path": "pre_commit/git.py"}]} | 1,109 | 171 |
gh_patches_debug_39662 | rasdani/github-patches | git_diff | jupyterhub__zero-to-jupyterhub-k8s-531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
culler is failing and exiting when culling users and servers are slow to stop
Two issues:
1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this
2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here.
</issue>
<code>
[start of images/hub/cull_idle_servers.py]
1 #!/usr/bin/env python3
2 # Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py
3 """script to monitor and cull idle single-user servers
4
5 Caveats:
6
7 last_activity is not updated with high frequency,
8 so cull timeout should be greater than the sum of:
9
10 - single-user websocket ping interval (default: 30s)
11 - JupyterHub.last_activity_interval (default: 5 minutes)
12
13 You can run this as a service managed by JupyterHub with this in your config::
14
15
16 c.JupyterHub.services = [
17 {
18 'name': 'cull-idle',
19 'admin': True,
20 'command': 'python cull_idle_servers.py --timeout=3600'.split(),
21 }
22 ]
23
24 Or run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:
25
26 export JUPYTERHUB_API_TOKEN=`jupyterhub token`
27 python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]
28 """
29
30 import datetime
31 import json
32 import os
33
34 from dateutil.parser import parse as parse_date
35
36 from tornado.gen import coroutine
37 from tornado.log import app_log
38 from tornado.httpclient import AsyncHTTPClient, HTTPRequest
39 from tornado.ioloop import IOLoop, PeriodicCallback
40 from tornado.options import define, options, parse_command_line
41
42
43 @coroutine
44 def cull_idle(url, api_token, timeout, cull_users=False):
45 """Shutdown idle single-user servers
46
47 If cull_users, inactive *users* will be deleted as well.
48 """
49 auth_header = {
50 'Authorization': 'token %s' % api_token
51 }
52 req = HTTPRequest(url=url + '/users',
53 headers=auth_header,
54 )
55 now = datetime.datetime.utcnow()
56 cull_limit = now - datetime.timedelta(seconds=timeout)
57 client = AsyncHTTPClient()
58 resp = yield client.fetch(req)
59 users = json.loads(resp.body.decode('utf8', 'replace'))
60 futures = []
61
62 @coroutine
63 def cull_one(user, last_activity):
64 """cull one user"""
65
66 # shutdown server first. Hub doesn't allow deleting users with running servers.
67 if user['server']:
68 app_log.info("Culling server for %s (inactive since %s)", user['name'], last_activity)
69 req = HTTPRequest(url=url + '/users/%s/server' % user['name'],
70 method='DELETE',
71 headers=auth_header,
72 )
73 yield client.fetch(req)
74 if cull_users:
75 app_log.info("Culling user %s (inactive since %s)", user['name'], last_activity)
76 req = HTTPRequest(url=url + '/users/%s' % user['name'],
77 method='DELETE',
78 headers=auth_header,
79 )
80 yield client.fetch(req)
81
82 for user in users:
83 if not user['server'] and not cull_users:
84 # server not running and not culling users, nothing to do
85 continue
86 last_activity = parse_date(user['last_activity'])
87 if last_activity < cull_limit:
88 futures.append((user['name'], cull_one(user, last_activity)))
89 else:
90 app_log.debug("Not culling %s (active since %s)", user['name'], last_activity)
91
92 for (name, f) in futures:
93 yield f
94 app_log.debug("Finished culling %s", name)
95
96
97 if __name__ == '__main__':
98 define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help="The JupyterHub API URL")
99 define('timeout', default=600, help="The idle timeout (in seconds)")
100 define('cull_every', default=0, help="The interval (in seconds) for checking for idle servers to cull")
101 define('cull_users', default=False,
102 help="""Cull users in addition to servers.
103 This is for use in temporary-user cases such as tmpnb.""",
104 )
105
106 parse_command_line()
107 if not options.cull_every:
108 options.cull_every = options.timeout // 2
109 api_token = os.environ['JUPYTERHUB_API_TOKEN']
110
111 loop = IOLoop.current()
112 cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)
113 # run once before scheduling periodic call
114 loop.run_sync(cull)
115 # schedule periodic cull
116 pc = PeriodicCallback(cull, 1e3 * options.cull_every)
117 pc.start()
118 try:
119 loop.start()
120 except KeyboardInterrupt:
121 pass
122
[end of images/hub/cull_idle_servers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py
--- a/images/hub/cull_idle_servers.py
+++ b/images/hub/cull_idle_servers.py
@@ -70,7 +70,15 @@
method='DELETE',
headers=auth_header,
)
- yield client.fetch(req)
+ resp = yield client.fetch(req)
+ if resp.code == 202:
+ msg = "Server for {} is slow to stop.".format(user['name'])
+ if cull_users:
+ app_log.warning(msg + " Not culling user yet.")
+ # return here so we don't continue to cull the user
+ # which will fail if the server is still trying to shutdown
+ return
+ app_log.warning(msg)
if cull_users:
app_log.info("Culling user %s (inactive since %s)", user['name'], last_activity)
req = HTTPRequest(url=url + '/users/%s' % user['name'],
@@ -83,12 +91,19 @@
if not user['server'] and not cull_users:
# server not running and not culling users, nothing to do
continue
+ if not user['last_activity']:
+ continue
last_activity = parse_date(user['last_activity'])
if last_activity < cull_limit:
+ # user might be in a transition (e.g. starting or stopping)
+ # don't try to cull if this is happening
+ if user['pending']:
+ app_log.warning("Not culling user %s with pending %s", user['name'], user['pending'])
+ continue
futures.append((user['name'], cull_one(user, last_activity)))
else:
app_log.debug("Not culling %s (active since %s)", user['name'], last_activity)
-
+
for (name, f) in futures:
yield f
app_log.debug("Finished culling %s", name)
@@ -102,16 +117,17 @@
help="""Cull users in addition to servers.
This is for use in temporary-user cases such as tmpnb.""",
)
-
+
parse_command_line()
if not options.cull_every:
options.cull_every = options.timeout // 2
api_token = os.environ['JUPYTERHUB_API_TOKEN']
-
+
loop = IOLoop.current()
cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)
- # run once before scheduling periodic call
- loop.run_sync(cull)
+ # schedule first cull immediately
+ # because PeriodicCallback doesn't start until the end of the first interval
+ loop.add_callback(cull)
# schedule periodic cull
pc = PeriodicCallback(cull, 1e3 * options.cull_every)
pc.start()
| {"golden_diff": "diff --git a/images/hub/cull_idle_servers.py b/images/hub/cull_idle_servers.py\n--- a/images/hub/cull_idle_servers.py\n+++ b/images/hub/cull_idle_servers.py\n@@ -70,7 +70,15 @@\n method='DELETE',\n headers=auth_header,\n )\n- yield client.fetch(req)\n+ resp = yield client.fetch(req)\n+ if resp.code == 202:\n+ msg = \"Server for {} is slow to stop.\".format(user['name'])\n+ if cull_users:\n+ app_log.warning(msg + \" Not culling user yet.\")\n+ # return here so we don't continue to cull the user\n+ # which will fail if the server is still trying to shutdown\n+ return\n+ app_log.warning(msg)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n@@ -83,12 +91,19 @@\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n+ if not user['last_activity']:\n+ continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n+ # user might be in a transition (e.g. starting or stopping)\n+ # don't try to cull if this is happening\n+ if user['pending']:\n+ app_log.warning(\"Not culling user %s with pending %s\", user['name'], user['pending'])\n+ continue\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n- \n+\n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n@@ -102,16 +117,17 @@\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n- \n+\n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n- \n+\n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n- # run once before scheduling periodic call\n- loop.run_sync(cull)\n+ # schedule first cull immediately\n+ # because PeriodicCallback doesn't start until the end of the first interval\n+ loop.add_callback(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n", "issue": "culler is failing and exiting when culling users and servers are slow to stop\nTwo issues:\r\n\r\n1. culler script seems to exit when the cull request fails. It's unclear why this happens, but we should confirm and fix this\r\n2. the 'real' issue is that the culler is hitting 400 errors in the first place. The cause is servers that are slow to stop (DELETE /users/:name gives 400 if the user's server is running and cannot stop promptly). The previous request to stop the server will have returned 202 ACCEPTED instead of 204 DELETED in this case. 
If we delay deleting users if we get 202 ACCEPTED from the server deletion, we should be safe here.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Imported from https://github.com/jupyterhub/jupyterhub/blob/0.8.0rc1/examples/cull-idle/cull_idle_servers.py\n\"\"\"script to monitor and cull idle single-user servers\n\nCaveats:\n\nlast_activity is not updated with high frequency,\nso cull timeout should be greater than the sum of:\n\n- single-user websocket ping interval (default: 30s)\n- JupyterHub.last_activity_interval (default: 5 minutes)\n\nYou can run this as a service managed by JupyterHub with this in your config::\n\n\n c.JupyterHub.services = [\n {\n 'name': 'cull-idle',\n 'admin': True,\n 'command': 'python cull_idle_servers.py --timeout=3600'.split(),\n }\n ]\n\nOr run it manually by generating an API token and storing it in `JUPYTERHUB_API_TOKEN`:\n\n export JUPYTERHUB_API_TOKEN=`jupyterhub token`\n python cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]\n\"\"\"\n\nimport datetime\nimport json\nimport os\n\nfrom dateutil.parser import parse as parse_date\n\nfrom tornado.gen import coroutine\nfrom tornado.log import app_log\nfrom tornado.httpclient import AsyncHTTPClient, HTTPRequest\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.options import define, options, parse_command_line\n\n\n@coroutine\ndef cull_idle(url, api_token, timeout, cull_users=False):\n \"\"\"Shutdown idle single-user servers\n\n If cull_users, inactive *users* will be deleted as well.\n \"\"\"\n auth_header = {\n 'Authorization': 'token %s' % api_token\n }\n req = HTTPRequest(url=url + '/users',\n headers=auth_header,\n )\n now = datetime.datetime.utcnow()\n cull_limit = now - datetime.timedelta(seconds=timeout)\n client = AsyncHTTPClient()\n resp = yield client.fetch(req)\n users = json.loads(resp.body.decode('utf8', 'replace'))\n futures = []\n\n @coroutine\n def cull_one(user, last_activity):\n \"\"\"cull one user\"\"\"\n\n # shutdown server first. 
Hub doesn't allow deleting users with running servers.\n if user['server']:\n app_log.info(\"Culling server for %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s/server' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n yield client.fetch(req)\n if cull_users:\n app_log.info(\"Culling user %s (inactive since %s)\", user['name'], last_activity)\n req = HTTPRequest(url=url + '/users/%s' % user['name'],\n method='DELETE',\n headers=auth_header,\n )\n yield client.fetch(req)\n\n for user in users:\n if not user['server'] and not cull_users:\n # server not running and not culling users, nothing to do\n continue\n last_activity = parse_date(user['last_activity'])\n if last_activity < cull_limit:\n futures.append((user['name'], cull_one(user, last_activity)))\n else:\n app_log.debug(\"Not culling %s (active since %s)\", user['name'], last_activity)\n \n for (name, f) in futures:\n yield f\n app_log.debug(\"Finished culling %s\", name)\n\n\nif __name__ == '__main__':\n define('url', default=os.environ.get('JUPYTERHUB_API_URL'), help=\"The JupyterHub API URL\")\n define('timeout', default=600, help=\"The idle timeout (in seconds)\")\n define('cull_every', default=0, help=\"The interval (in seconds) for checking for idle servers to cull\")\n define('cull_users', default=False,\n help=\"\"\"Cull users in addition to servers.\n This is for use in temporary-user cases such as tmpnb.\"\"\",\n )\n \n parse_command_line()\n if not options.cull_every:\n options.cull_every = options.timeout // 2\n api_token = os.environ['JUPYTERHUB_API_TOKEN']\n \n loop = IOLoop.current()\n cull = lambda : cull_idle(options.url, api_token, options.timeout, options.cull_users)\n # run once before scheduling periodic call\n loop.run_sync(cull)\n # schedule periodic cull\n pc = PeriodicCallback(cull, 1e3 * options.cull_every)\n pc.start()\n try:\n loop.start()\n except KeyboardInterrupt:\n pass\n", "path": "images/hub/cull_idle_servers.py"}]} | 1,996 | 648 |
gh_patches_debug_19230 | rasdani/github-patches | git_diff | google__clusterfuzz-863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fuzzers page does not work well with large number of jobs
For libFuzzer, if we have a lot of existing jobs (>100) and want to add a new job and associate it. Submit button feels stuck, does not show updates, and take 1-2 min to finish. Can we show some update or something better to optimize this when only one job is updated.
@oliverchang as fyi.
</issue>
<code>
[start of src/python/fuzzing/fuzzer_selection.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Helper functions to update fuzzer-job mappings, and select fuzzers to run."""
15
16 import collections
17
18 from base import utils
19 from datastore import data_types
20 from datastore import fuzz_target_utils
21 from datastore import ndb
22 from datastore import ndb_utils
23 from metrics import logs
24 from system import environment
25
26 # Used to prepare targets to be passed to utils.random_weighted_choice.
27 WeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight'])
28
29
30 def update_mappings_for_fuzzer(fuzzer, mappings=None):
31 """Clear existing mappings for a fuzzer, and replace them."""
32 if mappings is None:
33 mappings = fuzzer.jobs
34
35 query = data_types.FuzzerJob.query()
36 query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)
37 entities = ndb_utils.get_all_from_query(query)
38 old_mappings = {}
39 for entity in entities:
40 old_mappings[(entity.job, entity.platform)] = entity
41
42 new_mappings = []
43 for job_name in mappings:
44 job = data_types.Job.query(data_types.Job.name == job_name).get()
45 if not job:
46 logs.log_error('An unknown job %s was selected for fuzzer %s.' %
47 (job_name, fuzzer.name))
48 continue
49
50 mapping = old_mappings.pop((job_name, job.platform), None)
51 if mapping:
52 continue
53
54 mapping = data_types.FuzzerJob()
55 mapping.fuzzer = fuzzer.name
56 mapping.job = job_name
57 mapping.platform = job.platform
58 new_mappings.append(mapping)
59
60 ndb.put_multi(new_mappings)
61 ndb.delete_multi([m.key for m in list(old_mappings.values())])
62
63
64 def update_platform_for_job(job_name, new_platform):
65 """Update platform for all mappings for a particular job."""
66 query = data_types.FuzzerJob.query()
67 query = query.filter(data_types.FuzzerJob.job == job_name)
68 mappings = ndb_utils.get_all_from_query(query)
69 new_mappings = []
70 for mapping in mappings:
71 mapping.platform = new_platform
72 new_mappings.append(mapping)
73 ndb.put_multi(new_mappings)
74
75
76 def get_fuzz_task_payload(platform=None):
77 """Select a fuzzer that can run on this platform."""
78 if not platform:
79 queue_override = environment.get_value('QUEUE_OVERRIDE')
80 platform = queue_override if queue_override else environment.platform()
81
82 query = data_types.FuzzerJob.query()
83 query = query.filter(data_types.FuzzerJob.platform == platform)
84
85 mappings = list(ndb_utils.get_all_from_query(query))
86 if not mappings:
87 return None, None
88
89 selection = utils.random_weighted_choice(mappings)
90 return selection.fuzzer, selection.job
91
92
93 def select_fuzz_target(targets, target_weights):
94 """Select a fuzz target from a list of potential targets."""
95 assert targets
96
97 weighted_targets = []
98 for target in targets:
99 weight = target_weights.get(target, 1.0)
100 weighted_targets.append(WeightedTarget(target, weight))
101
102 return utils.random_weighted_choice(weighted_targets).target
103
104
105 def get_fuzz_target_weights():
106 """Get a list of fuzz target weights based on the current fuzzer."""
107 job_type = environment.get_value('JOB_NAME')
108
109 target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type))
110 fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs)
111
112 weights = {}
113 for fuzz_target, target_job in zip(fuzz_targets, target_jobs):
114 if not fuzz_target:
115 logs.log_error('Skipping weight assignment for fuzz target %s.' %
116 target_job.fuzz_target_name)
117 continue
118
119 weights[fuzz_target.binary] = target_job.weight
120
121 return weights
122
[end of src/python/fuzzing/fuzzer_selection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/fuzzing/fuzzer_selection.py b/src/python/fuzzing/fuzzer_selection.py
--- a/src/python/fuzzing/fuzzer_selection.py
+++ b/src/python/fuzzing/fuzzer_selection.py
@@ -37,20 +37,20 @@
entities = ndb_utils.get_all_from_query(query)
old_mappings = {}
for entity in entities:
- old_mappings[(entity.job, entity.platform)] = entity
+ old_mappings[entity.job] = entity
new_mappings = []
for job_name in mappings:
+ mapping = old_mappings.pop(job_name, None)
+ if mapping:
+ continue
+
job = data_types.Job.query(data_types.Job.name == job_name).get()
if not job:
logs.log_error('An unknown job %s was selected for fuzzer %s.' %
(job_name, fuzzer.name))
continue
- mapping = old_mappings.pop((job_name, job.platform), None)
- if mapping:
- continue
-
mapping = data_types.FuzzerJob()
mapping.fuzzer = fuzzer.name
mapping.job = job_name
| {"golden_diff": "diff --git a/src/python/fuzzing/fuzzer_selection.py b/src/python/fuzzing/fuzzer_selection.py\n--- a/src/python/fuzzing/fuzzer_selection.py\n+++ b/src/python/fuzzing/fuzzer_selection.py\n@@ -37,20 +37,20 @@\n entities = ndb_utils.get_all_from_query(query)\n old_mappings = {}\n for entity in entities:\n- old_mappings[(entity.job, entity.platform)] = entity\n+ old_mappings[entity.job] = entity\n \n new_mappings = []\n for job_name in mappings:\n+ mapping = old_mappings.pop(job_name, None)\n+ if mapping:\n+ continue\n+\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n logs.log_error('An unknown job %s was selected for fuzzer %s.' %\n (job_name, fuzzer.name))\n continue\n \n- mapping = old_mappings.pop((job_name, job.platform), None)\n- if mapping:\n- continue\n-\n mapping = data_types.FuzzerJob()\n mapping.fuzzer = fuzzer.name\n mapping.job = job_name\n", "issue": "Fuzzers page does not work well with large number of jobs\nFor libFuzzer, if we have a lot of existing jobs (>100) and want to add a new job and associate it. Submit button feels stuck, does not show updates, and take 1-2 min to finish. Can we show some update or something better to optimize this when only one job is updated.\r\n\r\n@oliverchang as fyi.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Helper functions to update fuzzer-job mappings, and select fuzzers to run.\"\"\"\n\nimport collections\n\nfrom base import utils\nfrom datastore import data_types\nfrom datastore import fuzz_target_utils\nfrom datastore import ndb\nfrom datastore import ndb_utils\nfrom metrics import logs\nfrom system import environment\n\n# Used to prepare targets to be passed to utils.random_weighted_choice.\nWeightedTarget = collections.namedtuple('WeightedTarget', ['target', 'weight'])\n\n\ndef update_mappings_for_fuzzer(fuzzer, mappings=None):\n \"\"\"Clear existing mappings for a fuzzer, and replace them.\"\"\"\n if mappings is None:\n mappings = fuzzer.jobs\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)\n entities = ndb_utils.get_all_from_query(query)\n old_mappings = {}\n for entity in entities:\n old_mappings[(entity.job, entity.platform)] = entity\n\n new_mappings = []\n for job_name in mappings:\n job = data_types.Job.query(data_types.Job.name == job_name).get()\n if not job:\n logs.log_error('An unknown job %s was selected for fuzzer %s.' 
%\n (job_name, fuzzer.name))\n continue\n\n mapping = old_mappings.pop((job_name, job.platform), None)\n if mapping:\n continue\n\n mapping = data_types.FuzzerJob()\n mapping.fuzzer = fuzzer.name\n mapping.job = job_name\n mapping.platform = job.platform\n new_mappings.append(mapping)\n\n ndb.put_multi(new_mappings)\n ndb.delete_multi([m.key for m in list(old_mappings.values())])\n\n\ndef update_platform_for_job(job_name, new_platform):\n \"\"\"Update platform for all mappings for a particular job.\"\"\"\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.job == job_name)\n mappings = ndb_utils.get_all_from_query(query)\n new_mappings = []\n for mapping in mappings:\n mapping.platform = new_platform\n new_mappings.append(mapping)\n ndb.put_multi(new_mappings)\n\n\ndef get_fuzz_task_payload(platform=None):\n \"\"\"Select a fuzzer that can run on this platform.\"\"\"\n if not platform:\n queue_override = environment.get_value('QUEUE_OVERRIDE')\n platform = queue_override if queue_override else environment.platform()\n\n query = data_types.FuzzerJob.query()\n query = query.filter(data_types.FuzzerJob.platform == platform)\n\n mappings = list(ndb_utils.get_all_from_query(query))\n if not mappings:\n return None, None\n\n selection = utils.random_weighted_choice(mappings)\n return selection.fuzzer, selection.job\n\n\ndef select_fuzz_target(targets, target_weights):\n \"\"\"Select a fuzz target from a list of potential targets.\"\"\"\n assert targets\n\n weighted_targets = []\n for target in targets:\n weight = target_weights.get(target, 1.0)\n weighted_targets.append(WeightedTarget(target, weight))\n\n return utils.random_weighted_choice(weighted_targets).target\n\n\ndef get_fuzz_target_weights():\n \"\"\"Get a list of fuzz target weights based on the current fuzzer.\"\"\"\n job_type = environment.get_value('JOB_NAME')\n\n target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type))\n fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(target_jobs)\n\n weights = {}\n for fuzz_target, target_job in zip(fuzz_targets, target_jobs):\n if not fuzz_target:\n logs.log_error('Skipping weight assignment for fuzz target %s.' %\n target_job.fuzz_target_name)\n continue\n\n weights[fuzz_target.binary] = target_job.weight\n\n return weights\n", "path": "src/python/fuzzing/fuzzer_selection.py"}]} | 1,818 | 249 |
gh_patches_debug_20320 | rasdani/github-patches | git_diff | praw-dev__praw-1104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replying to comments in quarantined subreddits returns an empty object
I have a bot that fetches comment ids from pushshift, then does some work and replies to the comment. If the comment is in a quarantined subreddit that I have not clicked through the quarantine warning for, I get the following error.
```
praw/models/reddit/mixins/replyable.py", line 26, in reply
return self._reddit.post(API_PATH["comment"], data=data)[0]
IndexError: list index out of range
```
The reply call succeeds, returning a valid http code and the following json
```
{'json': {'errors': [], 'data': {'things': []}}}
```
`objector.objectify` fails to parse this and returns an empty array. Importantly, the comment is successfully created.
I'm not really sure what should happen in this case, but I do think it should be a more clear error message. Happy to put together a pull request if anyone has any ideas.
</issue>
<code>
[start of praw/models/reddit/mixins/replyable.py]
1 """Provide the ReplyableMixin class."""
2 from ....const import API_PATH
3
4
5 class ReplyableMixin:
6 """Interface for RedditBase classes that can be replied to."""
7
8 def reply(self, body):
9 """Reply to the object.
10
11 :param body: The markdown formatted content for a comment.
12 :returns: A :class:`~.Comment` object for the newly created comment.
13
14 Example usage:
15
16 .. code:: python
17
18 submission = reddit.submission(id='5or86n')
19 submission.reply('reply')
20
21 comment = reddit.comment(id='dxolpyc')
22 comment.reply('reply')
23
24 """
25 data = {"text": body, "thing_id": self.fullname}
26 return self._reddit.post(API_PATH["comment"], data=data)[0]
27
[end of praw/models/reddit/mixins/replyable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/praw/models/reddit/mixins/replyable.py b/praw/models/reddit/mixins/replyable.py
--- a/praw/models/reddit/mixins/replyable.py
+++ b/praw/models/reddit/mixins/replyable.py
@@ -9,7 +9,14 @@
"""Reply to the object.
:param body: The markdown formatted content for a comment.
- :returns: A :class:`~.Comment` object for the newly created comment.
+ :returns: A :class:`~.Comment` object for the newly created
+ comment or ``None`` if Reddit doesn't provide one.
+
+ A ``None`` value can be returned if the target is a comment or
+ submission in a quarantined subreddit and the authenticated user
+ has not opt-ed in to viewing the content. When this happens the
+ comment will be sucessfully created on Reddit and can be retried
+ by drawing the comment from the user's comment history.
Example usage:
@@ -23,4 +30,8 @@
"""
data = {"text": body, "thing_id": self.fullname}
- return self._reddit.post(API_PATH["comment"], data=data)[0]
+ comments = self._reddit.post(API_PATH["comment"], data=data)
+ try:
+ return comments[0]
+ except IndexError:
+ return None
| {"golden_diff": "diff --git a/praw/models/reddit/mixins/replyable.py b/praw/models/reddit/mixins/replyable.py\n--- a/praw/models/reddit/mixins/replyable.py\n+++ b/praw/models/reddit/mixins/replyable.py\n@@ -9,7 +9,14 @@\n \"\"\"Reply to the object.\n \n :param body: The markdown formatted content for a comment.\n- :returns: A :class:`~.Comment` object for the newly created comment.\n+ :returns: A :class:`~.Comment` object for the newly created\n+ comment or ``None`` if Reddit doesn't provide one.\n+\n+ A ``None`` value can be returned if the target is a comment or\n+ submission in a quarantined subreddit and the authenticated user\n+ has not opt-ed in to viewing the content. When this happens the\n+ comment will be sucessfully created on Reddit and can be retried\n+ by drawing the comment from the user's comment history.\n \n Example usage:\n \n@@ -23,4 +30,8 @@\n \n \"\"\"\n data = {\"text\": body, \"thing_id\": self.fullname}\n- return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\n+ comments = self._reddit.post(API_PATH[\"comment\"], data=data)\n+ try:\n+ return comments[0]\n+ except IndexError:\n+ return None\n", "issue": "Replying to comments in quarantined subreddits returns an empty object\nI have a bot that fetches comment ids from pushshift, then does some work and replies to the comment. If the comment is in a quarantined subreddit that I have not clicked through the quarantine warning for, I get the following error.\r\n\r\n```\r\npraw/models/reddit/mixins/replyable.py\", line 26, in reply\r\n return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\r\nIndexError: list index out of range\r\n```\r\nThe reply call succeeds, returning a valid http code and the following json\r\n```\r\n{'json': {'errors': [], 'data': {'things': []}}}\r\n```\r\n`objector.objectify` fails to parse this and returns an empty array. Importantly, the comment is successfully created.\r\n\r\nI'm not really sure what should happen in this case, but I do think it should be a more clear error message. Happy to put together a pull request if anyone has any ideas.\n", "before_files": [{"content": "\"\"\"Provide the ReplyableMixin class.\"\"\"\nfrom ....const import API_PATH\n\n\nclass ReplyableMixin:\n \"\"\"Interface for RedditBase classes that can be replied to.\"\"\"\n\n def reply(self, body):\n \"\"\"Reply to the object.\n\n :param body: The markdown formatted content for a comment.\n :returns: A :class:`~.Comment` object for the newly created comment.\n\n Example usage:\n\n .. code:: python\n\n submission = reddit.submission(id='5or86n')\n submission.reply('reply')\n\n comment = reddit.comment(id='dxolpyc')\n comment.reply('reply')\n\n \"\"\"\n data = {\"text\": body, \"thing_id\": self.fullname}\n return self._reddit.post(API_PATH[\"comment\"], data=data)[0]\n", "path": "praw/models/reddit/mixins/replyable.py"}]} | 976 | 314 |
gh_patches_debug_22216 | rasdani/github-patches | git_diff | spacetelescope__jwql-483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simplify conda environments
Since `conda` will automatically determine which libraries are needed for dependencies (e.g. `numpy_base` is installed when installing `numpy`), we could probably use to trim down our conda environments to only those high-level packages that are used within our repo, and `conda` will figure out the rest. It might also be a good time to make sure the `conda` environment is consistent with the dependencies listed in `setup.py` and `requirements.txt`
</issue>
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.21.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 REQUIRES = [
13 'astropy>=3.2.1',
14 'astroquery>=0.3.9',
15 'authlib',
16 'bokeh>=1.0',
17 'django>=2.0',
18 'jinja2',
19 'jsonschema==2.6.0',
20 'jwedb',
21 'jwst',
22 'matplotlib',
23 'numpy',
24 'numpydoc',
25 'pandas',
26 'psycopg2',
27 'pysiaf',
28 'pytest',
29 'sphinx',
30 'sqlalchemy',
31 'stsci_rtd_theme'
32 ]
33
34 setup(
35 name='jwql',
36 version=VERSION,
37 description=DESCRIPTION,
38 url='https://github.com/spacetelescope/jwql.git',
39 author=AUTHORS,
40 author_email='[email protected]',
41 license='BSD',
42 keywords=['astronomy', 'python'],
43 classifiers=['Programming Language :: Python'],
44 packages=find_packages(),
45 install_requires=REQUIRES,
46 include_package_data=True,
47 include_dirs=[np.get_include()],
48 )
49
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,21 +4,26 @@
VERSION = '0.21.0'
-AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
-AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
+AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'
DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
REQUIRES = [
+ 'asdf>=2.3.3',
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
'bokeh>=1.0',
+ 'codecov',
'django>=2.0',
+ 'flake8',
+ 'inflection',
+ 'ipython',
'jinja2',
'jsonschema==2.6.0',
- 'jwedb',
- 'jwst',
+ 'jwedb>=0.0.3',
+ 'jwst==0.13.0',
'matplotlib',
'numpy',
'numpydoc',
@@ -26,9 +31,12 @@
'psycopg2',
'pysiaf',
'pytest',
+ 'pytest-cov',
+ 'scipy',
'sphinx',
'sqlalchemy',
- 'stsci_rtd_theme'
+ 'stsci_rtd_theme',
+ 'twine'
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,21 +4,26 @@\n \n VERSION = '0.21.0'\n \n-AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n-AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n+AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n \n DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n \n REQUIRES = [\n+ 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n+ 'codecov',\n 'django>=2.0',\n+ 'flake8',\n+ 'inflection',\n+ 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n- 'jwedb',\n- 'jwst',\n+ 'jwedb>=0.0.3',\n+ 'jwst==0.13.0',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n@@ -26,9 +31,12 @@\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n+ 'pytest-cov',\n+ 'scipy',\n 'sphinx',\n 'sqlalchemy',\n- 'stsci_rtd_theme'\n+ 'stsci_rtd_theme',\n+ 'twine'\n ]\n \n setup(\n", "issue": "Simplify conda environments \nSince `conda` will automatically determine which libraries are needed for dependencies (e.g. `numpy_base` is installed when installing `numpy`), we could probably use to trim down our conda environments to only those high-level packages that are used within our repo, and `conda` will figure out the rest. It might also be a good time to make sure the `conda` environment is consistent with the dependencies listed in `setup.py` and `requirements.txt`\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.21.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nREQUIRES = [\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'django>=2.0',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb',\n 'jwst',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,040 | 392 |
gh_patches_debug_26912 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor technical feedback
The current implementation of technical feedback does not comply with the way it is used in the apps when submitting feedback. Instead of having one model for technical feedback, we should rather have a `technical` flag for each of the other feedback models. This flag should be set whenever the `category`-parameter contains "technisch" or "technical". In a future API version, we can convert this to a parameter, which can either be true or false in the feedback submission request.
</issue>
<code>
[start of src/cms/models/__init__.py]
1 from .config.configuration import Configuration
2
3 from .events.event import Event
4 from .events.event_translation import EventTranslation
5 from .events.recurrence_rule import RecurrenceRule
6
7 from .offers.offer import Offer
8 from .offers.offer_template import OfferTemplate
9
10 from .feedback.event_feedback import EventFeedback
11 from .feedback.offer_feedback import OfferFeedback
12 from .feedback.feedback import Feedback
13 from .feedback.page_feedback import PageFeedback
14 from .feedback.region_feedback import RegionFeedback
15 from .feedback.search_result_feedback import SearchResultFeedback
16 from .feedback.technical_feedback import TechnicalFeedback
17
18 from .languages.language import Language
19 from .languages.language_tree_node import LanguageTreeNode
20
21 from .media.document import Document
22
23 from .pages.page import Page
24 from .pages.page_translation import PageTranslation
25
26 from .pois.poi import POI
27 from .pois.poi_translation import POITranslation
28
29 from .push_notifications.push_notification import PushNotification
30 from .push_notifications.push_notification_translation import PushNotificationTranslation
31
32 from .regions.region import Region
33
34 from .users.organization import Organization
35 from .users.user_profile import UserProfile
36 from .users.user_mfa import UserMfa
37
[end of src/cms/models/__init__.py]
[start of src/cms/models/feedback/feedback.py]
1 """
2 Module for models storing feedback from front end users
3 """
4 from django.db import models
5
6
7 class Feedback(models.Model):
8 """
9 Base class for collecting feeedback from users.
10 """
11 EMOTION = (
12 ("Pos", "Positive"),
13 ("Neg", "Negative"),
14 ("NA", "Not Available"),
15 )
16 emotion = models.CharField(max_length=3, choices=EMOTION)
17 comment = models.CharField(max_length=1000)
18 readStatus = models.BooleanField(default=False)
19
20 created_date = models.DateTimeField(auto_now_add=True)
21 last_updated = models.DateTimeField(auto_now=True)
22
23 class Meta:
24 default_permissions = ()
25 permissions = (
26 ('view_feedback', 'Can view feedback'),
27 )
28
[end of src/cms/models/feedback/feedback.py]
[start of src/cms/models/feedback/technical_feedback.py]
1 """
2 Module for models storing feedback from front end users
3 """
4 from django.db import models
5
6 from .feedback import Feedback
7 from ..pages.page import Page
8
9
10 class TechnicalFeedback(Feedback):
11 """
12 Technical feedback on the end user app
13 """
14 page = models.ForeignKey(Page, on_delete=models.CASCADE)
15
16 class Meta:
17 default_permissions = ()
18
[end of src/cms/models/feedback/technical_feedback.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/models/__init__.py b/src/cms/models/__init__.py
--- a/src/cms/models/__init__.py
+++ b/src/cms/models/__init__.py
@@ -13,7 +13,6 @@
from .feedback.page_feedback import PageFeedback
from .feedback.region_feedback import RegionFeedback
from .feedback.search_result_feedback import SearchResultFeedback
-from .feedback.technical_feedback import TechnicalFeedback
from .languages.language import Language
from .languages.language_tree_node import LanguageTreeNode
diff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py
--- a/src/cms/models/feedback/feedback.py
+++ b/src/cms/models/feedback/feedback.py
@@ -15,7 +15,8 @@
)
emotion = models.CharField(max_length=3, choices=EMOTION)
comment = models.CharField(max_length=1000)
- readStatus = models.BooleanField(default=False)
+ is_technical = models.BooleanField(default=False)
+ read_status = models.BooleanField(default=False)
created_date = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
diff --git a/src/cms/models/feedback/technical_feedback.py b/src/cms/models/feedback/technical_feedback.py
deleted file mode 100644
--- a/src/cms/models/feedback/technical_feedback.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Module for models storing feedback from front end users
-"""
-from django.db import models
-
-from .feedback import Feedback
-from ..pages.page import Page
-
-
-class TechnicalFeedback(Feedback):
- """
- Technical feedback on the end user app
- """
- page = models.ForeignKey(Page, on_delete=models.CASCADE)
-
- class Meta:
- default_permissions = ()
| {"golden_diff": "diff --git a/src/cms/models/__init__.py b/src/cms/models/__init__.py\n--- a/src/cms/models/__init__.py\n+++ b/src/cms/models/__init__.py\n@@ -13,7 +13,6 @@\n from .feedback.page_feedback import PageFeedback\n from .feedback.region_feedback import RegionFeedback\n from .feedback.search_result_feedback import SearchResultFeedback\n-from .feedback.technical_feedback import TechnicalFeedback\n \n from .languages.language import Language\n from .languages.language_tree_node import LanguageTreeNode\ndiff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py\n--- a/src/cms/models/feedback/feedback.py\n+++ b/src/cms/models/feedback/feedback.py\n@@ -15,7 +15,8 @@\n )\n emotion = models.CharField(max_length=3, choices=EMOTION)\n comment = models.CharField(max_length=1000)\n- readStatus = models.BooleanField(default=False)\n+ is_technical = models.BooleanField(default=False)\n+ read_status = models.BooleanField(default=False)\n \n created_date = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\ndiff --git a/src/cms/models/feedback/technical_feedback.py b/src/cms/models/feedback/technical_feedback.py\ndeleted file mode 100644\n--- a/src/cms/models/feedback/technical_feedback.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-\"\"\"\n-Module for models storing feedback from front end users\n-\"\"\"\n-from django.db import models\n-\n-from .feedback import Feedback\n-from ..pages.page import Page\n-\n-\n-class TechnicalFeedback(Feedback):\n- \"\"\"\n- Technical feedback on the end user app\n- \"\"\"\n- page = models.ForeignKey(Page, on_delete=models.CASCADE)\n-\n- class Meta:\n- default_permissions = ()\n", "issue": "Refactor technical feedback\nThe current implementation of technical feedback does not comply with the way it is used in the apps when submitting feedback. Instead of having one model for technical feedback, we should rather have a `technical` flag for each of the other feedback models. This flag should be set whenever the `category`-parameter contains \"technisch\" or \"technical\". 
In a future API version, we can convert this to a parameter, which can either be true or false in the feedback submission request.\n", "before_files": [{"content": "from .config.configuration import Configuration\n\nfrom .events.event import Event\nfrom .events.event_translation import EventTranslation\nfrom .events.recurrence_rule import RecurrenceRule\n\nfrom .offers.offer import Offer\nfrom .offers.offer_template import OfferTemplate\n\nfrom .feedback.event_feedback import EventFeedback\nfrom .feedback.offer_feedback import OfferFeedback\nfrom .feedback.feedback import Feedback\nfrom .feedback.page_feedback import PageFeedback\nfrom .feedback.region_feedback import RegionFeedback\nfrom .feedback.search_result_feedback import SearchResultFeedback\nfrom .feedback.technical_feedback import TechnicalFeedback\n\nfrom .languages.language import Language\nfrom .languages.language_tree_node import LanguageTreeNode\n\nfrom .media.document import Document\n\nfrom .pages.page import Page\nfrom .pages.page_translation import PageTranslation\n\nfrom .pois.poi import POI\nfrom .pois.poi_translation import POITranslation\n\nfrom .push_notifications.push_notification import PushNotification\nfrom .push_notifications.push_notification_translation import PushNotificationTranslation\n\nfrom .regions.region import Region\n\nfrom .users.organization import Organization\nfrom .users.user_profile import UserProfile\nfrom .users.user_mfa import UserMfa\n", "path": "src/cms/models/__init__.py"}, {"content": "\"\"\"\nModule for models storing feedback from front end users\n\"\"\"\nfrom django.db import models\n\n\nclass Feedback(models.Model):\n \"\"\"\n Base class for collecting feeedback from users.\n \"\"\"\n EMOTION = (\n (\"Pos\", \"Positive\"),\n (\"Neg\", \"Negative\"),\n (\"NA\", \"Not Available\"),\n )\n emotion = models.CharField(max_length=3, choices=EMOTION)\n comment = models.CharField(max_length=1000)\n readStatus = models.BooleanField(default=False)\n\n created_date = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n default_permissions = ()\n permissions = (\n ('view_feedback', 'Can view feedback'),\n )\n", "path": "src/cms/models/feedback/feedback.py"}, {"content": "\"\"\"\nModule for models storing feedback from front end users\n\"\"\"\nfrom django.db import models\n\nfrom .feedback import Feedback\nfrom ..pages.page import Page\n\n\nclass TechnicalFeedback(Feedback):\n \"\"\"\n Technical feedback on the end user app\n \"\"\"\n page = models.ForeignKey(Page, on_delete=models.CASCADE)\n\n class Meta:\n default_permissions = ()\n", "path": "src/cms/models/feedback/technical_feedback.py"}]} | 1,295 | 395 |
gh_patches_debug_7917 | rasdani/github-patches | git_diff | bokeh__bokeh-6159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
For Interactive Legends, a note about `muted_color` would be helpful
For the [Interactive Legend guide](https://github.com/bokeh/bokeh/blob/d8fcffa1c472bf641517ef81698bb6d057cbd30f/sphinx/source/docs/user_guide/interaction/legends.rst#id7), in addition to the note about `muted_alpha`, it would be helpful to also demonstrate `muted_color`.
</issue>
<code>
[start of sphinx/source/docs/user_guide/examples/interaction_legend_mute.py]
1 import pandas as pd
2
3 from bokeh.palettes import Spectral4
4 from bokeh.plotting import figure, output_file, show
5
6 p = figure(plot_width=800, plot_height=250, x_axis_type="datetime")
7 p.title.text = 'Click on legend entries to mute the corresponding lines'
8
9 for name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):
10 df = pd.read_csv(
11 "http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014" % name,
12 parse_dates=['Date']
13 )
14 p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)
15
16 p.legend.location = "top_left"
17 p.legend.click_policy="mute"
18
19 output_file("interactive_legend.html", title="interactive_legend.py example")
20
21 show(p)
22
[end of sphinx/source/docs/user_guide/examples/interaction_legend_mute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
--- a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
+++ b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py
@@ -11,7 +11,8 @@
"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014" % name,
parse_dates=['Date']
)
- p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)
+ p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,
+ muted_color=color, muted_alpha=0.2, legend=name)
p.legend.location = "top_left"
p.legend.click_policy="mute"
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n--- a/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n+++ b/sphinx/source/docs/user_guide/examples/interaction_legend_mute.py\n@@ -11,7 +11,8 @@\n \"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014\" % name,\n parse_dates=['Date']\n )\n- p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)\n+ p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8,\n+ muted_color=color, muted_alpha=0.2, legend=name)\n \n p.legend.location = \"top_left\"\n p.legend.click_policy=\"mute\"\n", "issue": "For Interactive Legends, a note about `muted_color` would be helpful\nFor the [Interactive Legend guide](https://github.com/bokeh/bokeh/blob/d8fcffa1c472bf641517ef81698bb6d057cbd30f/sphinx/source/docs/user_guide/interaction/legends.rst#id7), in addition to the note about `muted_alpha`, it would be helpful to also demonstrate `muted_color`.\n", "before_files": [{"content": "import pandas as pd\n\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\n\np = figure(plot_width=800, plot_height=250, x_axis_type=\"datetime\")\np.title.text = 'Click on legend entries to mute the corresponding lines'\n\nfor name, color in zip(['AAPL', 'IBM', 'MSFT', 'GOOG'], Spectral4):\n df = pd.read_csv(\n \"http://ichart.yahoo.com/table.csv?s=%s&a=0&b=1&c=2005&d=0&e=1&f=2014\" % name,\n parse_dates=['Date']\n )\n p.line(df['Date'], df['Close'], line_width=2, color=color, alpha=0.8, muted_alpha=0.2, legend=name)\n\np.legend.location = \"top_left\"\np.legend.click_policy=\"mute\"\n\noutput_file(\"interactive_legend.html\", title=\"interactive_legend.py example\")\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/examples/interaction_legend_mute.py"}]} | 917 | 224 |
gh_patches_debug_13930 | rasdani/github-patches | git_diff | microsoft__ptvsd-1425 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
python without debugging won't start if there is a breakpoint
@tmdag commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540)
## Environment data
- VSCode Version: 1.33.1
- OS Version: Linux 5.0.7-200.fc29.x86_64
- Extension version (available under the Extensions sidebar): Python 2019.4.11987
- Anaconda Extension Pack 1.0.1
## Steps to reproduce:
Open Python code
create a breakpoint
run python without debugging (ctrl+F5)
Terminal output:
cd /home/user/dev/Pytool ; env PYTHONIOENCODING=UTF-8 PYTHONUNBUFFERED=1 /usr/bin/python3 /home/user/.vscode/extensions/ms-python.python-2019.4.11987/pythonFiles/ptvsd_launcher.py --default --nodebug --client --host localhost --port 36019 /home/user/dev/Pytool/mypytool.py
Terminated
Does this issue occur when all extensions are disabled?: Yes/No
Not sure - Python extension is required
## Enabled Extensions:
Bookmarks 10.4.3
C/C++ 0.221
Gist 3.0.3
Git History 0.4.6
GitLens - 9.6.3
markdownlint 0.26.0
Syncing 2.1.6
OpenCL 0.5.2
VEX 0.4.0
TAML 0.4.0
---
@jxramos commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540#issuecomment-488807421)
I'm seeing this too on macOS; it immediately exits with `Terminated: 15`. This behavior persists even if the breakpoint is disabled/unchecked.
</issue>
<code>
[start of src/ptvsd/runner.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import pydevd
6 import threading
7
8 from ptvsd.daemon import DaemonBase
9 from ptvsd.session import DebugSession
10 from ptvsd.wrapper import VSCLifecycleMsgProcessor
11 from pydevd import init_stdout_redirect, init_stderr_redirect
12
13
14 HOSTNAME = 'localhost'
15
16
17 def run(address, filename, is_module, *args, **kwargs):
18 # TODO: docstring
19 # TODO: client/server -> address
20 daemon = Daemon()
21 if not daemon.wait_for_launch(address):
22 return
23
24 debugger = pydevd.PyDB()
25 # We do not want some internal methods to get executed in non-debug mode.
26 debugger.init_matplotlib_support = lambda *arg: None
27 debugger.run(
28 file=filename,
29 globals=None,
30 locals=None,
31 is_module=is_module,
32 set_trace=False)
33
34
35 class Daemon(DaemonBase):
36 """The process-level manager for the VSC protocol debug adapter."""
37
38 LAUNCH_TIMEOUT = 10000 # seconds
39
40 class SESSION(DebugSession):
41 class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):
42 def on_invalid_request(self, request, args):
43 self.send_response(request, success=True)
44
45 def wait_for_launch(self, addr, timeout=LAUNCH_TIMEOUT):
46 # TODO: docstring
47 launched = threading.Event()
48 _, start_session = self.start_client(addr)
49 start_session(
50 notify_launch=launched.set,
51 )
52 return launched.wait(timeout)
53
54 def _start(self):
55 import weakref
56 weak_self = weakref.ref(self) # Avoid cyclic ref
57
58 def on_stdout(msg):
59 self = weak_self()
60 if self is not None:
61 self._send_output('stdout', msg)
62
63 def on_stderr(msg):
64 self = weak_self()
65 if self is not None:
66 self._send_output('stderr', msg)
67
68 init_stdout_redirect(on_stdout)
69 init_stderr_redirect(on_stderr)
70 return NoSocket()
71
72 def _close(self):
73 super(Daemon, self)._close()
74
75 def _send_output(self, category, output):
76 if self.session is None:
77 return
78 self.session._msgprocessor.send_event('output',
79 category=category,
80 output=output)
81
82
83 class NoSocket(object):
84 """A object with a noop socket lifecycle."""
85
86 def shutdown(self, *args, **kwargs):
87 pass
88
89 def close(self):
90 pass
91
[end of src/ptvsd/runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ptvsd/runner.py b/src/ptvsd/runner.py
--- a/src/ptvsd/runner.py
+++ b/src/ptvsd/runner.py
@@ -39,6 +39,19 @@
class SESSION(DebugSession):
class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):
+
+ def on_setBreakpoints(self, request, args):
+ # Note: breakpoints is required (vscode will terminate
+ # the debugger if that's not the case).
+ # See: https://github.com/microsoft/ptvsd/issues/1408
+ self.send_response(
+ request,
+ success=True,
+ breakpoints=(
+ [{'verified': False}] * len(args.get('breakpoints', ()))
+ )
+ )
+
def on_invalid_request(self, request, args):
self.send_response(request, success=True)
| {"golden_diff": "diff --git a/src/ptvsd/runner.py b/src/ptvsd/runner.py\n--- a/src/ptvsd/runner.py\n+++ b/src/ptvsd/runner.py\n@@ -39,6 +39,19 @@\n \n class SESSION(DebugSession):\n class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):\n+\n+ def on_setBreakpoints(self, request, args):\n+ # Note: breakpoints is required (vscode will terminate\n+ # the debugger if that's not the case).\n+ # See: https://github.com/microsoft/ptvsd/issues/1408\n+ self.send_response(\n+ request,\n+ success=True,\n+ breakpoints=(\n+ [{'verified': False}] * len(args.get('breakpoints', ()))\n+ )\n+ )\n+\n def on_invalid_request(self, request, args):\n self.send_response(request, success=True)\n", "issue": "python without debugging won't start if there is a breakpoint\n@tmdag commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540)\n\n## Environment data\r\n- VSCode Version: 1.33.1\r\n- OS Version:Linux 5.0.7-200.fc29.x86_64\r\n- Extension version (available under the Extensions sidebar): Python 2019.4.11987\r\n- Anaconda Extension Pack 1.0.1\r\n\r\n## Steps to reproduce:\r\nOpen Python code\r\ncreate a breakpoint\r\nrun python without debugging (ctrl+F5)\r\nTerminal output:\r\n\r\ncd /home/user/dev/Pytool ; env PYTHONIOENCODING=UTF-8 PYTHONUNBUFFERED=1 /usr/bin/python3 /home/user/.vscode/extensions/ms-python.python-2019.4.11987/pythonFiles/ptvsd_launcher.py --default --nodebug --client --host localhost --port 36019 /home/user/dev/Pytool/mypytool.py\r\nTerminated\r\nDoes this issue occur when all extensions are disabled?: Yes/No\r\nNot sure - Python extension is required\r\n\r\n## Enabled Extensions:\r\nBookmarks 10.4.3\r\nC/C++ 0.221\r\nGist 3.0.3\r\nGit History 0.4.6\r\nGitLens - 9.6.3\r\nmarkdownlint 0.26.0\r\n\r\nSyncing 2.1.6\r\nOpenCL 0.5.2\r\nVEX 0.4.0\r\nTAML 0.4.0\r\n\r\n\n\n---\n\n@jxramos commented on [Thu May 02 2019](https://github.com/microsoft/vscode-python/issues/5540#issuecomment-488807421)\n\nI'm seeing this too on a MacOS, it immediately exits with `Terminated: 15`. This behavior persists even if the breakpoint is disabled/unchecked.\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport pydevd\nimport threading\n\nfrom ptvsd.daemon import DaemonBase\nfrom ptvsd.session import DebugSession\nfrom ptvsd.wrapper import VSCLifecycleMsgProcessor\nfrom pydevd import init_stdout_redirect, init_stderr_redirect\n\n\nHOSTNAME = 'localhost'\n\n\ndef run(address, filename, is_module, *args, **kwargs):\n # TODO: docstring\n # TODO: client/server -> address\n daemon = Daemon()\n if not daemon.wait_for_launch(address):\n return\n\n debugger = pydevd.PyDB()\n # We do not want some internal methods to get executed in non-debug mode.\n debugger.init_matplotlib_support = lambda *arg: None\n debugger.run(\n file=filename,\n globals=None,\n locals=None,\n is_module=is_module,\n set_trace=False)\n\n\nclass Daemon(DaemonBase):\n \"\"\"The process-level manager for the VSC protocol debug adapter.\"\"\"\n\n LAUNCH_TIMEOUT = 10000 # seconds\n\n class SESSION(DebugSession):\n class MESSAGE_PROCESSOR(VSCLifecycleMsgProcessor):\n def on_invalid_request(self, request, args):\n self.send_response(request, success=True)\n\n def wait_for_launch(self, addr, timeout=LAUNCH_TIMEOUT):\n # TODO: docstring\n launched = threading.Event()\n _, start_session = self.start_client(addr)\n start_session(\n notify_launch=launched.set,\n )\n return launched.wait(timeout)\n\n def _start(self):\n import weakref\n weak_self = weakref.ref(self) # Avoid cyclic ref\n\n def on_stdout(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stdout', msg)\n\n def on_stderr(msg):\n self = weak_self()\n if self is not None:\n self._send_output('stderr', msg)\n\n init_stdout_redirect(on_stdout)\n init_stderr_redirect(on_stderr)\n return NoSocket()\n\n def _close(self):\n super(Daemon, self)._close()\n\n def _send_output(self, category, output):\n if self.session is None:\n return\n self.session._msgprocessor.send_event('output',\n category=category,\n output=output)\n\n\nclass NoSocket(object):\n \"\"\"A object with a noop socket lifecycle.\"\"\"\n\n def shutdown(self, *args, **kwargs):\n pass\n\n def close(self):\n pass\n", "path": "src/ptvsd/runner.py"}]} | 1,720 | 202 |
gh_patches_debug_13993 | rasdani/github-patches | git_diff | google__flax-1324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'flax.linen' has no attribute 'merge_param'
[This guide](https://flax.readthedocs.io/en/latest/design_notes/arguments.html) suggests using `nn.merge_param` to combine arguments, but `merge_param` is only available through `nn.module.merge_param`. I believe it needs to be added to the import line [here](https://github.com/google/flax/blob/4ae9143f7ef46ffab6d9123ba1b2e4f3303e68d1/flax/linen/__init__.py#L28). I can open a PR if this is the case.
</issue>
<code>
[start of flax/linen/__init__.py]
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """The Flax Module system."""
16
17
18 # pylint: disable=g-multiple-import
19 # re-export commonly used modules and functions
20 from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
21 log_softmax, relu, sigmoid, soft_sign, softmax,
22 softplus, swish, silu, tanh)
23 from .attention import (MultiHeadDotProductAttention, SelfAttention,
24 dot_product_attention, make_attention_mask,
25 make_causal_mask, combine_masks)
26 from ..core import broadcast, DenyList
27 from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
28 from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply
29 from .normalization import BatchNorm, GroupNorm, LayerNorm
30 from .pooling import avg_pool, max_pool
31 from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
32 from .stochastic import Dropout
33 from .transforms import jit, named_call, remat, scan, vmap
34 from .initializers import zeros, ones
35
36 # pylint: enable=g-multiple-import
37
[end of flax/linen/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py
--- a/flax/linen/__init__.py
+++ b/flax/linen/__init__.py
@@ -25,7 +25,8 @@
make_causal_mask, combine_masks)
from ..core import broadcast, DenyList
from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
-from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply
+from .module import (Module, compact, enable_named_call, disable_named_call,
+ Variable, init, init_with_output, apply, merge_param)
from .normalization import BatchNorm, GroupNorm, LayerNorm
from .pooling import avg_pool, max_pool
from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
| {"golden_diff": "diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py\n--- a/flax/linen/__init__.py\n+++ b/flax/linen/__init__.py\n@@ -25,7 +25,8 @@\n make_causal_mask, combine_masks)\n from ..core import broadcast, DenyList\n from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\n-from .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply\n+from .module import (Module, compact, enable_named_call, disable_named_call,\n+ Variable, init, init_with_output, apply, merge_param)\n from .normalization import BatchNorm, GroupNorm, LayerNorm\n from .pooling import avg_pool, max_pool\n from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\n", "issue": "AttributeError: module 'flax.linen' has no attribute 'merge_param'\n[This guide](https://flax.readthedocs.io/en/latest/design_notes/arguments.html) suggests using `nn.merge_param` to combine arguments, but `merge_param` is only available through `nn.module.merge_param`. I believe it needs to be added to the import line [here](https://github.com/google/flax/blob/4ae9143f7ef46ffab6d9123ba1b2e4f3303e68d1/flax/linen/__init__.py#L28). I can open a PR if this is the case.\r\n\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module system.\"\"\"\n\n\n# pylint: disable=g-multiple-import\n# re-export commonly used modules and functions\nfrom .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n softplus, swish, silu, tanh)\nfrom .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\nfrom ..core import broadcast, DenyList\nfrom .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\nfrom .module import Module, compact, enable_named_call, disable_named_call, Variable, init, init_with_output, apply\nfrom .normalization import BatchNorm, GroupNorm, LayerNorm\nfrom .pooling import avg_pool, max_pool\nfrom .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\nfrom .stochastic import Dropout\nfrom .transforms import jit, named_call, remat, scan, vmap\nfrom .initializers import zeros, ones\n\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}]} | 1,141 | 202 |
gh_patches_debug_19296 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pass `Accept` header in `contrib.utils.download`
I'm copying a comment here that I made in the [HEPData Zulip chat](https://hepdata.zulipchat.com/#narrow/stream/226203-pyhf/topic/DOIs/near/213610306) on 16th October 2020.
Regarding the issue (HEPData/hepdata#162) to mint DOIs for all local resource files attached to a submission, if we do eventually get around to addressing it, we would probably redirect the DOI to a landing page for the resource file, rather than to the resource file itself (e.g. the pyhf tarball). This would follow the DataCite [Best Practices for DOI Landing Pages](https://support.datacite.org/docs/landing-pages), e.g. "[DOIs should resolve to a landing page, not directly to the content](https://support.datacite.org/docs/landing-pages#dois-should-resolve-to-a-landing-page-not-directly-to-the-content)", which I'm currently breaking for the two manually minted DOIs. In the issue (HEPdata/hepdata#162) I mentioned the possibility of using [DataCite Content Negotiation](https://support.datacite.org/docs/datacite-content-resolver) to redirect to the resource file itself, but the linked page now says "Custom content types are no longer supported since January 1st, 2020". I thought maybe content negotiation could be used to return the `.tar.gz` file directly, but the intended purpose is to retrieve DOI metadata in different formats, not to provide the content itself. In anticipation of possible future changes, I'd recommend that you use the URL directly rather than the DOI in pyhf download scripts and documentation (e.g. revert #1109).
</issue>
<code>
[start of src/pyhf/contrib/utils.py]
1 """Helper utilities for common tasks."""
2
3 from urllib.parse import urlparse
4 import tarfile
5 from io import BytesIO
6 import logging
7 from pyhf import exceptions
8
9 log = logging.getLogger(__name__)
10
11 __all__ = ["download"]
12
13
14 def __dir__():
15 return __all__
16
17
18 try:
19 import requests
20
21 def download(archive_url, output_directory, force=False, compress=False):
22 """
23 Download the patchset archive from the remote URL and extract it in a
24 directory at the path given.
25
26 Example:
27
28 >>> from pyhf.contrib.utils import download
29 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")
30 >>> import os
31 >>> sorted(os.listdir("1Lbb-likelihoods"))
32 ['BkgOnly.json', 'README.md', 'patchset.json']
33 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True)
34 >>> import glob
35 >>> glob.glob("1Lbb-likelihoods.tar.gz")
36 ['1Lbb-likelihoods.tar.gz']
37
38 Args:
39 archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.
40 output_directory (:obj:`str`): Name of the directory to unpack the archive into.
41 force (:obj:`bool`): Force download from non-approved host. Default is ``False``.
42 compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.
43
44 Raises:
45 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
46 """
47 if not force:
48 valid_hosts = ["www.hepdata.net", "doi.org"]
49 netloc = urlparse(archive_url).netloc
50 if netloc not in valid_hosts:
51 raise exceptions.InvalidArchiveHost(
52 f"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\n"
53 + "To download an archive from this host use the --force option."
54 )
55
56 with requests.get(archive_url) as response:
57 if compress:
58 with open(output_directory, "wb") as archive:
59 archive.write(response.content)
60 else:
61 with tarfile.open(
62 mode="r|gz", fileobj=BytesIO(response.content)
63 ) as archive:
64 archive.extractall(output_directory)
65
66
67 except ModuleNotFoundError:
68 log.error(
69 "\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download"
70 + "\nPlease install with: python -m pip install pyhf[contrib]\n",
71 exc_info=True,
72 )
73
[end of src/pyhf/contrib/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -53,7 +53,18 @@
+ "To download an archive from this host use the --force option."
)
- with requests.get(archive_url) as response:
+ # c.f. https://github.com/scikit-hep/pyhf/issues/1491
+ # > Use content negotiation at the landing page for the resource that
+ # > the DOI resolves to. DataCite content negotiation is forwarding all
+ # > requests with unknown content types to the URL registered in the
+ # > handle system.
+ # c.f. https://blog.datacite.org/changes-to-doi-content-negotiation/
+ # The HEPData landing page for the resource file can check if the Accept
+ # request HTTP header matches the content type of the resource file and
+ # return the content directly if so.
+ with requests.get(
+ archive_url, headers={"Accept": "application/x-tar"}
+ ) as response:
if compress:
with open(output_directory, "wb") as archive:
archive.write(response.content)
| {"golden_diff": "diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py\n--- a/src/pyhf/contrib/utils.py\n+++ b/src/pyhf/contrib/utils.py\n@@ -53,7 +53,18 @@\n + \"To download an archive from this host use the --force option.\"\n )\n \n- with requests.get(archive_url) as response:\n+ # c.f. https://github.com/scikit-hep/pyhf/issues/1491\n+ # > Use content negotiation at the landing page for the resource that\n+ # > the DOI resolves to. DataCite content negotiation is forwarding all\n+ # > requests with unknown content types to the URL registered in the\n+ # > handle system.\n+ # c.f. https://blog.datacite.org/changes-to-doi-content-negotiation/\n+ # The HEPData landing page for the resource file can check if the Accept\n+ # request HTTP header matches the content type of the resource file and\n+ # return the content directly if so.\n+ with requests.get(\n+ archive_url, headers={\"Accept\": \"application/x-tar\"}\n+ ) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n", "issue": "Pass `Accept` header in `contrib.utils.download`\nI'm copying a comment here that I made in the [HEPData Zulip chat](https://hepdata.zulipchat.com/#narrow/stream/226203-pyhf/topic/DOIs/near/213610306) on 16th October 2020.\r\n\r\nRegarding the issue (HEPData/hepdata#162) to mint DOIs for all local resource files attached to a submission, if we do eventually get around to addressing it, we would probably redirect the DOI to a landing page for the resource file, rather than to the resource file itself (e.g. the pyhf tarball). This would follow the DataCite [Best Practices for DOI Landing Pages](https://support.datacite.org/docs/landing-pages), e.g. \"[DOIs should resolve to a landing page, not directly to the content](https://support.datacite.org/docs/landing-pages#dois-should-resolve-to-a-landing-page-not-directly-to-the-content)\", which I'm currently breaking for the two manually minted DOIs. In the issue (HEPdata/hepdata#162) I mentioned the possibility of using [DataCite Content Negotiation](https://support.datacite.org/docs/datacite-content-resolver) to redirect to the resource file itself, but the linked page now says \"Custom content types are no longer supported since January 1st, 2020\". I thought maybe content negotiation could be used to return the `.tar.gz` file directly, but the intended purpose is to retrieve DOI metadata in different formats, not to provide the content itself. In anticipation of possible future changes, I'd recommend that you use the URL directly rather than the DOI in pyhf download scripts and documentation (e.g. 
revert #1109).\n", "before_files": [{"content": "\"\"\"Helper utilities for common tasks.\"\"\"\n\nfrom urllib.parse import urlparse\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom pyhf import exceptions\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"download\"]\n\n\ndef __dir__():\n return __all__\n\n\ntry:\n import requests\n\n def download(archive_url, output_directory, force=False, compress=False):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n >>> from pyhf.contrib.utils import download\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods\")\n >>> import os\n >>> sorted(os.listdir(\"1Lbb-likelihoods\"))\n ['BkgOnly.json', 'README.md', 'patchset.json']\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods.tar.gz\", compress=True)\n >>> import glob\n >>> glob.glob(\"1Lbb-likelihoods.tar.gz\")\n ['1Lbb-likelihoods.tar.gz']\n\n Args:\n archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.\n output_directory (:obj:`str`): Name of the directory to unpack the archive into.\n force (:obj:`bool`): Force download from non-approved host. Default is ``False``.\n compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n if not force:\n valid_hosts = [\"www.hepdata.net\", \"doi.org\"]\n netloc = urlparse(archive_url).netloc\n if netloc not in valid_hosts:\n raise exceptions.InvalidArchiveHost(\n f\"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\\n\"\n + \"To download an archive from this host use the --force option.\"\n )\n\n with requests.get(archive_url) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n else:\n with tarfile.open(\n mode=\"r|gz\", fileobj=BytesIO(response.content)\n ) as archive:\n archive.extractall(output_directory)\n\n\nexcept ModuleNotFoundError:\n log.error(\n \"\\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\",\n exc_info=True,\n )\n", "path": "src/pyhf/contrib/utils.py"}]} | 1,691 | 281 |
gh_patches_debug_50213 | rasdani/github-patches | git_diff | pex-tool__pex-1590 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.64
On the docket:
+ [x] Pex does not support mac universal2 wheels #1587
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.63"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.63"
+__version__ = "2.1.64"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.63\"\n+__version__ = \"2.1.64\"\n", "issue": "Release 2.1.64\nOn the docket:\r\n+ [x] Pex does not support mac universal2 wheels #1587 \r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.63\"\n", "path": "pex/version.py"}]} | 618 | 97 |
gh_patches_debug_8532 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2994 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom reports removed from 'Reports' general view
Remove custom reports from the 'Reports' general view, i.e. Plan Finland or EUTF reports should not be visible to other partners.

</issue>
<code>
[start of akvo/rest/views/report.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db.models import Q
8 from rest_framework.decorators import api_view
9 from rest_framework.response import Response
10
11 from akvo.rsr.models import Report, ReportFormat
12 from ..serializers import ReportSerializer, ReportFormatSerializer
13
14
15 @api_view(['GET'])
16 def reports(request):
17 """
18 A view for displaying all report information, sorted by title.
19 """
20
21 user = request.user
22 is_admin = user.is_active and (user.is_superuser or user.is_admin)
23 reports = Report.objects.all()
24 if not is_admin:
25 # Show only those reports that the user is allowed to see
26 reports = reports.filter(
27 Q(organisations=None) | Q(organisations__in=user.approved_organisations())
28 ).distinct()
29
30 # FIXME: Use a viewset instead?
31 return Response({
32 'count': reports.count(),
33 'results': [ReportSerializer(r).data for r in reports.order_by('title')],
34 })
35
36
37 @api_view(['GET'])
38 def report_formats(request):
39 """
40 A view for displaying all report format information.
41 """
42 return Response({
43 'count': ReportFormat.objects.all().count(),
44 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],
45 })
46
[end of akvo/rest/views/report.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py
--- a/akvo/rest/views/report.py
+++ b/akvo/rest/views/report.py
@@ -23,8 +23,9 @@
reports = Report.objects.all()
if not is_admin:
# Show only those reports that the user is allowed to see
+ approved_orgs = user.approved_organisations() if not user.is_anonymous() else []
reports = reports.filter(
- Q(organisations=None) | Q(organisations__in=user.approved_organisations())
+ Q(organisations=None) | Q(organisations__in=approved_orgs)
).distinct()
# FIXME: Use a viewset instead?
| {"golden_diff": "diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py\n--- a/akvo/rest/views/report.py\n+++ b/akvo/rest/views/report.py\n@@ -23,8 +23,9 @@\n reports = Report.objects.all()\n if not is_admin:\n # Show only those reports that the user is allowed to see\n+ approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n- Q(organisations=None) | Q(organisations__in=user.approved_organisations())\n+ Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n \n # FIXME: Use a viewset instead?\n", "issue": "Custom reports removed from 'Reports' general view\nRemove custom reports from 'Reports' general view, i.e. Plan Finland or EUTF reports are not visible for other partners. \r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\n\n\n@api_view(['GET'])\ndef reports(request):\n \"\"\"\n A view for displaying all report information, sorted by title.\n \"\"\"\n\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n reports = Report.objects.all()\n if not is_admin:\n # Show only those reports that the user is allowed to see\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=user.approved_organisations())\n ).distinct()\n\n # FIXME: Use a viewset instead?\n return Response({\n 'count': reports.count(),\n 'results': [ReportSerializer(r).data for r in reports.order_by('title')],\n })\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n", "path": "akvo/rest/views/report.py"}]} | 1,059 | 167 |
gh_patches_debug_44788 | rasdani/github-patches | git_diff | aws__aws-cli-2537 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reading/writing EMR key_pair_file configuration options behaves oddly
Version:
```
$ aws --version
aws-cli/1.11.75 Python/2.7.10 Darwin/15.6.0 botocore/1.5.38
```
[It's suggested that one can set a default key_pair_file argument here](https://github.com/aws/aws-cli/blob/master/awscli/customizations/emr/ssh.py#L25) by running `aws configure set emr.key_pair_file <value>`
By that token, I would expect `aws configure get emr.key_pair_file` to retrieve this item and to exit with an exit code of 0.
```
$ aws configure set emr.key_pair_file /tmp/foo
$ cat config
[default]
emr =
key_pair_file = /tmp/foo
$ aws configure get emr.key_pair_file
$ echo $?
1
```
As you can see, setting this and trying to retrieve it exits with a non-zero exit code, which makes it a pain to check whether this config item is set in shell scripts before running other EMR-based commands (such as create-cluster).
As an aside, trying to get the top level `emr` config item fails too;
```
$ aws configure get emr
expected a character buffer object
```
Additionally, this item doesn't show up when `aws configure list` is run either:
```
$ aws configure list
Name Value Type Location
---- ----- ---- --------
profile <not set> None None
access_key REDACTED shared-credentials-file
secret_key REDACTED shared-credentials-file
region <not set> None None
```
</issue>
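The behaviour reported here comes from `emr.key_pair_file` living in a nested sub-section of the profile, which the original dotted lookup never descended into. Below is a toy sketch of such a dotted traversal over made-up data; the real logic lives in `awscli/customizations/configure/get.py`, listed next.

```python
# Toy dotted lookup over a parsed profile; the data is illustrative only.
profiles = {
    'default': {
        'region': 'us-west-2',
        'emr': {'key_pair_file': '/tmp/foo'},  # nested sub-section from ~/.aws/config
    }
}


def get_dotted(varname, profile='default'):
    value = profiles.get(profile, {})
    for part in varname.split('.'):
        if not isinstance(value, dict):
            return None
        value = value.get(part)
    return value


assert get_dotted('emr.key_pair_file') == '/tmp/foo'
assert isinstance(get_dotted('emr'), dict)  # a section, not a printable scalar
```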
<code>
[start of awscli/customizations/configure/get.py]
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import sys
14
15 from awscli.customizations.commands import BasicCommand
16
17 from . import PREDEFINED_SECTION_NAMES
18
19
20 class ConfigureGetCommand(BasicCommand):
21 NAME = 'get'
22 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
23 '_description.rst')
24 SYNOPSIS = ('aws configure get varname [--profile profile-name]')
25 EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
26 ARG_TABLE = [
27 {'name': 'varname',
28 'help_text': 'The name of the config value to retrieve.',
29 'action': 'store',
30 'cli_type_name': 'string', 'positional_arg': True},
31 ]
32
33 def __init__(self, session, stream=sys.stdout):
34 super(ConfigureGetCommand, self).__init__(session)
35 self._stream = stream
36
37 def _run_main(self, args, parsed_globals):
38 varname = args.varname
39 value = None
40 if '.' not in varname:
41 # get_scoped_config() returns the config variables in the config
42 # file (not the logical_var names), which is what we want.
43 config = self._session.get_scoped_config()
44 value = config.get(varname)
45 else:
46 value = self._get_dotted_config_value(varname)
47 if value is not None:
48 self._stream.write(value)
49 self._stream.write('\n')
50 return 0
51 else:
52 return 1
53
54 def _get_dotted_config_value(self, varname):
55 parts = varname.split('.')
56 num_dots = varname.count('.')
57 # Logic to deal with predefined sections like [preview], [plugin] and etc.
58 if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
59 full_config = self._session.full_config
60 section, config_name = varname.split('.')
61 value = full_config.get(section, {}).get(config_name)
62 if value is None:
63 # Try to retrieve it from the profile config.
64 value = full_config['profiles'].get(
65 section, {}).get(config_name)
66 return value
67 if parts[0] == 'profile':
68 profile_name = parts[1]
69 config_name = parts[2]
70 remaining = parts[3:]
71 # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)
72 # If not, go further to check if varname starts with a known profile name
73 elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):
74 profile_name = parts[0]
75 config_name = parts[1]
76 remaining = parts[2:]
77 else:
78 profile_name = self._session.get_config_variable('profile')
79 config_name = parts[0]
80 remaining = parts[1:]
81
82 value = self._session.full_config['profiles'].get(
83 profile_name, {}).get(config_name)
84 if len(remaining) == 1:
85 try:
86 value = value.get(remaining[-1])
87 except AttributeError:
88 value = None
89 return value
90
[end of awscli/customizations/configure/get.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/configure/get.py b/awscli/customizations/configure/get.py
--- a/awscli/customizations/configure/get.py
+++ b/awscli/customizations/configure/get.py
@@ -11,17 +11,21 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
+import logging
from awscli.customizations.commands import BasicCommand
+from awscli.compat import six
from . import PREDEFINED_SECTION_NAMES
+LOG = logging.getLogger(__name__)
+
class ConfigureGetCommand(BasicCommand):
NAME = 'get'
DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
'_description.rst')
- SYNOPSIS = ('aws configure get varname [--profile profile-name]')
+ SYNOPSIS = 'aws configure get varname [--profile profile-name]'
EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
ARG_TABLE = [
{'name': 'varname',
@@ -30,13 +34,14 @@
'cli_type_name': 'string', 'positional_arg': True},
]
- def __init__(self, session, stream=sys.stdout):
+ def __init__(self, session, stream=sys.stdout, error_stream=sys.stderr):
super(ConfigureGetCommand, self).__init__(session)
self._stream = stream
+ self._error_stream = error_stream
def _run_main(self, args, parsed_globals):
varname = args.varname
- value = None
+
if '.' not in varname:
# get_scoped_config() returns the config variables in the config
# file (not the logical_var names), which is what we want.
@@ -44,17 +49,30 @@
value = config.get(varname)
else:
value = self._get_dotted_config_value(varname)
- if value is not None:
+
+ LOG.debug(u'Config value retrieved: %s' % value)
+
+ if isinstance(value, six.string_types):
self._stream.write(value)
self._stream.write('\n')
return 0
+ elif isinstance(value, dict):
+ # TODO: add support for this. We would need to print it off in
+ # the same format as the config file.
+ self._error_stream.write(
+ 'varname (%s) must reference a value, not a section or '
+ 'sub-section.' % varname
+ )
+ return 1
else:
return 1
def _get_dotted_config_value(self, varname):
parts = varname.split('.')
num_dots = varname.count('.')
- # Logic to deal with predefined sections like [preview], [plugin] and etc.
+
+ # Logic to deal with predefined sections like [preview], [plugin] and
+ # etc.
if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
full_config = self._session.full_config
section, config_name = varname.split('.')
@@ -64,18 +82,23 @@
value = full_config['profiles'].get(
section, {}).get(config_name)
return value
+
if parts[0] == 'profile':
profile_name = parts[1]
config_name = parts[2]
remaining = parts[3:]
- # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)
- # If not, go further to check if varname starts with a known profile name
- elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):
+ # Check if varname starts with 'default' profile (e.g.
+ # default.emr-dev.emr.instance_profile) If not, go further to check
+ # if varname starts with a known profile name
+ elif parts[0] == 'default' or (
+ parts[0] in self._session.full_config['profiles']):
profile_name = parts[0]
config_name = parts[1]
remaining = parts[2:]
else:
profile_name = self._session.get_config_variable('profile')
+ if profile_name is None:
+ profile_name = 'default'
config_name = parts[0]
remaining = parts[1:]
| {"golden_diff": "diff --git a/awscli/customizations/configure/get.py b/awscli/customizations/configure/get.py\n--- a/awscli/customizations/configure/get.py\n+++ b/awscli/customizations/configure/get.py\n@@ -11,17 +11,21 @@\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n import sys\n+import logging\n \n from awscli.customizations.commands import BasicCommand\n+from awscli.compat import six\n \n from . import PREDEFINED_SECTION_NAMES\n \n+LOG = logging.getLogger(__name__)\n+\n \n class ConfigureGetCommand(BasicCommand):\n NAME = 'get'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',\n '_description.rst')\n- SYNOPSIS = ('aws configure get varname [--profile profile-name]')\n+ SYNOPSIS = 'aws configure get varname [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n@@ -30,13 +34,14 @@\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n \n- def __init__(self, session, stream=sys.stdout):\n+ def __init__(self, session, stream=sys.stdout, error_stream=sys.stderr):\n super(ConfigureGetCommand, self).__init__(session)\n self._stream = stream\n+ self._error_stream = error_stream\n \n def _run_main(self, args, parsed_globals):\n varname = args.varname\n- value = None\n+\n if '.' not in varname:\n # get_scoped_config() returns the config variables in the config\n # file (not the logical_var names), which is what we want.\n@@ -44,17 +49,30 @@\n value = config.get(varname)\n else:\n value = self._get_dotted_config_value(varname)\n- if value is not None:\n+\n+ LOG.debug(u'Config value retrieved: %s' % value)\n+\n+ if isinstance(value, six.string_types):\n self._stream.write(value)\n self._stream.write('\\n')\n return 0\n+ elif isinstance(value, dict):\n+ # TODO: add support for this. We would need to print it off in\n+ # the same format as the config file.\n+ self._error_stream.write(\n+ 'varname (%s) must reference a value, not a section or '\n+ 'sub-section.' % varname\n+ )\n+ return 1\n else:\n return 1\n \n def _get_dotted_config_value(self, varname):\n parts = varname.split('.')\n num_dots = varname.count('.')\n- # Logic to deal with predefined sections like [preview], [plugin] and etc.\n+\n+ # Logic to deal with predefined sections like [preview], [plugin] and\n+ # etc.\n if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:\n full_config = self._session.full_config\n section, config_name = varname.split('.')\n@@ -64,18 +82,23 @@\n value = full_config['profiles'].get(\n section, {}).get(config_name)\n return value\n+\n if parts[0] == 'profile':\n profile_name = parts[1]\n config_name = parts[2]\n remaining = parts[3:]\n- # Check if varname starts with 'default' profile (e.g. 
default.emr-dev.emr.instance_profile)\n- # If not, go further to check if varname starts with a known profile name\n- elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):\n+ # Check if varname starts with 'default' profile (e.g.\n+ # default.emr-dev.emr.instance_profile) If not, go further to check\n+ # if varname starts with a known profile name\n+ elif parts[0] == 'default' or (\n+ parts[0] in self._session.full_config['profiles']):\n profile_name = parts[0]\n config_name = parts[1]\n remaining = parts[2:]\n else:\n profile_name = self._session.get_config_variable('profile')\n+ if profile_name is None:\n+ profile_name = 'default'\n config_name = parts[0]\n remaining = parts[1:]\n", "issue": "Reading/writing EMR key_pair_file configuration options behaves oddly\nVersion:\r\n\r\n```\r\n$ aws --version\r\naws-cli/1.11.75 Python/2.7.10 Darwin/15.6.0 botocore/1.5.38\r\n```\r\n\r\n[It's suggested that one can set a default key_pair_file argument here](https://github.com/aws/aws-cli/blob/master/awscli/customizations/emr/ssh.py#L25) by running `aws configure set emr.key_pair_file <value>`\r\n\r\nBy that token, I would expect `aws configure get emr.key_pair_file` to retrieve this item and to exit with a exit code of 0.\r\n\r\n```\r\n$ aws configure set emr.key_pair_file /tmp/foo\r\n\r\n$ cat config\r\n[default]\r\nemr =\r\n key_pair_file = /tmp/foo\r\n\r\n$ aws configure get emr.key_pair_file\r\n\r\n$ echo $?\r\n1\r\n```\r\n\r\nAs you can see, setting this and trying to retrieve it exits with a non-zero exit code which makes it a pain to check for this config item being set as part of shell scripts prior to do other EMR-based commands (such as create-cluster).\r\n\r\nAs an aside, trying to get the top level `emr` config item fails too;\r\n\r\n```\r\n$ aws configure get emr\r\n\r\nexpected a character buffer object\r\n```\r\n\r\nAdditionally this item doesn't show up when `aws configure list` is run either;\r\n\r\n```\r\n$ aws configure list\r\n Name Value Type Location\r\n ---- ----- ---- --------\r\n profile <not set> None None\r\naccess_key REDACTED shared-credentials-file\r\nsecret_key REDACTED shared-credentials-file\r\n region <not set> None None\r\n\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\n\nfrom awscli.customizations.commands import BasicCommand\n\nfrom . 
import PREDEFINED_SECTION_NAMES\n\n\nclass ConfigureGetCommand(BasicCommand):\n NAME = 'get'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',\n '_description.rst')\n SYNOPSIS = ('aws configure get varname [--profile profile-name]')\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to retrieve.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n\n def __init__(self, session, stream=sys.stdout):\n super(ConfigureGetCommand, self).__init__(session)\n self._stream = stream\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = None\n if '.' not in varname:\n # get_scoped_config() returns the config variables in the config\n # file (not the logical_var names), which is what we want.\n config = self._session.get_scoped_config()\n value = config.get(varname)\n else:\n value = self._get_dotted_config_value(varname)\n if value is not None:\n self._stream.write(value)\n self._stream.write('\\n')\n return 0\n else:\n return 1\n\n def _get_dotted_config_value(self, varname):\n parts = varname.split('.')\n num_dots = varname.count('.')\n # Logic to deal with predefined sections like [preview], [plugin] and etc.\n if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:\n full_config = self._session.full_config\n section, config_name = varname.split('.')\n value = full_config.get(section, {}).get(config_name)\n if value is None:\n # Try to retrieve it from the profile config.\n value = full_config['profiles'].get(\n section, {}).get(config_name)\n return value\n if parts[0] == 'profile':\n profile_name = parts[1]\n config_name = parts[2]\n remaining = parts[3:]\n # Check if varname starts with 'default' profile (e.g. default.emr-dev.emr.instance_profile)\n # If not, go further to check if varname starts with a known profile name\n elif parts[0] == 'default' or (parts[0] in self._session.full_config['profiles']):\n profile_name = parts[0]\n config_name = parts[1]\n remaining = parts[2:]\n else:\n profile_name = self._session.get_config_variable('profile')\n config_name = parts[0]\n remaining = parts[1:]\n\n value = self._session.full_config['profiles'].get(\n profile_name, {}).get(config_name)\n if len(remaining) == 1:\n try:\n value = value.get(remaining[-1])\n except AttributeError:\n value = None\n return value\n", "path": "awscli/customizations/configure/get.py"}]} | 1,899 | 975 |
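The patch above makes `aws configure get` resolve dotted names such as `emr.key_pair_file` into nested profile sections and print only leaf string values. A minimal sketch of that lookup idea, with purely illustrative names rather than the real awscli internals:

```python
# Illustrative sketch of dotted-name resolution against a nested config
# mapping; names and structure are assumptions, not the awscli implementation.
def resolve_dotted(config, varname):
    value = config
    for part in varname.split("."):
        if not isinstance(value, dict):
            return None  # walked past a leaf value
        value = value.get(part)
        if value is None:
            return None
    return value


profile = {"region": "us-west-2", "emr": {"key_pair_file": "/tmp/foo"}}
print(resolve_dotted(profile, "emr.key_pair_file"))  # -> /tmp/foo
print(resolve_dotted(profile, "emr"))  # -> a dict (a section, which the real command rejects)
```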
gh_patches_debug_30466 | rasdani/github-patches | git_diff | vaexio__vaex-1150 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG-REPORT] TypeError: can't pickle vaex.superutils.ordered_set
**Description**
If I use `df.func.where` with `isin`, I can't pickle the resulting state.
This is for machine learning pipelines.
reproduce:
```
import vaex
import pickle
df = vaex.from_dict({'a':[1,2,3]})
df['b'] = df.func.where(df['a'].isin([1]),1,2)
pickle.dumps(df.state_get())
...
TypeError: can't pickle vaex.superutils.ordered_set_int64 objects
```
**Software information**
- vaex-core==4.0.0a11
- Vaex was installed via: pip
- OS: Mac
</issue>
<code>
[start of packages/vaex-core/vaex/hash.py]
1 import os
2
3
4 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
5 if not on_rtd:
6 from .superutils import *
7 from . import superutils
8 ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])
9
10
11 def counter_type_from_dtype(dtype, transient=True):
12 from .array_types import is_string_type
13 if is_string_type(dtype):
14 if transient:
15 postfix = 'string'
16 else:
17 postfix = 'string' # view not support atm
18 else:
19 postfix = str(dtype)
20 if postfix == '>f8':
21 postfix = 'float64'
22 if postfix == 'double': # arrow
23 postfix = 'float64'
24 name = 'counter_' + postfix
25 return globals()[name]
26
27 def ordered_set_type_from_dtype(dtype, transient=True):
28 from .array_types import is_string_type
29 if is_string_type(dtype):
30 if transient:
31 postfix = 'string'
32 else:
33 postfix = 'string' # not support atm
34 else:
35 postfix = str(dtype)
36 if postfix == '>f8':
37 postfix = 'float64'
38 name = 'ordered_set_' + postfix
39 return globals()[name]
40
41 def index_type_from_dtype(dtype, transient=True, prime_growth=False):
42 from .array_types import is_string_type
43 if is_string_type(dtype):
44 if transient:
45 postfix = 'string'
46 else:
47 postfix = 'string' # not support atm
48 else:
49 postfix = str(dtype)
50 if postfix == '>f8':
51 postfix = 'float64'
52 name = 'index_hash_' + postfix
53 if prime_growth:
54 name += "_prime_growth"
55 return globals()[name]
56
57 # from numpy import *
58 # import IPython
59 # IPython.embed()
[end of packages/vaex-core/vaex/hash.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-core/vaex/hash.py b/packages/vaex-core/vaex/hash.py
--- a/packages/vaex-core/vaex/hash.py
+++ b/packages/vaex-core/vaex/hash.py
@@ -1,12 +1,19 @@
import os
+import copyreg
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
from .superutils import *
from . import superutils
+
ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])
+ def pickle(x):
+ return type(x), (x.extract(), x.count, x.nan_count, x.null_count)
+ for cls in ordered_set:
+ copyreg.pickle(cls, pickle)
+
def counter_type_from_dtype(dtype, transient=True):
from .array_types import is_string_type
@@ -24,6 +31,7 @@
name = 'counter_' + postfix
return globals()[name]
+
def ordered_set_type_from_dtype(dtype, transient=True):
from .array_types import is_string_type
if is_string_type(dtype):
@@ -38,6 +46,7 @@
name = 'ordered_set_' + postfix
return globals()[name]
+
def index_type_from_dtype(dtype, transient=True, prime_growth=False):
from .array_types import is_string_type
if is_string_type(dtype):
@@ -53,7 +62,3 @@
if prime_growth:
name += "_prime_growth"
return globals()[name]
-
-# from numpy import *
-# import IPython
-# IPython.embed()
\ No newline at end of file
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/hash.py b/packages/vaex-core/vaex/hash.py\n--- a/packages/vaex-core/vaex/hash.py\n+++ b/packages/vaex-core/vaex/hash.py\n@@ -1,12 +1,19 @@\n import os\n+import copyreg\n \n \n on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n if not on_rtd:\n from .superutils import *\n from . import superutils\n+\n ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])\n \n+ def pickle(x):\n+ return type(x), (x.extract(), x.count, x.nan_count, x.null_count)\n+ for cls in ordered_set:\n+ copyreg.pickle(cls, pickle)\n+\n \n def counter_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n@@ -24,6 +31,7 @@\n name = 'counter_' + postfix\n return globals()[name]\n \n+\n def ordered_set_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n@@ -38,6 +46,7 @@\n name = 'ordered_set_' + postfix\n return globals()[name]\n \n+\n def index_type_from_dtype(dtype, transient=True, prime_growth=False):\n from .array_types import is_string_type\n if is_string_type(dtype):\n@@ -53,7 +62,3 @@\n if prime_growth:\n name += \"_prime_growth\"\n return globals()[name]\n-\n-# from numpy import *\n-# import IPython\n-# IPython.embed()\n\\ No newline at end of file\n", "issue": "[BUG-REPORT] TypeError: can't pickle vaex.superutils.ordered_set\n**Description**\r\nIf I use `df.func.where` with `isin`, I can't pickle the resulted state.\r\nThis is for machine learning pipelines. \r\n\r\nreproduce:\r\n```\r\nimport vaex\r\nimport pickle\r\n\r\ndf = vaex.from_dict({'a':[1,2,3]})\r\ndf['b'] = df.func.where(df['a'].isin([1]),1,2)\r\npickle.dumps(df.state_get())\r\n...\r\nTypeError: can't pickle vaex.superutils.ordered_set_int64 objects\r\n```\r\n\r\n\r\n**Software information**\r\n - vaex-core==4.0.0a11\r\n - Vaex was installed via: pip\r\n - OS: Mac\r\n \n", "before_files": [{"content": "import os\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd:\n from .superutils import *\n from . import superutils\n ordered_set = tuple([cls for name, cls in vars(superutils).items() if name.startswith('ordered_set')])\n\n\ndef counter_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # view not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n if postfix == 'double': # arrow\n postfix = 'float64'\n name = 'counter_' + postfix\n return globals()[name]\n\ndef ordered_set_type_from_dtype(dtype, transient=True):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'ordered_set_' + postfix\n return globals()[name]\n\ndef index_type_from_dtype(dtype, transient=True, prime_growth=False):\n from .array_types import is_string_type\n if is_string_type(dtype):\n if transient:\n postfix = 'string'\n else:\n postfix = 'string' # not support atm\n else:\n postfix = str(dtype)\n if postfix == '>f8':\n postfix = 'float64'\n name = 'index_hash_' + postfix\n if prime_growth:\n name += \"_prime_growth\"\n return globals()[name]\n\n# from numpy import *\n# import IPython\n# IPython.embed()", "path": "packages/vaex-core/vaex/hash.py"}]} | 1,214 | 382 |
gh_patches_debug_12726 | rasdani/github-patches | git_diff | enthought__chaco-678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove chaco.chaco_version file
https://github.com/enthought/chaco/blob/fdd858aa6dbc76addb50d011fb81e879ce8e0355/chaco/chaco_version.py
We now create `chaco._version` file when installing the package (in `setup.py`) so we don't need this additional `chaco.chaco_version` file anymore.
</issue>
<code>
[start of chaco/chaco_version.py]
1 # ------------------------------------------------------------------------------
2 # Copyright (c) 2005, Enthought, Inc.
3 # All rights reserved.
4 #
5 # This software is provided without warranty under the terms of the BSD
6 # license included in LICENSE.txt and may be redistributed only
7 # under the conditions described in the aforementioned license. The license
8 # is also available online at http://www.enthought.com/licenses/BSD.txt
9 # Thanks for using Enthought open source!
10 #
11 # Author: Enthought, Inc.
12 # Description: <Enthought library component>
13 # ------------------------------------------------------------------------------
14 """ Defines version numbering for the Chaco package.
15 """
16 major = 2
17 minor = 0
18 micro = 9
19
20 version = "%s.%s.%s" % (major, minor, micro)
21
22
23 release_level = "beta"
24 branch = ""
25 revision = version
26
[end of chaco/chaco_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chaco/chaco_version.py b/chaco/chaco_version.py
deleted file mode 100644
--- a/chaco/chaco_version.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# ------------------------------------------------------------------------------
-# Copyright (c) 2005, Enthought, Inc.
-# All rights reserved.
-#
-# This software is provided without warranty under the terms of the BSD
-# license included in LICENSE.txt and may be redistributed only
-# under the conditions described in the aforementioned license. The license
-# is also available online at http://www.enthought.com/licenses/BSD.txt
-# Thanks for using Enthought open source!
-#
-# Author: Enthought, Inc.
-# Description: <Enthought library component>
-# ------------------------------------------------------------------------------
-""" Defines version numbering for the Chaco package.
-"""
-major = 2
-minor = 0
-micro = 9
-
-version = "%s.%s.%s" % (major, minor, micro)
-
-
-release_level = "beta"
-branch = ""
-revision = version
| {"golden_diff": "diff --git a/chaco/chaco_version.py b/chaco/chaco_version.py\ndeleted file mode 100644\n--- a/chaco/chaco_version.py\n+++ /dev/null\n@@ -1,25 +0,0 @@\n-# ------------------------------------------------------------------------------\n-# Copyright (c) 2005, Enthought, Inc.\n-# All rights reserved.\n-#\n-# This software is provided without warranty under the terms of the BSD\n-# license included in LICENSE.txt and may be redistributed only\n-# under the conditions described in the aforementioned license. The license\n-# is also available online at http://www.enthought.com/licenses/BSD.txt\n-# Thanks for using Enthought open source!\n-#\n-# Author: Enthought, Inc.\n-# Description: <Enthought library component>\n-# ------------------------------------------------------------------------------\n-\"\"\" Defines version numbering for the Chaco package.\n-\"\"\"\n-major = 2\n-minor = 0\n-micro = 9\n-\n-version = \"%s.%s.%s\" % (major, minor, micro)\n-\n-\n-release_level = \"beta\"\n-branch = \"\"\n-revision = version\n", "issue": "Remove chaco.chaco_version file\nhttps://github.com/enthought/chaco/blob/fdd858aa6dbc76addb50d011fb81e879ce8e0355/chaco/chaco_version.py\r\n\r\nWe now create `chaco._version` file when installing the package (in `setup.py`) so we don't need this additional `chaco.chaco_version` file anymore.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------\n# Copyright (c) 2005, Enthought, Inc.\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only\n# under the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n# Thanks for using Enthought open source!\n#\n# Author: Enthought, Inc.\n# Description: <Enthought library component>\n# ------------------------------------------------------------------------------\n\"\"\" Defines version numbering for the Chaco package.\n\"\"\"\nmajor = 2\nminor = 0\nmicro = 9\n\nversion = \"%s.%s.%s\" % (major, minor, micro)\n\n\nrelease_level = \"beta\"\nbranch = \"\"\nrevision = version\n", "path": "chaco/chaco_version.py"}]} | 847 | 246 |
gh_patches_debug_41891 | rasdani/github-patches | git_diff | streamlink__streamlink-2134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Huomao plugin not work
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
<!-- Replace [ ] with [x] in order to check the box -->
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
I found that the huomao plugin does not seem to work: I can use a browser to watch the stream, but
streamlink says there is no playable stream.
<!-- Explain the plugin issue as thoroughly as you can. -->
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
1. https://www.huomao.com/9755
2. https://www.huomao.com/777777
3. https://www.huomao.com/888
### Log output
<!--
TEXT LOG OUTPUT IS REQUIRED for a plugin issue!
Use the `--loglevel debug` parameter and avoid using parameters which suppress log output.
https://streamlink.github.io/cli.html#cmdoption-l
Make sure to **remove usernames and passwords**
You can copy the output to https://gist.github.com/ or paste it below.
-->
```
[cli][info] Found matching plugin huomao for URL https://www.huomao.com/888
[plugin.huomao][error] Failed to extract stream_info.
error: No playable streams found on this URL: https://www.huomao.com/888
```
</issue>
<code>
[start of src/streamlink/plugins/huomao.py]
1 """
2 NOTE: Since a documented API is nowhere to be found for Huomao; this plugin
3 simply extracts the videos stream_id, stream_url and stream_quality by
4 scraping the HTML and JS of one of Huomaos mobile webpages.
5
6 When viewing a stream on huomao.com, the base URL references a room_id. This
7 room_id is mapped one-to-one to a stream_id which references the actual .flv
8 video. Both stream_id, stream_url and stream_quality can be found in the
9 HTML and JS source of the mobile_page. Since one stream can occur in many
10 different qualities, we scrape all stream_url and stream_quality occurrences
11 and return each option to the user.
12 """
13
14 import re
15
16 from streamlink.plugin import Plugin
17 from streamlink.stream import HTTPStream
18
19 # URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.
20 url_re = re.compile(r"""
21 (http(s)?://)?
22 (www\.)?
23 huomao
24 (\.tv|\.com)
25 /(?P<room_id>\d+)
26 """, re.VERBOSE)
27
28 # URL used to retrive the stream_id, stream_url and stream_quality based of
29 # a room_id.
30 mobile_url = "http://www.huomao.com/mobile/mob_live/{0}"
31
32 # Pattern for extracting the stream_id from the mobile_url HTML.
33 #
34 # Example from HTML:
35 # <input id="html_stream" value="efmrCH" type="hidden">
36 stream_id_pattern = re.compile(r'id=\"html_stream\" value=\"(?P<stream_id>\w+)\"')
37
38 # Pattern for extracting each stream_url, stream_quality_url and a prettified
39 # stream_quality_name used for quality naming.
40 #
41 # Example from HTML:
42 # "2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'"
43 stream_info_pattern = re.compile(r"""
44 [1-9]:
45 \s+
46 '(?P<stream_url>(?:\w|\.|:|-|/)+)
47 '\+stream\+'
48 (?P<stream_quality_url>_?(?P<stream_quality_name>\d*))
49 /playlist.m3u8'
50 """, re.VERBOSE)
51
52
53 class Huomao(Plugin):
54 @classmethod
55 def can_handle_url(self, url):
56 return url_re.match(url)
57
58 def get_stream_id(self, html):
59 """Returns the stream_id contained in the HTML."""
60 stream_id = stream_id_pattern.search(html)
61
62 if not stream_id:
63 self.logger.error("Failed to extract stream_id.")
64
65 return stream_id.group("stream_id")
66
67 def get_stream_info(self, html):
68 """Returns a nested list of different stream options.
69
70 Each entry in the list will contain a stream_url, stream_quality_url
71 and stream_quality_name for each stream occurrence that was found in
72 the JS.
73 """
74 stream_info = stream_info_pattern.findall(html)
75
76 if not stream_info:
77 self.logger.error("Failed to extract stream_info.")
78
79 # Rename the "" quality to "source" by transforming the tuples to a
80 # list and reassigning.
81 stream_info_list = []
82 for info in stream_info:
83 if not info[2]:
84 stream_info_list.append([info[0], info[1], "source"])
85 else:
86 stream_info_list.append(list(info))
87
88 return stream_info_list
89
90 def _get_streams(self):
91 room_id = url_re.search(self.url).group("room_id")
92 html = self.session.http.get(mobile_url.format(room_id))
93 stream_id = self.get_stream_id(html.text)
94 stream_info = self.get_stream_info(html.text)
95
96 streams = {}
97 for info in stream_info:
98 streams[info[2]] = HTTPStream(self.session,
99 info[0] + stream_id + info[1] + ".flv")
100
101 return streams
102
103
104 __plugin__ = Huomao
105
[end of src/streamlink/plugins/huomao.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/huomao.py b/src/streamlink/plugins/huomao.py
--- a/src/streamlink/plugins/huomao.py
+++ b/src/streamlink/plugins/huomao.py
@@ -4,8 +4,8 @@
scraping the HTML and JS of one of Huomaos mobile webpages.
When viewing a stream on huomao.com, the base URL references a room_id. This
-room_id is mapped one-to-one to a stream_id which references the actual .flv
-video. Both stream_id, stream_url and stream_quality can be found in the
+room_id is mapped one-to-one to a stream_id which references the actual .m3u8
+file. Both stream_id, stream_url and stream_quality can be found in the
HTML and JS source of the mobile_page. Since one stream can occur in many
different qualities, we scrape all stream_url and stream_quality occurrences
and return each option to the user.
@@ -14,7 +14,7 @@
import re
from streamlink.plugin import Plugin
-from streamlink.stream import HTTPStream
+from streamlink.stream import HLSStream
# URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.
url_re = re.compile(r"""
@@ -35,18 +35,15 @@
# <input id="html_stream" value="efmrCH" type="hidden">
stream_id_pattern = re.compile(r'id=\"html_stream\" value=\"(?P<stream_id>\w+)\"')
-# Pattern for extracting each stream_url, stream_quality_url and a prettified
+# Pattern for extracting each stream_url and
# stream_quality_name used for quality naming.
#
# Example from HTML:
-# "2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'"
+# src="http://live-ws-hls.huomaotv.cn/live/<stream_id>_720/playlist.m3u8"
stream_info_pattern = re.compile(r"""
- [1-9]:
- \s+
- '(?P<stream_url>(?:\w|\.|:|-|/)+)
- '\+stream\+'
- (?P<stream_quality_url>_?(?P<stream_quality_name>\d*))
- /playlist.m3u8'
+ (?P<stream_url>(?:[\w\/\.\-:]+)
+ \/[^_\"]+(?:_(?P<stream_quality_name>\d+))
+ ?/playlist.m3u8)
""", re.VERBOSE)
@@ -65,11 +62,11 @@
return stream_id.group("stream_id")
def get_stream_info(self, html):
- """Returns a nested list of different stream options.
+ """
+ Returns a nested list of different stream options.
- Each entry in the list will contain a stream_url, stream_quality_url
- and stream_quality_name for each stream occurrence that was found in
- the JS.
+ Each entry in the list will contain a stream_url and stream_quality_name
+ for each stream occurrence that was found in the JS.
"""
stream_info = stream_info_pattern.findall(html)
@@ -80,8 +77,8 @@
# list and reassigning.
stream_info_list = []
for info in stream_info:
- if not info[2]:
- stream_info_list.append([info[0], info[1], "source"])
+ if not info[1]:
+ stream_info_list.append([info[0], "source"])
else:
stream_info_list.append(list(info))
@@ -95,8 +92,8 @@
streams = {}
for info in stream_info:
- streams[info[2]] = HTTPStream(self.session,
- info[0] + stream_id + info[1] + ".flv")
+ if stream_id in info[0]:
+ streams[info[1]] = HLSStream(self.session, info[0])
return streams
| {"golden_diff": "diff --git a/src/streamlink/plugins/huomao.py b/src/streamlink/plugins/huomao.py\n--- a/src/streamlink/plugins/huomao.py\n+++ b/src/streamlink/plugins/huomao.py\n@@ -4,8 +4,8 @@\n scraping the HTML and JS of one of Huomaos mobile webpages.\n \n When viewing a stream on huomao.com, the base URL references a room_id. This\n-room_id is mapped one-to-one to a stream_id which references the actual .flv\n-video. Both stream_id, stream_url and stream_quality can be found in the\n+room_id is mapped one-to-one to a stream_id which references the actual .m3u8\n+file. Both stream_id, stream_url and stream_quality can be found in the\n HTML and JS source of the mobile_page. Since one stream can occur in many\n different qualities, we scrape all stream_url and stream_quality occurrences\n and return each option to the user.\n@@ -14,7 +14,7 @@\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.stream import HTTPStream\n+from streamlink.stream import HLSStream\n \n # URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.\n url_re = re.compile(r\"\"\"\n@@ -35,18 +35,15 @@\n # <input id=\"html_stream\" value=\"efmrCH\" type=\"hidden\">\n stream_id_pattern = re.compile(r'id=\\\"html_stream\\\" value=\\\"(?P<stream_id>\\w+)\\\"')\n \n-# Pattern for extracting each stream_url, stream_quality_url and a prettified\n+# Pattern for extracting each stream_url and\n # stream_quality_name used for quality naming.\n #\n # Example from HTML:\n-# \"2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'\"\n+# src=\"http://live-ws-hls.huomaotv.cn/live/<stream_id>_720/playlist.m3u8\"\n stream_info_pattern = re.compile(r\"\"\"\n- [1-9]:\n- \\s+\n- '(?P<stream_url>(?:\\w|\\.|:|-|/)+)\n- '\\+stream\\+'\n- (?P<stream_quality_url>_?(?P<stream_quality_name>\\d*))\n- /playlist.m3u8'\n+ (?P<stream_url>(?:[\\w\\/\\.\\-:]+)\n+ \\/[^_\\\"]+(?:_(?P<stream_quality_name>\\d+))\n+ ?/playlist.m3u8)\n \"\"\", re.VERBOSE)\n \n \n@@ -65,11 +62,11 @@\n return stream_id.group(\"stream_id\")\n \n def get_stream_info(self, html):\n- \"\"\"Returns a nested list of different stream options.\n+ \"\"\"\n+ Returns a nested list of different stream options.\n \n- Each entry in the list will contain a stream_url, stream_quality_url\n- and stream_quality_name for each stream occurrence that was found in\n- the JS.\n+ Each entry in the list will contain a stream_url and stream_quality_name\n+ for each stream occurrence that was found in the JS.\n \"\"\"\n stream_info = stream_info_pattern.findall(html)\n \n@@ -80,8 +77,8 @@\n # list and reassigning.\n stream_info_list = []\n for info in stream_info:\n- if not info[2]:\n- stream_info_list.append([info[0], info[1], \"source\"])\n+ if not info[1]:\n+ stream_info_list.append([info[0], \"source\"])\n else:\n stream_info_list.append(list(info))\n \n@@ -95,8 +92,8 @@\n \n streams = {}\n for info in stream_info:\n- streams[info[2]] = HTTPStream(self.session,\n- info[0] + stream_id + info[1] + \".flv\")\n+ if stream_id in info[0]:\n+ streams[info[1]] = HLSStream(self.session, info[0])\n \n return streams\n", "issue": "Huomao plugin not work\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. 
Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\nI found huomao plugin seems not work, i can use browser to watch stream but\r\nstreamlink says no playable stream\r\n\r\n\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. https://www.huomao.com/9755\r\n2. https://www.huomao.com/777777\r\n3. https://www.huomao.com/888\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n-->\r\n\r\n```\r\n[cli][info] Found matching plugin huomao for URL https://www.huomao.com/888\r\n[plugin.huomao][error] Failed to extract stream_info.\r\nerror: No playable streams found on this URL: https://www.huomao.com/888\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nNOTE: Since a documented API is nowhere to be found for Huomao; this plugin\nsimply extracts the videos stream_id, stream_url and stream_quality by\nscraping the HTML and JS of one of Huomaos mobile webpages.\n\nWhen viewing a stream on huomao.com, the base URL references a room_id. This\nroom_id is mapped one-to-one to a stream_id which references the actual .flv\nvideo. Both stream_id, stream_url and stream_quality can be found in the\nHTML and JS source of the mobile_page. 
Since one stream can occur in many\ndifferent qualities, we scrape all stream_url and stream_quality occurrences\nand return each option to the user.\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HTTPStream\n\n# URL pattern for recognizing inputed Huomao.tv / Huomao.com URL.\nurl_re = re.compile(r\"\"\"\n (http(s)?://)?\n (www\\.)?\n huomao\n (\\.tv|\\.com)\n /(?P<room_id>\\d+)\n\"\"\", re.VERBOSE)\n\n# URL used to retrive the stream_id, stream_url and stream_quality based of\n# a room_id.\nmobile_url = \"http://www.huomao.com/mobile/mob_live/{0}\"\n\n# Pattern for extracting the stream_id from the mobile_url HTML.\n#\n# Example from HTML:\n# <input id=\"html_stream\" value=\"efmrCH\" type=\"hidden\">\nstream_id_pattern = re.compile(r'id=\\\"html_stream\\\" value=\\\"(?P<stream_id>\\w+)\\\"')\n\n# Pattern for extracting each stream_url, stream_quality_url and a prettified\n# stream_quality_name used for quality naming.\n#\n# Example from HTML:\n# \"2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'\"\nstream_info_pattern = re.compile(r\"\"\"\n [1-9]:\n \\s+\n '(?P<stream_url>(?:\\w|\\.|:|-|/)+)\n '\\+stream\\+'\n (?P<stream_quality_url>_?(?P<stream_quality_name>\\d*))\n /playlist.m3u8'\n\"\"\", re.VERBOSE)\n\n\nclass Huomao(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return url_re.match(url)\n\n def get_stream_id(self, html):\n \"\"\"Returns the stream_id contained in the HTML.\"\"\"\n stream_id = stream_id_pattern.search(html)\n\n if not stream_id:\n self.logger.error(\"Failed to extract stream_id.\")\n\n return stream_id.group(\"stream_id\")\n\n def get_stream_info(self, html):\n \"\"\"Returns a nested list of different stream options.\n\n Each entry in the list will contain a stream_url, stream_quality_url\n and stream_quality_name for each stream occurrence that was found in\n the JS.\n \"\"\"\n stream_info = stream_info_pattern.findall(html)\n\n if not stream_info:\n self.logger.error(\"Failed to extract stream_info.\")\n\n # Rename the \"\" quality to \"source\" by transforming the tuples to a\n # list and reassigning.\n stream_info_list = []\n for info in stream_info:\n if not info[2]:\n stream_info_list.append([info[0], info[1], \"source\"])\n else:\n stream_info_list.append(list(info))\n\n return stream_info_list\n\n def _get_streams(self):\n room_id = url_re.search(self.url).group(\"room_id\")\n html = self.session.http.get(mobile_url.format(room_id))\n stream_id = self.get_stream_id(html.text)\n stream_info = self.get_stream_info(html.text)\n\n streams = {}\n for info in stream_info:\n streams[info[2]] = HTTPStream(self.session,\n info[0] + stream_id + info[1] + \".flv\")\n\n return streams\n\n\n__plugin__ = Huomao\n", "path": "src/streamlink/plugins/huomao.py"}]} | 2,022 | 890 |
gh_patches_debug_6455 | rasdani/github-patches | git_diff | voicepaw__so-vits-svc-fork-354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnpicklingError: Weights only load failed. Unpickler error: Unsupported class numpy.core.multiarray._reconstruct
**Describe the bug**
I tried to update, but starting from version 3.6.0 I get this exception during inference:
```
UnpicklingError: Weights only load failed. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution.Do it only if you get the file from a trusted source. WeightsUnpickler error: Unsupported class numpy.core.multiarray._reconstruct
```
**To Reproduce**
Steps to reproduce the behavior:
- Update so-vits-svc-fork
- Run inference
**Additional context**
Initially I updated to version 3.8.0; because of the exception, I kept downgrading the version until I reached 3.5.1, which resolved the problem.
</issue>
<code>
[start of src/so_vits_svc_fork/cluster/__init__.py]
1 from __future__ import annotations
2
3 from pathlib import Path
4 from typing import Any
5
6 import torch
7 from sklearn.cluster import KMeans
8
9
10 def get_cluster_model(ckpt_path: Path | str):
11 with Path(ckpt_path).open("rb") as f:
12 checkpoint = torch.load(f, map_location="cpu", weights_only=True)
13 kmeans_dict = {}
14 for spk, ckpt in checkpoint.items():
15 km = KMeans(ckpt["n_features_in_"])
16 km.__dict__["n_features_in_"] = ckpt["n_features_in_"]
17 km.__dict__["_n_threads"] = ckpt["_n_threads"]
18 km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"]
19 kmeans_dict[spk] = km
20 return kmeans_dict
21
22
23 def check_speaker(model: Any, speaker: Any):
24 if speaker not in model:
25 raise ValueError(f"Speaker {speaker} not in {list(model.keys())}")
26
27
28 def get_cluster_result(model: Any, x: Any, speaker: Any):
29 """
30 x: np.array [t, 256]
31 return cluster class result
32 """
33 check_speaker(model, speaker)
34 return model[speaker].predict(x)
35
36
37 def get_cluster_center_result(model: Any, x: Any, speaker: Any):
38 """x: np.array [t, 256]"""
39 check_speaker(model, speaker)
40 predict = model[speaker].predict(x)
41 return model[speaker].cluster_centers_[predict]
42
43
44 def get_center(model: Any, x: Any, speaker: Any):
45 check_speaker(model, speaker)
46 return model[speaker].cluster_centers_[x]
47
[end of src/so_vits_svc_fork/cluster/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/so_vits_svc_fork/cluster/__init__.py b/src/so_vits_svc_fork/cluster/__init__.py
--- a/src/so_vits_svc_fork/cluster/__init__.py
+++ b/src/so_vits_svc_fork/cluster/__init__.py
@@ -9,7 +9,9 @@
def get_cluster_model(ckpt_path: Path | str):
with Path(ckpt_path).open("rb") as f:
- checkpoint = torch.load(f, map_location="cpu", weights_only=True)
+ checkpoint = torch.load(
+ f, map_location="cpu"
+ ) # Danger of arbitrary code execution
kmeans_dict = {}
for spk, ckpt in checkpoint.items():
km = KMeans(ckpt["n_features_in_"])
| {"golden_diff": "diff --git a/src/so_vits_svc_fork/cluster/__init__.py b/src/so_vits_svc_fork/cluster/__init__.py\n--- a/src/so_vits_svc_fork/cluster/__init__.py\n+++ b/src/so_vits_svc_fork/cluster/__init__.py\n@@ -9,7 +9,9 @@\n \n def get_cluster_model(ckpt_path: Path | str):\n with Path(ckpt_path).open(\"rb\") as f:\n- checkpoint = torch.load(f, map_location=\"cpu\", weights_only=True)\n+ checkpoint = torch.load(\n+ f, map_location=\"cpu\"\n+ ) # Danger of arbitrary code execution\n kmeans_dict = {}\n for spk, ckpt in checkpoint.items():\n km = KMeans(ckpt[\"n_features_in_\"])\n", "issue": "UnpicklingError: Weights only load failed. Unpickler error: Unsupported class numpy.core.multiarray._reconstruct\n**Describe the bug**\r\nI tried to update, but I got this exception start from version 3.6.0 during inference\r\n```\r\nUnpicklingError: Weights only load failed. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution.Do it only if you get the file from a trusted source. WeightsUnpickler error: Unsupported class numpy.core.multiarray._reconstruct\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n- Update so-vits-svc-fork\r\n- Run inference\r\n\r\n**Additional context**\r\nInitially I updated to version 3.8.0, because of the exception I tried to solve by keep downgrading the version until I got to version 3.5.1 to solve the problem.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nimport torch\nfrom sklearn.cluster import KMeans\n\n\ndef get_cluster_model(ckpt_path: Path | str):\n with Path(ckpt_path).open(\"rb\") as f:\n checkpoint = torch.load(f, map_location=\"cpu\", weights_only=True)\n kmeans_dict = {}\n for spk, ckpt in checkpoint.items():\n km = KMeans(ckpt[\"n_features_in_\"])\n km.__dict__[\"n_features_in_\"] = ckpt[\"n_features_in_\"]\n km.__dict__[\"_n_threads\"] = ckpt[\"_n_threads\"]\n km.__dict__[\"cluster_centers_\"] = ckpt[\"cluster_centers_\"]\n kmeans_dict[spk] = km\n return kmeans_dict\n\n\ndef check_speaker(model: Any, speaker: Any):\n if speaker not in model:\n raise ValueError(f\"Speaker {speaker} not in {list(model.keys())}\")\n\n\ndef get_cluster_result(model: Any, x: Any, speaker: Any):\n \"\"\"\n x: np.array [t, 256]\n return cluster class result\n \"\"\"\n check_speaker(model, speaker)\n return model[speaker].predict(x)\n\n\ndef get_cluster_center_result(model: Any, x: Any, speaker: Any):\n \"\"\"x: np.array [t, 256]\"\"\"\n check_speaker(model, speaker)\n predict = model[speaker].predict(x)\n return model[speaker].cluster_centers_[predict]\n\n\ndef get_center(model: Any, x: Any, speaker: Any):\n check_speaker(model, speaker)\n return model[speaker].cluster_centers_[x]\n", "path": "src/so_vits_svc_fork/cluster/__init__.py"}]} | 1,205 | 180 |
gh_patches_debug_7476 | rasdani/github-patches | git_diff | Mailu__Mailu-1084 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setup error for incorrect docker network subnet
When entering an incorrect docker subnet (e.g. 172.168.0.1/16) the setup throws a 500 error, without any reason being given.
If you run it locally, the error is clearly reported, though in an ungraceful way.
</issue>
<code>
[start of setup/server.py]
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11 import hashlib
12 import time
13
14
15 version = os.getenv("this_version", "master")
16 static_url_path = "/" + version + "/static"
17 app = flask.Flask(__name__, static_url_path=static_url_path)
18 flask_bootstrap.Bootstrap(app)
19 db = redis.StrictRedis(host='redis', port=6379, db=0)
20
21
22 def render_flavor(flavor, template, data):
23 return flask.render_template(
24 os.path.join(flavor, template),
25 **data
26 )
27
28
29 @app.add_template_global
30 def secret(length=16):
31 charset = string.ascii_uppercase + string.digits
32 return ''.join(
33 random.SystemRandom().choice(charset)
34 for _ in range(length)
35 )
36
37 #Original copied from https://github.com/andrewlkho/ulagen
38 def random_ipv6_subnet():
39 eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff
40 eui64_canon = "-".join([format(eui64, "02X")[i:i+2] for i in range(0, 18, 2)])
41
42 h = hashlib.sha1()
43 h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))
44 globalid = h.hexdigest()[0:10]
45
46 prefix = ":".join(("fd" + globalid[0:2], globalid[2:6], globalid[6:10]))
47 return prefix
48
49 def build_app(path):
50
51 app.jinja_env.trim_blocks = True
52 app.jinja_env.lstrip_blocks = True
53
54 @app.context_processor
55 def app_context():
56 return dict(versions=os.getenv("VERSIONS","master").split(','))
57
58 prefix_bp = flask.Blueprint(version, __name__)
59 prefix_bp.jinja_loader = jinja2.ChoiceLoader([
60 jinja2.FileSystemLoader(os.path.join(path, "templates")),
61 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
62 ])
63
64 root_bp = flask.Blueprint("root", __name__)
65 root_bp.jinja_loader = jinja2.ChoiceLoader([
66 jinja2.FileSystemLoader(os.path.join(path, "templates")),
67 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
68 ])
69
70 @prefix_bp.context_processor
71 @root_bp.context_processor
72 def bp_context(version=version):
73 return dict(version=version)
74
75 @prefix_bp.route("/")
76 @root_bp.route("/")
77 def wizard():
78 return flask.render_template('wizard.html')
79
80 @prefix_bp.route("/submit_flavor", methods=["POST"])
81 @root_bp.route("/submit_flavor", methods=["POST"])
82 def submit_flavor():
83 data = flask.request.form.copy()
84 subnet6 = random_ipv6_subnet()
85 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
86 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps, subnet6=subnet6)
87
88 @prefix_bp.route("/submit", methods=["POST"])
89 @root_bp.route("/submit", methods=["POST"])
90 def submit():
91 data = flask.request.form.copy()
92 data['uid'] = str(uuid.uuid4())
93 try:
94 data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
95 except ValueError as err:
96 return "Error while generating files: " + str(err)
97 db.set(data['uid'], json.dumps(data))
98 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
99
100 @prefix_bp.route("/setup/<uid>", methods=["GET"])
101 @root_bp.route("/setup/<uid>", methods=["GET"])
102 def setup(uid):
103 data = json.loads(db.get(uid))
104 flavor = data.get("flavor", "compose")
105 rendered = render_flavor(flavor, "setup.html", data)
106 return flask.render_template("setup.html", contents=rendered)
107
108 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
109 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
110 def file(uid, filepath):
111 data = json.loads(db.get(uid))
112 flavor = data.get("flavor", "compose")
113 return flask.Response(
114 render_flavor(flavor, filepath, data),
115 mimetype="application/text"
116 )
117
118 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
119 app.register_blueprint(root_bp)
120
121
122 if __name__ == "__main__":
123 build_app("/tmp/mailutest")
124 app.run(debug=True)
125
[end of setup/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -91,7 +91,7 @@
data = flask.request.form.copy()
data['uid'] = str(uuid.uuid4())
try:
- data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
+ data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])
except ValueError as err:
return "Error while generating files: " + str(err)
db.set(data['uid'], json.dumps(data))
| {"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -91,7 +91,7 @@\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n- data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n+ data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n", "issue": "Setup error for incorrect docker network subnet\nWhen entering an incorrect docker subnet (e.g. 172.168.0.1/16) the setup throws a 500 error -- without any reasons being given. \r\n\r\nIf you run locally, the error is clearly reported in an ungraceful way.\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n 
try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]} | 1,939 | 133 |
gh_patches_debug_23312 | rasdani/github-patches | git_diff | ephios-dev__ephios-338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Format event description
As a planner, I want to be able to format the event description. This should at least mean that links will be formatted so that they are clickable. Maybe this should mean that the description will accept Markdown.
</issue>
<code>
[start of ephios/extra/templatetags/rich_text.py]
1 import bleach
2 import markdown
3 from django import template
4 from django.utils.safestring import mark_safe
5
6 register = template.Library()
7
8 ALLOWED_TAGS = [
9 "a",
10 "abbr",
11 "acronym",
12 "b",
13 "blockquote",
14 "br",
15 "code",
16 "div",
17 "em",
18 "h1",
19 "h2",
20 "h3",
21 "h4",
22 "h5",
23 "h6",
24 "hr",
25 "i",
26 "li",
27 "ol",
28 "p",
29 "pre",
30 "span",
31 "strong",
32 "table",
33 "tbody",
34 "td",
35 "th",
36 "thead",
37 "tr",
38 "ul",
39 ]
40
41
42 ALLOWED_ATTRIBUTES = {
43 "a": ["href", "title", "class"],
44 "abbr": ["title"],
45 "acronym": ["title"],
46 "table": ["width"],
47 "td": ["width", "align"],
48 "div": ["class"],
49 "p": ["class"],
50 "span": ["class", "title"],
51 }
52
53 ALLOWED_PROTOCOLS = ["http", "https", "mailto", "tel"]
54
55
56 def markdown_compile(source):
57 extensions = ["markdown.extensions.sane_lists", "markdown.extensions.nl2br"]
58 return bleach.clean(
59 markdown.markdown(source, extensions=extensions),
60 tags=ALLOWED_TAGS,
61 attributes=ALLOWED_ATTRIBUTES,
62 protocols=ALLOWED_PROTOCOLS,
63 )
64
65
66 @register.filter
67 def rich_text(text: str, **kwargs):
68 """
69 Processes markdown and cleans HTML in a text input.
70 """
71 text = str(text)
72 linker = bleach.Linker(parse_email=True)
73 body_md = linker.linkify(markdown_compile(text))
74 return mark_safe(body_md)
75
[end of ephios/extra/templatetags/rich_text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ephios/extra/templatetags/rich_text.py b/ephios/extra/templatetags/rich_text.py
--- a/ephios/extra/templatetags/rich_text.py
+++ b/ephios/extra/templatetags/rich_text.py
@@ -53,22 +53,28 @@
ALLOWED_PROTOCOLS = ["http", "https", "mailto", "tel"]
-def markdown_compile(source):
+def markdown_compile(source, excluded_tags=""):
extensions = ["markdown.extensions.sane_lists", "markdown.extensions.nl2br"]
+ tags = ALLOWED_TAGS.copy()
+ for tag in excluded_tags.split(","):
+ try:
+ tags.remove(tag)
+ except ValueError:
+ pass
return bleach.clean(
markdown.markdown(source, extensions=extensions),
- tags=ALLOWED_TAGS,
+ tags=tags,
attributes=ALLOWED_ATTRIBUTES,
protocols=ALLOWED_PROTOCOLS,
)
@register.filter
-def rich_text(text: str, **kwargs):
+def rich_text(text: str, excluded_tags=""):
"""
Processes markdown and cleans HTML in a text input.
"""
text = str(text)
linker = bleach.Linker(parse_email=True)
- body_md = linker.linkify(markdown_compile(text))
+ body_md = linker.linkify(markdown_compile(text, excluded_tags=excluded_tags))
return mark_safe(body_md)
| {"golden_diff": "diff --git a/ephios/extra/templatetags/rich_text.py b/ephios/extra/templatetags/rich_text.py\n--- a/ephios/extra/templatetags/rich_text.py\n+++ b/ephios/extra/templatetags/rich_text.py\n@@ -53,22 +53,28 @@\n ALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"tel\"]\n \n \n-def markdown_compile(source):\n+def markdown_compile(source, excluded_tags=\"\"):\n extensions = [\"markdown.extensions.sane_lists\", \"markdown.extensions.nl2br\"]\n+ tags = ALLOWED_TAGS.copy()\n+ for tag in excluded_tags.split(\",\"):\n+ try:\n+ tags.remove(tag)\n+ except ValueError:\n+ pass\n return bleach.clean(\n markdown.markdown(source, extensions=extensions),\n- tags=ALLOWED_TAGS,\n+ tags=tags,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n \n \n @register.filter\n-def rich_text(text: str, **kwargs):\n+def rich_text(text: str, excluded_tags=\"\"):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n linker = bleach.Linker(parse_email=True)\n- body_md = linker.linkify(markdown_compile(text))\n+ body_md = linker.linkify(markdown_compile(text, excluded_tags=excluded_tags))\n return mark_safe(body_md)\n", "issue": "Format event description\nAs a planner, I want to be able to format the event description. This should at least mean that links will be formatted so that they are clickable. Maybe this should mean that the description will accept Markdown.\n", "before_files": [{"content": "import bleach\nimport markdown\nfrom django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n \"a\",\n \"abbr\",\n \"acronym\",\n \"b\",\n \"blockquote\",\n \"br\",\n \"code\",\n \"div\",\n \"em\",\n \"h1\",\n \"h2\",\n \"h3\",\n \"h4\",\n \"h5\",\n \"h6\",\n \"hr\",\n \"i\",\n \"li\",\n \"ol\",\n \"p\",\n \"pre\",\n \"span\",\n \"strong\",\n \"table\",\n \"tbody\",\n \"td\",\n \"th\",\n \"thead\",\n \"tr\",\n \"ul\",\n]\n\n\nALLOWED_ATTRIBUTES = {\n \"a\": [\"href\", \"title\", \"class\"],\n \"abbr\": [\"title\"],\n \"acronym\": [\"title\"],\n \"table\": [\"width\"],\n \"td\": [\"width\", \"align\"],\n \"div\": [\"class\"],\n \"p\": [\"class\"],\n \"span\": [\"class\", \"title\"],\n}\n\nALLOWED_PROTOCOLS = [\"http\", \"https\", \"mailto\", \"tel\"]\n\n\ndef markdown_compile(source):\n extensions = [\"markdown.extensions.sane_lists\", \"markdown.extensions.nl2br\"]\n return bleach.clean(\n markdown.markdown(source, extensions=extensions),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n linker = bleach.Linker(parse_email=True)\n body_md = linker.linkify(markdown_compile(text))\n return mark_safe(body_md)\n", "path": "ephios/extra/templatetags/rich_text.py"}]} | 1,126 | 315 |
gh_patches_debug_30302 | rasdani/github-patches | git_diff | wagtail__wagtail-10860 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please add a progress bar to "wagtail_update_image_renditions" management command
I love this new management command to regenerate all image renditions `wagtail_update_image_renditions`. But often the websites I had to use it on had many thousands of images and this command would need some hours to complete. I would love to see some kind of **progress feedback** in the terminal to get an idea of the time this task might need to complete. This could be text based of even a bar, like we know from packages like "tqdm".
</issue>
<code>
[start of wagtail/images/management/commands/wagtail_update_image_renditions.py]
1 import logging
2
3 from django.core.management.base import BaseCommand
4 from django.db import transaction
5
6 from wagtail.images import get_image_model
7
8 logger = logging.getLogger(__name__)
9
10
11 class Command(BaseCommand):
12 """Command to create missing image renditions with the option to remove (purge) any existing ones."""
13
14 help = "This command will generate all image renditions, with an option to purge existing renditions first."
15
16 def add_arguments(self, parser):
17 parser.add_argument(
18 "--purge-only",
19 action="store_true",
20 help="Purge all image renditions without regenerating them",
21 )
22 parser.add_argument(
23 "--chunk-size",
24 type=int,
25 default=50,
26 help="Operate in x size chunks (default: %(default)s)",
27 )
28
29 def handle(self, *args, **options):
30 Rendition = get_image_model().get_rendition_model()
31
32 renditions = Rendition.objects.all()
33
34 purge_only = options["purge_only"]
35
36 if not renditions.exists():
37 self.stdout.write(self.style.WARNING("No image renditions found."))
38 return
39
40 rendition_ids = list(renditions.values_list("id", flat=True))
41 num_renditions = len(rendition_ids)
42
43 if purge_only:
44 self.stdout.write(
45 self.style.HTTP_INFO(f"Purging {num_renditions} rendition(s)")
46 )
47 else:
48 self.stdout.write(
49 self.style.HTTP_INFO(f"Regenerating {num_renditions} rendition(s)")
50 )
51
52 for rendition in (
53 # Pre-calculate the ids of the renditions to change,
54 # otherwise `.iterator` never ends.
55 renditions.filter(id__in=rendition_ids)
56 .select_related("image")
57 .iterator(chunk_size=options["chunk_size"])
58 ):
59 try:
60 with transaction.atomic():
61 rendition_filter = rendition.filter
62 rendition_image = rendition.image
63
64 # Delete the existing rendition
65 rendition.delete()
66
67 if not purge_only:
68 # Create a new one
69 rendition_image.get_rendition(rendition_filter)
70 except: # noqa:E722
71 logger.exception("Error operating on rendition %d", rendition.id)
72 self.stderr.write(
73 self.style.ERROR(f"Failed to operate on rendition {rendition.id}")
74 )
75 num_renditions -= 1
76
77 if num_renditions:
78 self.stdout.write(
79 self.style.SUCCESS(
80 f"Successfully processed {num_renditions} rendition(s)"
81 )
82 )
83 else:
84 self.stdout.write(self.style.WARNING("Could not process any renditions."))
85
[end of wagtail/images/management/commands/wagtail_update_image_renditions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/images/management/commands/wagtail_update_image_renditions.py b/wagtail/images/management/commands/wagtail_update_image_renditions.py
--- a/wagtail/images/management/commands/wagtail_update_image_renditions.py
+++ b/wagtail/images/management/commands/wagtail_update_image_renditions.py
@@ -8,6 +8,17 @@
logger = logging.getLogger(__name__)
+def progress_bar(current, total, bar_length=50):
+ fraction = current / total
+
+ arrow = int(fraction * bar_length - 1) * "-" + ">"
+ padding = int(bar_length - len(arrow)) * " "
+
+ ending = "\n" if current == total else "\r"
+
+ return (f"Progress: [{arrow}{padding}] {int(fraction*100)}%", ending)
+
+
class Command(BaseCommand):
"""Command to create missing image renditions with the option to remove (purge) any existing ones."""
@@ -49,6 +60,7 @@
self.style.HTTP_INFO(f"Regenerating {num_renditions} rendition(s)")
)
+ progress_bar_current = 1
for rendition in (
# Pre-calculate the ids of the renditions to change,
# otherwise `.iterator` never ends.
@@ -64,6 +76,10 @@
# Delete the existing rendition
rendition.delete()
+ _progress_bar = progress_bar(progress_bar_current, num_renditions)
+ self.stdout.write(_progress_bar[0], ending=_progress_bar[1])
+ progress_bar_current = progress_bar_current + 1
+
if not purge_only:
# Create a new one
rendition_image.get_rendition(rendition_filter)
| {"golden_diff": "diff --git a/wagtail/images/management/commands/wagtail_update_image_renditions.py b/wagtail/images/management/commands/wagtail_update_image_renditions.py\n--- a/wagtail/images/management/commands/wagtail_update_image_renditions.py\n+++ b/wagtail/images/management/commands/wagtail_update_image_renditions.py\n@@ -8,6 +8,17 @@\n logger = logging.getLogger(__name__)\n \n \n+def progress_bar(current, total, bar_length=50):\n+ fraction = current / total\n+\n+ arrow = int(fraction * bar_length - 1) * \"-\" + \">\"\n+ padding = int(bar_length - len(arrow)) * \" \"\n+\n+ ending = \"\\n\" if current == total else \"\\r\"\n+\n+ return (f\"Progress: [{arrow}{padding}] {int(fraction*100)}%\", ending)\n+\n+\n class Command(BaseCommand):\n \"\"\"Command to create missing image renditions with the option to remove (purge) any existing ones.\"\"\"\n \n@@ -49,6 +60,7 @@\n self.style.HTTP_INFO(f\"Regenerating {num_renditions} rendition(s)\")\n )\n \n+ progress_bar_current = 1\n for rendition in (\n # Pre-calculate the ids of the renditions to change,\n # otherwise `.iterator` never ends.\n@@ -64,6 +76,10 @@\n # Delete the existing rendition\n rendition.delete()\n \n+ _progress_bar = progress_bar(progress_bar_current, num_renditions)\n+ self.stdout.write(_progress_bar[0], ending=_progress_bar[1])\n+ progress_bar_current = progress_bar_current + 1\n+\n if not purge_only:\n # Create a new one\n rendition_image.get_rendition(rendition_filter)\n", "issue": "Please add a progress bar to \"wagtail_update_image_renditions\" management command\nI love this new management command to regenerate all image renditions `wagtail_update_image_renditions`. But often the websites I had to use it on had many thousands of images and this command would need some hours to complete. I would love to see some kind of **progress feedback** in the terminal to get an idea of the time this task might need to complete. 
This could be text based of even a bar, like we know from packages like \"tqdm\".\n", "before_files": [{"content": "import logging\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom wagtail.images import get_image_model\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n \"\"\"Command to create missing image renditions with the option to remove (purge) any existing ones.\"\"\"\n\n help = \"This command will generate all image renditions, with an option to purge existing renditions first.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--purge-only\",\n action=\"store_true\",\n help=\"Purge all image renditions without regenerating them\",\n )\n parser.add_argument(\n \"--chunk-size\",\n type=int,\n default=50,\n help=\"Operate in x size chunks (default: %(default)s)\",\n )\n\n def handle(self, *args, **options):\n Rendition = get_image_model().get_rendition_model()\n\n renditions = Rendition.objects.all()\n\n purge_only = options[\"purge_only\"]\n\n if not renditions.exists():\n self.stdout.write(self.style.WARNING(\"No image renditions found.\"))\n return\n\n rendition_ids = list(renditions.values_list(\"id\", flat=True))\n num_renditions = len(rendition_ids)\n\n if purge_only:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Purging {num_renditions} rendition(s)\")\n )\n else:\n self.stdout.write(\n self.style.HTTP_INFO(f\"Regenerating {num_renditions} rendition(s)\")\n )\n\n for rendition in (\n # Pre-calculate the ids of the renditions to change,\n # otherwise `.iterator` never ends.\n renditions.filter(id__in=rendition_ids)\n .select_related(\"image\")\n .iterator(chunk_size=options[\"chunk_size\"])\n ):\n try:\n with transaction.atomic():\n rendition_filter = rendition.filter\n rendition_image = rendition.image\n\n # Delete the existing rendition\n rendition.delete()\n\n if not purge_only:\n # Create a new one\n rendition_image.get_rendition(rendition_filter)\n except: # noqa:E722\n logger.exception(\"Error operating on rendition %d\", rendition.id)\n self.stderr.write(\n self.style.ERROR(f\"Failed to operate on rendition {rendition.id}\")\n )\n num_renditions -= 1\n\n if num_renditions:\n self.stdout.write(\n self.style.SUCCESS(\n f\"Successfully processed {num_renditions} rendition(s)\"\n )\n )\n else:\n self.stdout.write(self.style.WARNING(\"Could not process any renditions.\"))\n", "path": "wagtail/images/management/commands/wagtail_update_image_renditions.py"}]} | 1,400 | 398 |
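Editor's note on the Wagtail record above: the shipped patch renders a hand-rolled, dependency-free progress bar with carriage returns. If pulling in a dependency were acceptable, the same feedback could come from `tqdm`; a hedged sketch of the loop inside `handle()` — `tqdm` is an assumption here (it is not a Wagtail requirement), and the names `renditions`, `rendition_ids`, `num_renditions`, `options`, `purge_only` and `transaction` come from the command shown in that record:

```python
# Hypothetical alternative to the hand-rolled bar: wrap the rendition iterator
# in tqdm. Fragment of Command.handle(); assumes tqdm is installed and the
# local variables above are already defined earlier in that method.
from tqdm import tqdm

queryset = (
    renditions.filter(id__in=rendition_ids)
    .select_related("image")
    .iterator(chunk_size=options["chunk_size"])
)

for rendition in tqdm(queryset, total=num_renditions, desc="Renditions"):
    with transaction.atomic():
        rendition_filter = rendition.filter
        rendition_image = rendition.image
        rendition.delete()
        if not purge_only:
            rendition_image.get_rendition(rendition_filter)
```

Staying dependency-free is presumably why the merged fix draws the bar by hand instead.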
gh_patches_debug_19081 | rasdani/github-patches | git_diff | nvaccess__nvda-11609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Visual Studio IntelliSense overly verbose
To reproduce this issue:
1. I created a console application
2. In the main method I started typing "Console" and it reads ok
3. When I press "." to access "WriteLine" or another method, it reads the whole line, which gets very confusing and annoying when lines get longer. Imagine pressing the up or down arrows: for each item in the IntelliSense list it repeats the whole line of code and only then reads the item itself.
For example, if the line is the following:
string name = string.format("Hello {0}", textField.text);
When I get to the point where I type "textField" and then ".", it repeats the whole line and then reads the "text" property, and it does so again every time I move up or down with the arrow keys.
It is worth mentioning that this issue is not present when using Freedom Scientific JAWS, but NVDA is a much better choice for blind developers.
I also reported this issue to Microsoft: https://developercommunity.visualstudio.com/content/problem/164719/nvda-screen-reader-is-not-working-properly-with-in.html
</issue>
<code>
[start of source/NVDAObjects/UIA/VisualStudio.py]
1 # This file is covered by the GNU General Public License.
2 # See the file COPYING for more details.
3 # Copyright (C) 2020 NV Access Limited, Leonard de Ruijter
4
5 """
6 Object overlay classes for Visual Studio components
7 available in Visual Studio and SQL Server Management Studio.
8 """
9
10 from . import UIA
11 import speech
12 import braille
13 import api
14
15
16 class IntelliSenseItem(UIA):
17
18 def _get_name(self):
19 return self.UIAElement.cachedAutomationID
20
21 def event_UIA_elementSelected(self):
22 # Cancel speech to have speech announce the selection as soon as possible.
23 # This is needed because L{reportFocus} does not cancel speech.
24 # Therefore, if speech wouldn't be cancelled,
25 # selection announcements would queue up when changing selection rapidly.
26 speech.cancelSpeech()
27 api.setNavigatorObject(self, isFocus=True)
28 self.reportFocus()
29 # Display results as flash messages.
30 braille.handler.message(braille.getPropertiesBraille(
31 name=self.name, role=self.role, positionInfo=self.positionInfo, description=self.description
32 ))
33
34
35 class IntelliSenseList(UIA):
36 ...
37
38
39 def findExtraOverlayClasses(obj, clsList):
40 if obj.UIAElement.cachedAutomationId == "listBoxCompletions":
41 clsList.insert(0, IntelliSenseList)
42 elif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == "IntellisenseMenuItem":
43 clsList.insert(0, IntelliSenseItem)
44
[end of source/NVDAObjects/UIA/VisualStudio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/NVDAObjects/UIA/VisualStudio.py b/source/NVDAObjects/UIA/VisualStudio.py
--- a/source/NVDAObjects/UIA/VisualStudio.py
+++ b/source/NVDAObjects/UIA/VisualStudio.py
@@ -36,8 +36,31 @@
...
+class IntelliSenseLiveRegion(UIA):
+ """
+ Visual Studio uses both Intellisense menu item objects and a live region
+ to communicate Intellisense selections.
+ NVDA uses the menu item approach and therefore the live region provides doubled information
+ and is disabled.
+ """
+
+ _shouldAllowUIALiveRegionChangeEvent = False
+
+
+_INTELLISENSE_LIST_AUTOMATION_IDS = {
+ "listBoxCompletions",
+ "CompletionList"
+}
+
+
def findExtraOverlayClasses(obj, clsList):
- if obj.UIAElement.cachedAutomationId == "listBoxCompletions":
+ if obj.UIAAutomationId in _INTELLISENSE_LIST_AUTOMATION_IDS:
clsList.insert(0, IntelliSenseList)
elif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == "IntellisenseMenuItem":
clsList.insert(0, IntelliSenseItem)
+ elif (
+ obj.UIAElement.cachedClassName == "LiveTextBlock"
+ and obj.previous
+ and isinstance(obj.previous.previous, IntelliSenseList)
+ ):
+ clsList.insert(0, IntelliSenseLiveRegion)
| {"golden_diff": "diff --git a/source/NVDAObjects/UIA/VisualStudio.py b/source/NVDAObjects/UIA/VisualStudio.py\n--- a/source/NVDAObjects/UIA/VisualStudio.py\n+++ b/source/NVDAObjects/UIA/VisualStudio.py\n@@ -36,8 +36,31 @@\n \t...\n \n \n+class IntelliSenseLiveRegion(UIA):\n+\t\"\"\"\n+\tVisual Studio uses both Intellisense menu item objects and a live region\n+\tto communicate Intellisense selections.\n+\tNVDA uses the menu item approach and therefore the live region provides doubled information\n+\tand is disabled.\n+\t\"\"\"\n+\n+\t_shouldAllowUIALiveRegionChangeEvent = False\n+\n+\n+_INTELLISENSE_LIST_AUTOMATION_IDS = {\n+\t\"listBoxCompletions\",\n+\t\"CompletionList\"\n+}\n+\n+\n def findExtraOverlayClasses(obj, clsList):\n-\tif obj.UIAElement.cachedAutomationId == \"listBoxCompletions\":\n+\tif obj.UIAAutomationId in _INTELLISENSE_LIST_AUTOMATION_IDS:\n \t\tclsList.insert(0, IntelliSenseList)\n \telif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == \"IntellisenseMenuItem\":\n \t\tclsList.insert(0, IntelliSenseItem)\n+\telif (\n+\t\tobj.UIAElement.cachedClassName == \"LiveTextBlock\"\n+\t\tand obj.previous\n+\t\tand isinstance(obj.previous.previous, IntelliSenseList)\n+\t):\n+\t\tclsList.insert(0, IntelliSenseLiveRegion)\n", "issue": "Visual Studio IntelliSense overly verbose\nTo reproduce this issue:\r\n1. I created a console application\r\n2. In the main method I started typing \"Console\" and it reads ok\r\n3. When I press \".\" to access \"WriteLine\" or other method it reads the whole line. Which gets very confusing and anoying when lines get longer. Imagine pressing up or down arrows and for each item in the Intelli sense it repeats the whole line of code and lastly reads the item in the IntelliSense.\r\nFor example, if the line is following:\r\nstring name = string.format(\"Hello {0}\", textField.text);\r\n\r\nWhen I got to the point when I type \"textField\" and than \".\" it will repeat the whole line and than read the \"text\" property, and it repeats if I move up or down using arrow keys.\r\n\r\nIt is worth mentioning that this issue is not present when using Freedom scientific JAWS, but NVDA is much better choice for blind developers.\r\n\r\nI also reported this issue to the Microsoft: https://developercommunity.visualstudio.com/content/problem/164719/nvda-screen-reader-is-not-working-properly-with-in.html\n", "before_files": [{"content": "# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n# Copyright (C) 2020 NV Access Limited, Leonard de Ruijter\n\n\"\"\"\nObject overlay classes for Visual Studio components\navailable in Visual Studio and SQL Server Management Studio.\n\"\"\"\n\nfrom . 
import UIA\nimport speech\nimport braille\nimport api\n\n\nclass IntelliSenseItem(UIA):\n\n\tdef _get_name(self):\n\t\treturn self.UIAElement.cachedAutomationID\n\n\tdef event_UIA_elementSelected(self):\n\t\t# Cancel speech to have speech announce the selection as soon as possible.\n\t\t# This is needed because L{reportFocus} does not cancel speech.\n\t\t# Therefore, if speech wouldn't be cancelled,\n\t\t# selection announcements would queue up when changing selection rapidly.\n\t\tspeech.cancelSpeech()\n\t\tapi.setNavigatorObject(self, isFocus=True)\n\t\tself.reportFocus()\n\t\t# Display results as flash messages.\n\t\tbraille.handler.message(braille.getPropertiesBraille(\n\t\t\tname=self.name, role=self.role, positionInfo=self.positionInfo, description=self.description\n\t\t))\n\n\nclass IntelliSenseList(UIA):\n\t...\n\n\ndef findExtraOverlayClasses(obj, clsList):\n\tif obj.UIAElement.cachedAutomationId == \"listBoxCompletions\":\n\t\tclsList.insert(0, IntelliSenseList)\n\telif isinstance(obj.parent, IntelliSenseList) and obj.UIAElement.cachedClassName == \"IntellisenseMenuItem\":\n\t\tclsList.insert(0, IntelliSenseItem)\n", "path": "source/NVDAObjects/UIA/VisualStudio.py"}]} | 1,195 | 337 |
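Editor's note on the NVDA record above: the fix leans on two hooks visible in the diff — `findExtraOverlayClasses`, which chooses overlay classes for UIA objects, and the `_shouldAllowUIALiveRegionChangeEvent` attribute, which gates live-region announcements. A stripped-down illustration of that pattern; the class name and automation id below are invented, not taken from NVDA or Visual Studio:

```python
# Illustration only of the overlay-class pattern used by the patch; names are
# hypothetical. Silencing a duplicated live region amounts to matching the
# element and flipping one class attribute.
from . import UIA


class SomeNoisyLiveRegion(UIA):
    # NVDA consults this flag before speaking UIA live-region changes, so the
    # duplicated announcement is suppressed for matching elements only.
    _shouldAllowUIALiveRegionChangeEvent = False


def findExtraOverlayClasses(obj, clsList):
    # Match on whatever is stable (automation id, class name, neighbours) and
    # put the overlay class first so its attributes take precedence.
    if obj.UIAAutomationId == "noisyStatusText":
        clsList.insert(0, SomeNoisyLiveRegion)
```

Because the IntelliSense menu items already report the selection, muting the parallel live region removes the doubled speech without losing information.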
gh_patches_debug_566 | rasdani/github-patches | git_diff | pex-tool__pex-797 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.0.1
On the docket:
+ [x] pex --index-url=... fails in 2.0.0 #794
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.0.0'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.0.0'
+__version__ = '2.0.1'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.0.0'\n+__version__ = '2.0.1'\n", "issue": "Release 2.0.1\nOn the docket:\r\n\r\n+ [x] pex --index-url=... fails in 2.0.0 #794\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.0'\n", "path": "pex/version.py"}]} | 619 | 95 |
gh_patches_debug_56452 | rasdani/github-patches | git_diff | netket__netket-111 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python bindings for Jastrow machines randomly failing
I realized in #91 that once in a while the Python tests for the `Jastrow` machines fail. This issue seems related to some memory problem, but I still don't understand whether it is on the C++ side or the Python side.
</issue>
<code>
[start of setup.py]
1 import os
2 import re
3 import sys
4 import platform
5 import subprocess
6
7 from setuptools import setup, Extension
8 from setuptools.command.build_ext import build_ext
9 from distutils.version import LooseVersion
10
11
12 class CMakeExtension(Extension):
13 def __init__(self, name, sourcedir=''):
14 Extension.__init__(self, name, sources=[])
15 self.sourcedir = os.path.abspath(sourcedir)
16
17
18 class CMakeBuild(build_ext):
19 def run(self):
20 try:
21 out = subprocess.check_output(['cmake', '--version'])
22 except OSError:
23 raise RuntimeError("CMake must be installed to build the following extensions: " +
24 ", ".join(e.name for e in self.extensions))
25
26 if platform.system() == "Windows":
27 cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
28 if cmake_version < '3.1.0':
29 raise RuntimeError("CMake >= 3.1.0 is required on Windows")
30
31 for ext in self.extensions:
32 self.build_extension(ext)
33
34 def build_extension(self, ext):
35 extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
36 cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
37 '-DPYTHON_EXECUTABLE=' + sys.executable]
38
39 cfg = 'Debug' if self.debug else 'Release'
40 build_args = ['--config', cfg]
41
42 if platform.system() == "Windows":
43 cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
44 if sys.maxsize > 2**32:
45 cmake_args += ['-A', 'x64']
46 build_args += ['--', '/m']
47 else:
48 cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
49 build_args += ['--', '-j2']
50
51 env = os.environ.copy()
52 env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
53 self.distribution.get_version())
54 if not os.path.exists(self.build_temp):
55 os.makedirs(self.build_temp)
56 subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
57 subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
58
59 setup(
60 name='netket',
61 version='0.1',
62 author='Giuseppe Carleo et al.',
63 description='NetKet',
64 url='http://github.com/netket/netket',
65 author_email='[email protected]',
66 license='Apache',
67 ext_modules=[CMakeExtension('netket')],
68 cmdclass=dict(build_ext=CMakeBuild),
69 zip_safe=False,
70 )
71
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
setup(
name='netket',
- version='0.1',
+ version='2.0',
author='Giuseppe Carleo et al.',
description='NetKet',
url='http://github.com/netket/netket',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n \n setup(\n name='netket',\n- version='0.1',\n+ version='2.0',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n", "issue": "Python bindings for Jastrow machines randomly failing\nI realized in #91 that once in a while the python tests for the `Jastrow` machines fail. This issue seems related to some memory problem, but I still don't understand if it is on the c++ side or python \n", "before_files": [{"content": "import os\nimport re\nimport sys\nimport platform\nimport subprocess\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom distutils.version import LooseVersion\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n if cmake_version < '3.1.0':\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n\n cfg = 'Debug' if self.debug else 'Release'\n build_args = ['--config', cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]\n if sys.maxsize > 2**32:\n cmake_args += ['-A', 'x64']\n build_args += ['--', '/m']\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n build_args += ['--', '-j2']\n\n env = os.environ.copy()\n env['CXXFLAGS'] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(env.get('CXXFLAGS', ''),\n self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)\n\nsetup(\n name='netket',\n version='0.1',\n author='Giuseppe Carleo et al.',\n description='NetKet',\n url='http://github.com/netket/netket',\n author_email='[email protected]',\n license='Apache',\n ext_modules=[CMakeExtension('netket')],\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,319 | 87 |
gh_patches_debug_20708 | rasdani/github-patches | git_diff | cfpb__consumerfinance.gov-457 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
little typo on The Bureau page
http://beta.consumerfinance.gov/the-bureau/
"Organizaitonal structure and leadership >"
should be Organizational
</issue>
<code>
[start of _lib/wordpress_post_processor.py]
1 import sys
2 import json
3 import os.path
4 import requests
5 from string import Template
6
7 import dateutil.parser
8
9 def posts_at_url(url):
10
11 current_page = 1
12 max_page = sys.maxint
13
14 while current_page <= max_page:
15
16 url = os.path.expandvars(url)
17 resp = requests.get(url, params={'page':current_page, 'count': '-1'})
18 results = json.loads(resp.content)
19 current_page += 1
20 max_page = results['pages']
21 total = 0
22 for p in results['posts']:
23 total += 1
24 yield p
25
26 def documents(name, url, **kwargs):
27
28 for post in posts_at_url(url):
29 yield process_post(post)
30
31
32 def process_post(post, newsroom = False):
33 del post['comments']
34 post['_id'] = post['slug']
35 # remove fields we're not interested in
36 if post['type'] == 'cfpb_newsroom':
37 post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]
38 elif post['type'] == 'post':
39 post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]
40 if post['type'] == 'watchroom':
41 post['author'] = [post['author']['name']]
42 # convert watchroom_data_x into a proper list
43 links = []
44 for x in xrange(0,10):
45 custom_fields = post['custom_fields']
46 key = 'watchroom_data_%s_link' % x
47 if key in custom_fields:
48 links.append(custom_fields[key])
49 post['links'] = links
50 else:
51 post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]
52 post['author'] = [author['title'] for author in
53 post['taxonomy_fj_author'] if 'Press Release' not in
54 post['category']]
55 if newsroom and post['type'] == 'post':
56 post['category'][0] = "Blog"
57 author_template = Template("$first_name $last_name")
58 dt = dateutil.parser.parse(post['date'])
59 dt_string = dt.strftime('%Y-%m-%dT%H:%M:%SZ')
60 post['date'] = dt_string
61 if 'twtr_text' in post['custom_fields']:
62 post['twtr_text'] = post['custom_fields']['twtr_text'][0]
63 if 'twtr_lang' in post['custom_fields']:
64 post['twtr_lang'] = post['custom_fields']['twtr_lang'][0]
65 if 'twtr_rel' in post['custom_fields']:
66 post['twtr_rel'] = post['custom_fields']['twtr_rel'][0]
67 if 'twtr_hash' in post['custom_fields']:
68 post['twtr_hash'] = post['custom_fields']['twtr_hash'][0]
69 return post
70
[end of _lib/wordpress_post_processor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/_lib/wordpress_post_processor.py b/_lib/wordpress_post_processor.py
--- a/_lib/wordpress_post_processor.py
+++ b/_lib/wordpress_post_processor.py
@@ -37,13 +37,13 @@
post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]
elif post['type'] == 'post':
post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]
- if post['type'] == 'watchroom':
+ if post['type'] == 'featured_topic':
post['author'] = [post['author']['name']]
- # convert watchroom_data_x into a proper list
+ # convert featured_topic_data_x into a proper list
links = []
for x in xrange(0,10):
custom_fields = post['custom_fields']
- key = 'watchroom_data_%s_link' % x
+ key = 'featured_topic_data_%s_link' % x
if key in custom_fields:
links.append(custom_fields[key])
post['links'] = links
| {"golden_diff": "diff --git a/_lib/wordpress_post_processor.py b/_lib/wordpress_post_processor.py\n--- a/_lib/wordpress_post_processor.py\n+++ b/_lib/wordpress_post_processor.py\n@@ -37,13 +37,13 @@\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]\n elif post['type'] == 'post':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]\n- if post['type'] == 'watchroom':\n+ if post['type'] == 'featured_topic':\n post['author'] = [post['author']['name']]\n- # convert watchroom_data_x into a proper list\n+ # convert featured_topic_data_x into a proper list\n links = []\n for x in xrange(0,10):\n custom_fields = post['custom_fields']\n- key = 'watchroom_data_%s_link' % x\n+ key = 'featured_topic_data_%s_link' % x\n if key in custom_fields:\n links.append(custom_fields[key])\n post['links'] = links\n", "issue": "little typo on The Bureau page\nhttp://beta.consumerfinance.gov/the-bureau/\n\n\"Organizaitonal structure and leadership >\"\n\nshould be Organizational\n\n", "before_files": [{"content": "import sys\nimport json\nimport os.path\nimport requests\nfrom string import Template\n\nimport dateutil.parser\n\ndef posts_at_url(url):\n \n current_page = 1\n max_page = sys.maxint\n\n while current_page <= max_page:\n\n url = os.path.expandvars(url)\n resp = requests.get(url, params={'page':current_page, 'count': '-1'})\n results = json.loads(resp.content) \n current_page += 1\n max_page = results['pages']\n total = 0\n for p in results['posts']:\n total += 1\n yield p\n\ndef documents(name, url, **kwargs):\n \n for post in posts_at_url(url):\n yield process_post(post)\n\n\ndef process_post(post, newsroom = False):\n del post['comments']\n post['_id'] = post['slug']\n # remove fields we're not interested in\n if post['type'] == 'cfpb_newsroom':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_cfpb_newsroom_cat_taxonomy']]\n elif post['type'] == 'post':\n post['category'] = [cat['title'].replace('&', '&') for cat in post['taxonomy_fj_category']]\n if post['type'] == 'watchroom':\n post['author'] = [post['author']['name']]\n # convert watchroom_data_x into a proper list\n links = []\n for x in xrange(0,10):\n custom_fields = post['custom_fields']\n key = 'watchroom_data_%s_link' % x\n if key in custom_fields:\n links.append(custom_fields[key])\n post['links'] = links\n else:\n post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]\n post['author'] = [author['title'] for author in\n post['taxonomy_fj_author'] if 'Press Release' not in\n post['category']]\n if newsroom and post['type'] == 'post':\n post['category'][0] = \"Blog\"\n author_template = Template(\"$first_name $last_name\")\n dt = dateutil.parser.parse(post['date'])\n dt_string = dt.strftime('%Y-%m-%dT%H:%M:%SZ')\n post['date'] = dt_string\n if 'twtr_text' in post['custom_fields']:\n post['twtr_text'] = post['custom_fields']['twtr_text'][0]\n if 'twtr_lang' in post['custom_fields']:\n post['twtr_lang'] = post['custom_fields']['twtr_lang'][0]\n if 'twtr_rel' in post['custom_fields']:\n post['twtr_rel'] = post['custom_fields']['twtr_rel'][0]\n if 'twtr_hash' in post['custom_fields']:\n post['twtr_hash'] = post['custom_fields']['twtr_hash'][0]\n return post\n", "path": "_lib/wordpress_post_processor.py"}]} | 1,344 | 262 |
gh_patches_debug_12524 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python supported versions need to be updated in docs
Not really a bug, but, for example, it still says Python 3.4 is supported in the readme and the setup metadata.
Copy-pasting from https://pypi.org/project/python-telegram-bot/ :
> This library provides a pure Python interface for the Telegram Bot API. It’s compatible with Python versions 2.7, 3.3+ and PyPy.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """The setup and build script for the python-telegram-bot library."""
3
4 import codecs
5 import os
6 from setuptools import setup, find_packages
7
8
9 def requirements():
10 """Build the requirements list for this project"""
11 requirements_list = []
12
13 with open('requirements.txt') as requirements:
14 for install in requirements:
15 requirements_list.append(install.strip())
16
17 return requirements_list
18
19
20 packages = find_packages(exclude=['tests*'])
21
22 with codecs.open('README.rst', 'r', 'utf-8') as fd:
23 fn = os.path.join('telegram', 'version.py')
24 with open(fn) as fh:
25 code = compile(fh.read(), fn, 'exec')
26 exec(code)
27
28 setup(name='python-telegram-bot',
29 version=__version__,
30 author='Leandro Toledo',
31 author_email='[email protected]',
32 license='LGPLv3',
33 url='https://python-telegram-bot.org/',
34 keywords='python telegram bot api wrapper',
35 description="We have made you a wrapper you can't refuse",
36 long_description=fd.read(),
37 packages=packages,
38 install_requires=requirements(),
39 extras_require={
40 'json': 'ujson',
41 'socks': 'PySocks'
42 },
43 include_package_data=True,
44 classifiers=[
45 'Development Status :: 5 - Production/Stable',
46 'Intended Audience :: Developers',
47 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
48 'Operating System :: OS Independent',
49 'Topic :: Software Development :: Libraries :: Python Modules',
50 'Topic :: Communications :: Chat',
51 'Topic :: Internet',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.4',
57 'Programming Language :: Python :: 3.5',
58 'Programming Language :: Python :: 3.6',
59 'Programming Language :: Python :: 3.7'
60 ],)
61
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,10 +50,7 @@
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,10 +50,7 @@\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n", "issue": "Python supported versions need to be updated in docs\nNot really a bug, but for example it still says Python 3.4 is supported in readme and setup explanations.\r\n\r\nCopy-pasting from https://pypi.org/project/python-telegram-bot/ :\r\n\r\n> This library provides a pure Python interface for the Telegram Bot API. It\u2019s compatible with Python versions 2.7, 3.3+ and PyPy.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements(),\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n ],)\n", "path": "setup.py"}]} | 1,178 | 138 |
gh_patches_debug_17838 | rasdani/github-patches | git_diff | voxel51__fiftyone-1283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FR] update opencv-python-headless
### Proposal Summary
Currently this repo requires opencv-python-headless<=4.4.0.46. To cut a long story short, there are no wheels available for Python 3.9, so I am unable to install fiftyone (I am using the Docker image `jupyter/scipy-notebook:latest`). However, version `4.5.3.56` is available and installs without issue, and I propose updating the requirement for this dependency.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """
3 Installs FiftyOne.
4
5 | Copyright 2017-2021, Voxel51, Inc.
6 | `voxel51.com <https://voxel51.com/>`_
7 |
8 """
9 import os
10 from setuptools import setup, find_packages
11 from wheel.bdist_wheel import bdist_wheel
12
13
14 class BdistWheelCustom(bdist_wheel):
15 def finalize_options(self):
16 bdist_wheel.finalize_options(self)
17 # make just the wheel require these packages, since they aren't needed
18 # for a development installation
19 self.distribution.install_requires += [
20 "fiftyone-brain>=0.7,<0.8",
21 "fiftyone-db>=0.3,<0.4",
22 ]
23
24
25 VERSION = "0.13.2"
26
27
28 def get_version():
29 if "RELEASE_VERSION" in os.environ:
30 version = os.environ["RELEASE_VERSION"]
31 if not version.startswith(VERSION):
32 raise ValueError(
33 "Release version does not match version: %s and %s"
34 % (version, VERSION)
35 )
36 return version
37
38 return VERSION
39
40
41 EXTRAS_REQUIREMENTS = {"desktop": ["fiftyone-desktop>=0.16,<0.17"]}
42
43
44 with open("README.md", "r") as fh:
45 long_description = fh.read()
46
47
48 setup(
49 name="fiftyone",
50 version=get_version(),
51 description=(
52 "FiftyOne: the open-source tool for building high-quality datasets "
53 "and computer vision models"
54 ),
55 author="Voxel51, Inc.",
56 author_email="[email protected]",
57 url="https://github.com/voxel51/fiftyone",
58 extras_require=EXTRAS_REQUIREMENTS,
59 license="Apache",
60 long_description=long_description,
61 long_description_content_type="text/markdown",
62 packages=find_packages() + ["fiftyone.recipes", "fiftyone.tutorials"],
63 package_dir={
64 "fiftyone.recipes": "docs/source/recipes",
65 "fiftyone.tutorials": "docs/source/tutorials",
66 },
67 include_package_data=True,
68 install_requires=[
69 # third-party packages
70 "argcomplete",
71 "boto3",
72 "Deprecated",
73 "eventlet",
74 "future",
75 "Jinja2",
76 "kaleido",
77 "matplotlib",
78 "mongoengine==0.20.0",
79 "motor>=2.3,<3",
80 "numpy",
81 "packaging",
82 "pandas",
83 "Pillow>=6.2",
84 "plotly>=4.14,<5",
85 "pprintpp",
86 "psutil",
87 "pymongo>=3.11,<4",
88 "PyYAML",
89 "retrying",
90 "scikit-learn",
91 "scikit-image",
92 "setuptools",
93 "tabulate",
94 "tornado>=5.1.1,<7",
95 "xmltodict",
96 "universal-analytics-python3>=1.0.1,<2",
97 # internal packages
98 "voxel51-eta>=0.5.2,<0.6",
99 # ETA dependency - restricted to a maximum version known to provide
100 # wheels here because it tends to publish sdists several hours before
101 # wheels. When users install FiftyOne in this window, they will need to
102 # compile OpenCV from source, leading to either errors or a
103 # time-consuming installation.
104 "opencv-python-headless<=4.4.0.46",
105 ],
106 classifiers=[
107 "Development Status :: 4 - Beta",
108 "Intended Audience :: Developers",
109 "Intended Audience :: Science/Research",
110 "License :: OSI Approved :: Apache Software License",
111 "Topic :: Scientific/Engineering :: Artificial Intelligence",
112 "Topic :: Scientific/Engineering :: Image Processing",
113 "Topic :: Scientific/Engineering :: Image Recognition",
114 "Topic :: Scientific/Engineering :: Information Analysis",
115 "Topic :: Scientific/Engineering :: Visualization",
116 "Operating System :: MacOS :: MacOS X",
117 "Operating System :: POSIX :: Linux",
118 "Operating System :: Microsoft :: Windows",
119 "Programming Language :: Python :: 3",
120 "Programming Language :: Python :: 3.6",
121 "Programming Language :: Python :: 3.7",
122 "Programming Language :: Python :: 3.8",
123 "Programming Language :: Python :: 3.9",
124 ],
125 entry_points={"console_scripts": ["fiftyone=fiftyone.core.cli:main"]},
126 python_requires=">=3.6",
127 cmdclass={"bdist_wheel": BdistWheelCustom},
128 )
129
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -78,6 +78,7 @@
"mongoengine==0.20.0",
"motor>=2.3,<3",
"numpy",
+ "opencv-python-headless",
"packaging",
"pandas",
"Pillow>=6.2",
@@ -96,12 +97,6 @@
"universal-analytics-python3>=1.0.1,<2",
# internal packages
"voxel51-eta>=0.5.2,<0.6",
- # ETA dependency - restricted to a maximum version known to provide
- # wheels here because it tends to publish sdists several hours before
- # wheels. When users install FiftyOne in this window, they will need to
- # compile OpenCV from source, leading to either errors or a
- # time-consuming installation.
- "opencv-python-headless<=4.4.0.46",
],
classifiers=[
"Development Status :: 4 - Beta",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -78,6 +78,7 @@\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"numpy\",\n+ \"opencv-python-headless\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n@@ -96,12 +97,6 @@\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"voxel51-eta>=0.5.2,<0.6\",\n- # ETA dependency - restricted to a maximum version known to provide\n- # wheels here because it tends to publish sdists several hours before\n- # wheels. When users install FiftyOne in this window, they will need to\n- # compile OpenCV from source, leading to either errors or a\n- # time-consuming installation.\n- \"opencv-python-headless<=4.4.0.46\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n", "issue": "[FR] update opencv-python-headless\n### Proposal Summary\r\nCurrently this repo requires opencv-python-headless<=4.4.0.46. To cut a long story short there are no wheels available for python3.9 and I am unable to install fiftyone (I am using docker `image: jupyter/scipy-notebook:latest`). However version `4.5.3.56` is available for install without issue, and I propose updating the requirement for this dependency.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nfrom setuptools import setup, find_packages\nfrom wheel.bdist_wheel import bdist_wheel\n\n\nclass BdistWheelCustom(bdist_wheel):\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n # make just the wheel require these packages, since they aren't needed\n # for a development installation\n self.distribution.install_requires += [\n \"fiftyone-brain>=0.7,<0.8\",\n \"fiftyone-db>=0.3,<0.4\",\n ]\n\n\nVERSION = \"0.13.2\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.16,<0.17\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages() + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n include_package_data=True,\n install_requires=[\n # third-party packages\n \"argcomplete\",\n \"boto3\",\n \"Deprecated\",\n \"eventlet\",\n \"future\",\n \"Jinja2\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.20.0\",\n \"motor>=2.3,<3\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14,<5\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.11,<4\",\n \"PyYAML\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"tabulate\",\n \"tornado>=5.1.1,<7\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"voxel51-eta>=0.5.2,<0.6\",\n # 
ETA dependency - restricted to a maximum version known to provide\n # wheels here because it tends to publish sdists several hours before\n # wheels. When users install FiftyOne in this window, they will need to\n # compile OpenCV from source, leading to either errors or a\n # time-consuming installation.\n \"opencv-python-headless<=4.4.0.46\",\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.6\",\n cmdclass={\"bdist_wheel\": BdistWheelCustom},\n)\n", "path": "setup.py"}]} | 1,917 | 243 |
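Editor's note on the fiftyone record above: the merged fix removes the version ceiling on `opencv-python-headless` entirely, which unblocks Python 3.9 at the cost of the wheel-availability guard described in the deleted comment. A hypothetical middle ground — not what the shipped patch does, and the bound is an assumption — keeps only a floor and leaves wheel enforcement to the installer:

```python
# Hypothetical alternative for setup.py: a floor avoids ancient releases while
# staying open to newly released Python versions. The exact bound is
# illustrative only.
install_requires = [
    # ... other requirements unchanged ...
    "opencv-python-headless>=4.5",
]
```

Users who must avoid source builds can enforce wheels at install time instead, e.g. `pip install --only-binary=:all: fiftyone`.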
gh_patches_debug_7459 | rasdani/github-patches | git_diff | rlworkgroup__garage-1558 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Backport #1554
</issue>
<code>
[start of src/garage/tf/plotter/plotter.py]
1 import atexit
2 from collections import namedtuple
3 from enum import Enum
4 import platform
5 from queue import Queue
6 from threading import Thread
7
8 import numpy as np
9 import tensorflow as tf
10
11 from garage.sampler.utils import rollout as default_rollout
12
13 __all__ = ['Plotter']
14
15
16 class Op(Enum):
17 STOP = 0
18 UPDATE = 1
19 DEMO = 2
20
21
22 Message = namedtuple('Message', ['op', 'args', 'kwargs'])
23
24
25 class Plotter:
26
27 # Static variable used to disable the plotter
28 enable = True
29 # List containing all plotters instantiated in the process
30 __plotters = []
31
32 def __init__(self,
33 env,
34 policy,
35 sess=None,
36 graph=None,
37 rollout=default_rollout):
38 Plotter.__plotters.append(self)
39 self.env = env
40 self.sess = tf.compat.v1.get_default_session(
41 ) if sess is None else sess
42 self.graph = tf.compat.v1.get_default_graph(
43 ) if graph is None else graph
44 with self.sess.as_default(), self.graph.as_default():
45 self.policy = policy.clone('plotter_policy')
46 self.rollout = rollout
47 self.worker_thread = Thread(target=self._start_worker, daemon=True)
48 self.queue = Queue()
49
50 # Needed in order to draw glfw window on the main thread
51 if ('Darwin' in platform.platform()):
52 self.rollout(env,
53 policy,
54 max_path_length=np.inf,
55 animated=True,
56 speedup=5)
57
58 def _start_worker(self):
59 env = None
60 policy = None
61 max_length = None
62 initial_rollout = True
63 try:
64 with self.sess.as_default(), self.sess.graph.as_default():
65 # Each iteration will process ALL messages currently in the
66 # queue
67 while True:
68 msgs = {}
69 # If true, block and yield processor
70 if initial_rollout:
71 msg = self.queue.get()
72 msgs[msg.op] = msg
73 # Only fetch the last message of each type
74 while not self.queue.empty():
75 msg = self.queue.get()
76 msgs[msg.op] = msg
77 else:
78 # Only fetch the last message of each type
79 while not self.queue.empty():
80 msg = self.queue.get_nowait()
81 msgs[msg.op] = msg
82
83 if Op.STOP in msgs:
84 self.queue.task_done()
85 break
86 if Op.UPDATE in msgs:
87 env, policy = msgs[Op.UPDATE].args
88 self.queue.task_done()
89 if Op.DEMO in msgs:
90 param_values, max_length = msgs[Op.DEMO].args
91 policy.set_param_values(param_values)
92 initial_rollout = False
93 self.rollout(env,
94 policy,
95 max_path_length=max_length,
96 animated=True,
97 speedup=5)
98 self.queue.task_done()
99 else:
100 if max_length:
101 self.rollout(env,
102 policy,
103 max_path_length=max_length,
104 animated=True,
105 speedup=5)
106 except KeyboardInterrupt:
107 pass
108
109 def close(self):
110 if self.worker_thread.is_alive():
111 while not self.queue.empty():
112 self.queue.get()
113 self.queue.task_done()
114 self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))
115 self.queue.join()
116 self.worker_thread.join()
117
118 @staticmethod
119 def disable():
120 """Disable all instances of the Plotter class."""
121 Plotter.enable = False
122
123 @staticmethod
124 def get_plotters():
125 return Plotter.__plotters
126
127 def start(self):
128 if not Plotter.enable:
129 return
130 if not self.worker_thread.is_alive():
131 tf.compat.v1.get_variable_scope().reuse_variables()
132 self.worker_thread.start()
133 self.queue.put(
134 Message(op=Op.UPDATE,
135 args=(self.env, self.policy),
136 kwargs=None))
137 atexit.register(self.close)
138
139 def update_plot(self, policy, max_length=np.inf):
140 if not Plotter.enable:
141 return
142 if self.worker_thread.is_alive():
143 self.queue.put(
144 Message(op=Op.DEMO,
145 args=(policy.get_param_values(), max_length),
146 kwargs=None))
147
[end of src/garage/tf/plotter/plotter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/garage/tf/plotter/plotter.py b/src/garage/tf/plotter/plotter.py
--- a/src/garage/tf/plotter/plotter.py
+++ b/src/garage/tf/plotter/plotter.py
@@ -43,6 +43,7 @@
) if graph is None else graph
with self.sess.as_default(), self.graph.as_default():
self.policy = policy.clone('plotter_policy')
+ self.policy.build(policy.model.input)
self.rollout = rollout
self.worker_thread = Thread(target=self._start_worker, daemon=True)
self.queue = Queue()
| {"golden_diff": "diff --git a/src/garage/tf/plotter/plotter.py b/src/garage/tf/plotter/plotter.py\n--- a/src/garage/tf/plotter/plotter.py\n+++ b/src/garage/tf/plotter/plotter.py\n@@ -43,6 +43,7 @@\n ) if graph is None else graph\n with self.sess.as_default(), self.graph.as_default():\n self.policy = policy.clone('plotter_policy')\n+ self.policy.build(policy.model.input)\n self.rollout = rollout\n self.worker_thread = Thread(target=self._start_worker, daemon=True)\n self.queue = Queue()\n", "issue": "Backport #1554\n\n", "before_files": [{"content": "import atexit\nfrom collections import namedtuple\nfrom enum import Enum\nimport platform\nfrom queue import Queue\nfrom threading import Thread\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom garage.sampler.utils import rollout as default_rollout\n\n__all__ = ['Plotter']\n\n\nclass Op(Enum):\n STOP = 0\n UPDATE = 1\n DEMO = 2\n\n\nMessage = namedtuple('Message', ['op', 'args', 'kwargs'])\n\n\nclass Plotter:\n\n # Static variable used to disable the plotter\n enable = True\n # List containing all plotters instantiated in the process\n __plotters = []\n\n def __init__(self,\n env,\n policy,\n sess=None,\n graph=None,\n rollout=default_rollout):\n Plotter.__plotters.append(self)\n self.env = env\n self.sess = tf.compat.v1.get_default_session(\n ) if sess is None else sess\n self.graph = tf.compat.v1.get_default_graph(\n ) if graph is None else graph\n with self.sess.as_default(), self.graph.as_default():\n self.policy = policy.clone('plotter_policy')\n self.rollout = rollout\n self.worker_thread = Thread(target=self._start_worker, daemon=True)\n self.queue = Queue()\n\n # Needed in order to draw glfw window on the main thread\n if ('Darwin' in platform.platform()):\n self.rollout(env,\n policy,\n max_path_length=np.inf,\n animated=True,\n speedup=5)\n\n def _start_worker(self):\n env = None\n policy = None\n max_length = None\n initial_rollout = True\n try:\n with self.sess.as_default(), self.sess.graph.as_default():\n # Each iteration will process ALL messages currently in the\n # queue\n while True:\n msgs = {}\n # If true, block and yield processor\n if initial_rollout:\n msg = self.queue.get()\n msgs[msg.op] = msg\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get()\n msgs[msg.op] = msg\n else:\n # Only fetch the last message of each type\n while not self.queue.empty():\n msg = self.queue.get_nowait()\n msgs[msg.op] = msg\n\n if Op.STOP in msgs:\n self.queue.task_done()\n break\n if Op.UPDATE in msgs:\n env, policy = msgs[Op.UPDATE].args\n self.queue.task_done()\n if Op.DEMO in msgs:\n param_values, max_length = msgs[Op.DEMO].args\n policy.set_param_values(param_values)\n initial_rollout = False\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n self.queue.task_done()\n else:\n if max_length:\n self.rollout(env,\n policy,\n max_path_length=max_length,\n animated=True,\n speedup=5)\n except KeyboardInterrupt:\n pass\n\n def close(self):\n if self.worker_thread.is_alive():\n while not self.queue.empty():\n self.queue.get()\n self.queue.task_done()\n self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))\n self.queue.join()\n self.worker_thread.join()\n\n @staticmethod\n def disable():\n \"\"\"Disable all instances of the Plotter class.\"\"\"\n Plotter.enable = False\n\n @staticmethod\n def get_plotters():\n return Plotter.__plotters\n\n def start(self):\n if not Plotter.enable:\n return\n if not self.worker_thread.is_alive():\n 
tf.compat.v1.get_variable_scope().reuse_variables()\n self.worker_thread.start()\n self.queue.put(\n Message(op=Op.UPDATE,\n args=(self.env, self.policy),\n kwargs=None))\n atexit.register(self.close)\n\n def update_plot(self, policy, max_length=np.inf):\n if not Plotter.enable:\n return\n if self.worker_thread.is_alive():\n self.queue.put(\n Message(op=Op.DEMO,\n args=(policy.get_param_values(), max_length),\n kwargs=None))\n", "path": "src/garage/tf/plotter/plotter.py"}]} | 1,799 | 146 |
gh_patches_debug_1848 | rasdani/github-patches | git_diff | kivy__python-for-android-1163 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Openssl recipe crashes on x86 arch
p4a branch: stable
buildozer: 0.33
bootstrap: sdl2
kivy: 1.10.0
Error message i get:
```
arm_arch.h:46:6: error: #error "unsupported ARM architecture"
```
</issue>
<code>
[start of pythonforandroid/recipes/openssl/__init__.py]
1 from functools import partial
2
3 from pythonforandroid.toolchain import Recipe, shprint, current_directory
4 import sh
5
6
7 class OpenSSLRecipe(Recipe):
8 version = '1.0.2h'
9 url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'
10
11 def should_build(self, arch):
12 return not self.has_libs(arch, 'libssl' + self.version + '.so',
13 'libcrypto' + self.version + '.so')
14
15 def check_symbol(self, env, sofile, symbol):
16 nm = env.get('NM', 'nm')
17 syms = sh.sh('-c', "{} -gp {} | cut -d' ' -f3".format(
18 nm, sofile), _env=env).splitlines()
19 if symbol in syms:
20 return True
21 print('{} missing symbol {}; rebuilding'.format(sofile, symbol))
22 return False
23
24 def get_recipe_env(self, arch=None):
25 env = super(OpenSSLRecipe, self).get_recipe_env(arch)
26 env['OPENSSL_VERSION'] = self.version
27 env['CFLAGS'] += ' ' + env['LDFLAGS']
28 env['CC'] += ' ' + env['LDFLAGS']
29 return env
30
31 def select_build_arch(self, arch):
32 aname = arch.arch
33 if 'arm64' in aname:
34 return 'linux-aarch64'
35 if 'v7a' in aname:
36 return 'android-armv7'
37 if 'arm' in aname:
38 return 'android'
39 return 'linux-armv4'
40
41 def build_arch(self, arch):
42 env = self.get_recipe_env(arch)
43 with current_directory(self.get_build_dir(arch.arch)):
44 # sh fails with code 255 trying to execute ./Configure
45 # so instead we manually run perl passing in Configure
46 perl = sh.Command('perl')
47 buildarch = self.select_build_arch(arch)
48 shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)
49 self.apply_patch('disable-sover.patch', arch.arch)
50 self.apply_patch('rename-shared-lib.patch', arch.arch)
51
52 # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')
53 check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')
54 while True:
55 shprint(sh.make, 'build_libs', _env=env)
56 if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):
57 break
58 shprint(sh.make, 'clean', _env=env)
59
60 self.install_libs(arch, 'libssl' + self.version + '.so',
61 'libcrypto' + self.version + '.so')
62
63 recipe = OpenSSLRecipe()
64
[end of pythonforandroid/recipes/openssl/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py
--- a/pythonforandroid/recipes/openssl/__init__.py
+++ b/pythonforandroid/recipes/openssl/__init__.py
@@ -36,6 +36,8 @@
return 'android-armv7'
if 'arm' in aname:
return 'android'
+ if 'x86' in aname:
+ return 'android-x86'
return 'linux-armv4'
def build_arch(self, arch):
| {"golden_diff": "diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py\n--- a/pythonforandroid/recipes/openssl/__init__.py\n+++ b/pythonforandroid/recipes/openssl/__init__.py\n@@ -36,6 +36,8 @@\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n+ if 'x86' in aname:\n+ return 'android-x86'\n return 'linux-armv4'\n \n def build_arch(self, arch):\n", "issue": "Openssl recipe crashes on x86 arch\np4a branch: stable\r\nbuildozer: 0.33\r\nbootstrap: sdl2\r\nkivy: 1.10.0\r\n\r\nError message i get:\r\n```\r\narm_arch.h:46:6: error: #error \"unsupported ARM architecture\"\r\n```\n", "before_files": [{"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2h'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['OPENSSL_VERSION'] = self.version\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n self.apply_patch('rename-shared-lib.patch', arch.arch)\n\n # check_ssl = partial(self.check_symbol, env, 'libssl' + self.version + '.so')\n check_crypto = partial(self.check_symbol, env, 'libcrypto' + self.version + '.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl' + self.version + '.so',\n 'libcrypto' + self.version + '.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py"}]} | 1,369 | 129 |
gh_patches_debug_66775 | rasdani/github-patches | git_diff | apache__airflow-1296 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inconsistent 'owner' field in examples
Dear Airflow Maintainers,
### Environment
- Version of Airflow (e.g. a release version, running your own fork, running off master -- provide a git log snippet): **1.7.0**
- Screen shots of your DAG's graph and tree views:

- Operating System: (Windows Version or `$ uname -a`) **Ubuntu 14.04**
- Python Version: `$ python --version` **2.7**
### Description of Issue
- What did you expect to happen? **All of the examples have a consistent owner, probably 'airflow'**
- What happened instead? **[Some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_python_operator.py) examples have `airflow`, [some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_passing_params_via_test_command.py) have `me`**
### Reproduction Steps
1. install airflow 1.7.0 via pip
2. start the webserver
3. look at the web UI, probably http://localhost:8080
**Inconsistent hard-coding in the examples will likely lead to confusion for new users.**
</issue>
<code>
[start of airflow/example_dags/example_trigger_controller_dag.py]
1
2 """This example illustrates the use of the TriggerDagRunOperator. There are 2
3 entities at work in this scenario:
4 1. The Controller DAG - the DAG that conditionally executes the trigger
5 2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)
6
7 This example illustrates the following features :
8 1. A TriggerDagRunOperator that takes:
9 a. A python callable that decides whether or not to trigger the Target DAG
10 b. An optional params dict passed to the python callable to help in
11 evaluating whether or not to trigger the Target DAG
12 c. The id (name) of the Target DAG
13 d. The python callable can add contextual info to the DagRun created by
14 way of adding a Pickleable payload (e.g. dictionary of primitives). This
15 state is then made available to the TargetDag
16 2. A Target DAG : c.f. example_trigger_target_dag.py
17 """
18
19 from airflow import DAG
20 from airflow.operators import TriggerDagRunOperator
21 from datetime import datetime
22
23 import pprint
24
25 pp = pprint.PrettyPrinter(indent=4)
26
27
28 def conditionally_trigger(context, dag_run_obj):
29 """This function decides whether or not to Trigger the remote DAG"""
30 c_p =context['params']['condition_param']
31 print("Controller DAG : conditionally_trigger = {}".format(c_p))
32 if context['params']['condition_param']:
33 dag_run_obj.payload = {'message': context['params']['message']}
34 pp.pprint(dag_run_obj.payload)
35 return dag_run_obj
36
37
38 # Define the DAG
39 dag = DAG(dag_id='example_trigger_controller_dag',
40 default_args={"owner": "me",
41 "start_date": datetime.now()},
42 schedule_interval='@once')
43
44
45 # Define the single task in this controller example DAG
46 trigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',
47 trigger_dag_id="example_trigger_target_dag",
48 python_callable=conditionally_trigger,
49 params={'condition_param': True,
50 'message': 'Hello World'},
51 dag=dag)
52
[end of airflow/example_dags/example_trigger_controller_dag.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/example_dags/example_trigger_controller_dag.py b/airflow/example_dags/example_trigger_controller_dag.py
--- a/airflow/example_dags/example_trigger_controller_dag.py
+++ b/airflow/example_dags/example_trigger_controller_dag.py
@@ -37,7 +37,7 @@
# Define the DAG
dag = DAG(dag_id='example_trigger_controller_dag',
- default_args={"owner": "me",
+ default_args={"owner": "airflow",
"start_date": datetime.now()},
schedule_interval='@once')
| {"golden_diff": "diff --git a/airflow/example_dags/example_trigger_controller_dag.py b/airflow/example_dags/example_trigger_controller_dag.py\n--- a/airflow/example_dags/example_trigger_controller_dag.py\n+++ b/airflow/example_dags/example_trigger_controller_dag.py\n@@ -37,7 +37,7 @@\n \n # Define the DAG\n dag = DAG(dag_id='example_trigger_controller_dag',\n- default_args={\"owner\": \"me\",\n+ default_args={\"owner\": \"airflow\",\n \"start_date\": datetime.now()},\n schedule_interval='@once')\n", "issue": "Inconsistent 'owner' field in examples\nDear Airflow Maintainers,\n### Environment\n- Version of Airflow (e.g. a release version, running your own fork, running off master -- provide a git log snippet): **1.7.0**\n- Screen shots of your DAG's graph and tree views:\n \n- Operating System: (Windows Version or `$ uname -a`) **Ubuntu 14.04**\n- Python Version: `$ python --version` **2.7**\n### Description of Issue\n- What did you expect to happen? **All of the examples have a consistent owner, probably 'airflow'**\n- What happened instead? **[Some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_python_operator.py) examples have `airflow`, [some](https://github.com/airbnb/airflow/blob/master/airflow/example_dags/example_passing_params_via_test_command.py) have `me`**\n### Reproduction Steps\n1. install airflow 1.7.0 via pip\n2. start the webserver\n3. look at the web UI, probably http://localhost:8080\n\n**Inconsistent hard-coding in the examples will likely lead to confusion for new users.**\n\n", "before_files": [{"content": "\n\"\"\"This example illustrates the use of the TriggerDagRunOperator. There are 2\nentities at work in this scenario:\n1. The Controller DAG - the DAG that conditionally executes the trigger\n2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)\n\nThis example illustrates the following features :\n1. A TriggerDagRunOperator that takes:\n a. A python callable that decides whether or not to trigger the Target DAG\n b. An optional params dict passed to the python callable to help in\n evaluating whether or not to trigger the Target DAG\n c. The id (name) of the Target DAG\n d. The python callable can add contextual info to the DagRun created by\n way of adding a Pickleable payload (e.g. dictionary of primitives). This\n state is then made available to the TargetDag\n2. A Target DAG : c.f. 
example_trigger_target_dag.py\n\"\"\"\n\nfrom airflow import DAG\nfrom airflow.operators import TriggerDagRunOperator\nfrom datetime import datetime\n\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\n\ndef conditionally_trigger(context, dag_run_obj):\n \"\"\"This function decides whether or not to Trigger the remote DAG\"\"\"\n c_p =context['params']['condition_param']\n print(\"Controller DAG : conditionally_trigger = {}\".format(c_p))\n if context['params']['condition_param']:\n dag_run_obj.payload = {'message': context['params']['message']}\n pp.pprint(dag_run_obj.payload)\n return dag_run_obj\n\n\n# Define the DAG\ndag = DAG(dag_id='example_trigger_controller_dag',\n default_args={\"owner\": \"me\",\n \"start_date\": datetime.now()},\n schedule_interval='@once')\n\n\n# Define the single task in this controller example DAG\ntrigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',\n trigger_dag_id=\"example_trigger_target_dag\",\n python_callable=conditionally_trigger,\n params={'condition_param': True,\n 'message': 'Hello World'},\n dag=dag)\n", "path": "airflow/example_dags/example_trigger_controller_dag.py"}]} | 1,372 | 128 |
gh_patches_debug_37830 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2969 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: Running the chatgpt inference example raises an error
### 🐛 Describe the bug
(https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT)/examples/ running inference.py raises an OSError:

### Environment
_No response_
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/ChatGPT/examples/inference.py]
1 import argparse
2 import torch
3
4 from chatgpt.nn import BLOOMActor, GPTActor, OPTActor
5 from transformers import AutoTokenizer
6 from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
7
8
9 def eval(args):
10 # configure model
11 if args.model == 'gpt2':
12 model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
13 elif args.model == 'bloom':
14 model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())
15 elif args.model == 'opt':
16 model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
17 else:
18 raise ValueError(f'Unsupported model "{args.model}"')
19
20 # configure tokenizer
21 if args.model == 'gpt2':
22 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
23 tokenizer.pad_token = tokenizer.eos_token
24 elif args.model == 'bloom':
25 tokenizer = AutoTokenizer.from_pretrained(args.pretrain)
26 tokenizer.pad_token = tokenizer.eos_token
27 elif args.model == 'opt':
28 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
29 else:
30 raise ValueError(f'Unsupported model "{args.model}"')
31
32 model.eval()
33 input = args.input
34 input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())
35 outputs = model.generate(input_ids,
36 max_length=args.max_length,
37 do_sample=True,
38 top_k=50,
39 top_p=0.95,
40 num_return_sequences=1)
41 output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)
42 print(output)
43
44
45 if __name__ == '__main__':
46 parser = argparse.ArgumentParser()
47 parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])
48 parser.add_argument('--pretrain', type=str, default=None)
49 parser.add_argument('--input', type=str, default='Q: How are you ? A:')
50 parser.add_argument('--max_length', type=int, default=100)
51 args = parser.parse_args()
52 eval(args)
53
[end of applications/ChatGPT/examples/inference.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/ChatGPT/examples/inference.py b/applications/ChatGPT/examples/inference.py
--- a/applications/ChatGPT/examples/inference.py
+++ b/applications/ChatGPT/examples/inference.py
@@ -9,30 +9,34 @@
def eval(args):
# configure model
if args.model == 'gpt2':
- model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = GPTActor().to(torch.cuda.current_device())
elif args.model == 'bloom':
- model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = BLOOMActor().to(torch.cuda.current_device())
elif args.model == 'opt':
- model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())
+ actor = OPTActor().to(torch.cuda.current_device())
else:
raise ValueError(f'Unsupported model "{args.model}"')
+ state_dict = torch.load(args.pretrain)
+ actor.model.load_state_dict(state_dict)
+
+
# configure tokenizer
if args.model == 'gpt2':
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'bloom':
- tokenizer = AutoTokenizer.from_pretrained(args.pretrain)
+ tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
tokenizer.pad_token = tokenizer.eos_token
elif args.model == 'opt':
- tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+ tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')
else:
raise ValueError(f'Unsupported model "{args.model}"')
- model.eval()
+ actor.eval()
input = args.input
input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())
- outputs = model.generate(input_ids,
+ outputs = actor.generate(input_ids,
max_length=args.max_length,
do_sample=True,
top_k=50,
@@ -46,7 +50,7 @@
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])
parser.add_argument('--pretrain', type=str, default=None)
- parser.add_argument('--input', type=str, default='Q: How are you ? A:')
+ parser.add_argument('--input', type=str, default='Question: How are you ? Answer:')
parser.add_argument('--max_length', type=int, default=100)
args = parser.parse_args()
eval(args)
| {"golden_diff": "diff --git a/applications/ChatGPT/examples/inference.py b/applications/ChatGPT/examples/inference.py\n--- a/applications/ChatGPT/examples/inference.py\n+++ b/applications/ChatGPT/examples/inference.py\n@@ -9,30 +9,34 @@\n def eval(args):\n # configure model\n if args.model == 'gpt2':\n- model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = GPTActor().to(torch.cuda.current_device())\n elif args.model == 'bloom':\n- model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = BLOOMActor().to(torch.cuda.current_device())\n elif args.model == 'opt':\n- model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n+ actor = OPTActor().to(torch.cuda.current_device())\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n \n+ state_dict = torch.load(args.pretrain)\n+ actor.model.load_state_dict(state_dict)\n+ \n+ \n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n- tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n+ tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n- tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n+ tokenizer = AutoTokenizer.from_pretrained('facebook/opt-350m')\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n \n- model.eval()\n+ actor.eval()\n input = args.input\n input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())\n- outputs = model.generate(input_ids,\n+ outputs = actor.generate(input_ids,\n max_length=args.max_length,\n do_sample=True,\n top_k=50,\n@@ -46,7 +50,7 @@\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])\n parser.add_argument('--pretrain', type=str, default=None)\n- parser.add_argument('--input', type=str, default='Q: How are you ? A:')\n+ parser.add_argument('--input', type=str, default='Question: How are you ? 
Answer:')\n parser.add_argument('--max_length', type=int, default=100)\n args = parser.parse_args()\n eval(args)\n", "issue": "[BUG]: \u8fd0\u884cchatgpt\u63a8\u7406\u793a\u4f8b\u62a5\u9519\n### \ud83d\udc1b Describe the bug\n\n(https://github.com/hpcaitech/ColossalAI/tree/main/applications/ChatGPT)/examples/ \u8fd0\u884cinference.py \u629b\u51faOSError:\r\n\r\n\n\n### Environment\n\n_No response_\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport torch\n\nfrom chatgpt.nn import BLOOMActor, GPTActor, OPTActor\nfrom transformers import AutoTokenizer\nfrom transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer\n\n\ndef eval(args):\n # configure model\n if args.model == 'gpt2':\n model = GPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n elif args.model == 'bloom':\n model = BLOOMActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n elif args.model == 'opt':\n model = OPTActor(pretrained=args.pretrain).to(torch.cuda.current_device())\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n model.eval()\n input = args.input\n input_ids = tokenizer.encode(input, return_tensors='pt').to(torch.cuda.current_device())\n outputs = model.generate(input_ids,\n max_length=args.max_length,\n do_sample=True,\n top_k=50,\n top_p=0.95,\n num_return_sequences=1)\n output = tokenizer.batch_decode(outputs[0], skip_special_tokens=True)\n print(output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--input', type=str, default='Q: How are you ? A:')\n parser.add_argument('--max_length', type=int, default=100)\n args = parser.parse_args()\n eval(args)\n", "path": "applications/ChatGPT/examples/inference.py"}]} | 1,251 | 593 |
gh_patches_debug_15935 | rasdani/github-patches | git_diff | vispy__vispy-305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The first emitted Timer event has `None` as `dt` property
``` python
def on_timer(self, event):
print event.dt
```
displays `None` the first time, and then the correct dt (a float). The first dt should probably be `0.0`.
</issue>
<code>
[start of vispy/app/timer.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from __future__ import division
6
7 from ..util.event import Event, EmitterGroup
8 from ..util.ptime import time as precision_time
9 from ..ext.six import string_types
10 from .base import BaseTimerBackend as TimerBackend # noqa
11 from . import use_app, Application
12
13
14 class Timer(object):
15
16 """Timer used to schedule events in the future or on a repeating schedule
17
18 Parameters
19 ----------
20 interval : float
21 Time between events.
22 connect : function | None
23 The function to call.
24 iterations : int
25 Number of iterations. Can be -1 for infinite.
26 start : bool
27 Whether to start the timer.
28 app : instance of vispy.app.Application
29 The application to attach the timer to.
30 """
31
32 def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,
33 app=None):
34 self.events = EmitterGroup(source=self,
35 start=Event,
36 stop=Event,
37 timeout=Event)
38 #self.connect = self.events.timeout.connect
39 #self.disconnect = self.events.timeout.disconnect
40
41 # Get app instance
42 if app is None:
43 self._app = use_app()
44 elif isinstance(app, Application):
45 self._app = app
46 elif isinstance(app, string_types):
47 self._app = Application(app)
48 else:
49 raise ValueError('Invalid value for app %r' % app)
50
51 # Ensure app has backend app object
52 self._app.native
53
54 # Instantiate the backed with the right class
55 self._backend = self._app.backend_module.TimerBackend(self)
56
57 self._interval = interval
58 self._running = False
59 self._last_emit_time = None
60 self.iter_count = 0
61 self.max_iterations = iterations
62 if connect is not None:
63 self.connect(connect)
64 if start:
65 self.start()
66
67 @property
68 def app(self):
69 """ The vispy Application instance on which this Timer is based.
70 """
71 return self._app
72
73 @property
74 def interval(self):
75 return self._interval
76
77 @interval.setter
78 def interval(self, val):
79 self._interval = val
80 if self.running:
81 self.stop()
82 self.start()
83
84 @property
85 def running(self):
86 return self._running
87
88 def start(self, interval=None, iterations=None):
89 """Start the timer.
90
91 A timeout event will be generated every *interval* seconds.
92 If *interval* is None, then self.interval will be used.
93
94 If *iterations* is specified, the timer will stop after
95 emitting that number of events. If unspecified, then
96 the previous value of self.iterations will be used. If the value is
97 negative, then the timer will continue running until stop() is called.
98 """
99 self.iter_count = 0
100 if interval is not None:
101 self.interval = interval
102 if iterations is not None:
103 self.max_iterations = iterations
104 self._backend._vispy_start(self.interval)
105 self._running = True
106 self._last_emit_time = None
107 self.events.start(type='timer_start')
108
109 def stop(self):
110 """Stop the timer."""
111 self._backend._vispy_stop()
112 self._running = False
113 self.events.stop(type='timer_stop')
114
115 # use timer.app.run() and .quit() instead.
116 # def run_event_loop(self):
117 #"""Execute the event loop for this Timer's backend.
118 #"""
119 # return self._backend._vispy_run()
120
121 # def quit_event_loop(self):
122 #"""Exit the event loop for this Timer's backend.
123 #"""
124 # return self._backend._vispy_quit()
125
126 @property
127 def native(self):
128 """ The native timer on which this Timer is based.
129 """
130 return self._backend._vispy_get_native_timer()
131
132 def _timeout(self, *args):
133 # called when the backend timer has triggered.
134 if not self.running:
135 return
136 if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:
137 self.stop()
138 return
139
140 # compute dt since last event
141 now = precision_time()
142 if self._last_emit_time is None:
143 dt = None
144 else:
145 dt = now - self._last_emit_time
146 self._last_emit_time = now
147
148 self.events.timeout(
149 type='timer_timeout',
150 iteration=self.iter_count,
151 dt=dt)
152 self.iter_count += 1
153
154 def connect(self, callback):
155 """ Alias for self.events.timeout.connect() """
156 return self.events.timeout.connect(callback)
157
158 def disconnect(self, callback=None):
159 """ Alias for self.events.timeout.disconnect() """
160 return self.events.timeout.disconnect(callback)
161
[end of vispy/app/timer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vispy/app/timer.py b/vispy/app/timer.py
--- a/vispy/app/timer.py
+++ b/vispy/app/timer.py
@@ -103,7 +103,7 @@
self.max_iterations = iterations
self._backend._vispy_start(self.interval)
self._running = True
- self._last_emit_time = None
+ self._last_emit_time = precision_time()
self.events.start(type='timer_start')
def stop(self):
@@ -139,10 +139,7 @@
# compute dt since last event
now = precision_time()
- if self._last_emit_time is None:
- dt = None
- else:
- dt = now - self._last_emit_time
+ dt = now - self._last_emit_time
self._last_emit_time = now
self.events.timeout(
| {"golden_diff": "diff --git a/vispy/app/timer.py b/vispy/app/timer.py\n--- a/vispy/app/timer.py\n+++ b/vispy/app/timer.py\n@@ -103,7 +103,7 @@\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n- self._last_emit_time = None\n+ self._last_emit_time = precision_time()\n self.events.start(type='timer_start')\n \n def stop(self):\n@@ -139,10 +139,7 @@\n \n # compute dt since last event\n now = precision_time()\n- if self._last_emit_time is None:\n- dt = None\n- else:\n- dt = now - self._last_emit_time\n+ dt = now - self._last_emit_time\n self._last_emit_time = now\n \n self.events.timeout(\n", "issue": "The first emitted Timer event has `None` as `dt` property\n``` python\ndef on_timer(self, event):\n print event.dt\n```\n\ndisplays `None` the first time, and the correct dt then (a float). The first dt should probably be `0.0`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nfrom ..util.event import Event, EmitterGroup\nfrom ..util.ptime import time as precision_time\nfrom ..ext.six import string_types\nfrom .base import BaseTimerBackend as TimerBackend # noqa\nfrom . import use_app, Application\n\n\nclass Timer(object):\n\n \"\"\"Timer used to schedule events in the future or on a repeating schedule\n\n Parameters\n ----------\n interval : float\n Time between events.\n connect : function | None\n The function to call.\n iterations : int\n Number of iterations. Can be -1 for infinite.\n start : bool\n Whether to start the timer.\n app : instance of vispy.app.Application\n The application to attach the timer to.\n \"\"\"\n\n def __init__(self, interval=0.0, connect=None, iterations=-1, start=False,\n app=None):\n self.events = EmitterGroup(source=self,\n start=Event,\n stop=Event,\n timeout=Event)\n #self.connect = self.events.timeout.connect\n #self.disconnect = self.events.timeout.disconnect\n\n # Get app instance\n if app is None:\n self._app = use_app()\n elif isinstance(app, Application):\n self._app = app\n elif isinstance(app, string_types):\n self._app = Application(app)\n else:\n raise ValueError('Invalid value for app %r' % app)\n \n # Ensure app has backend app object\n self._app.native\n \n # Instantiate the backed with the right class\n self._backend = self._app.backend_module.TimerBackend(self)\n\n self._interval = interval\n self._running = False\n self._last_emit_time = None\n self.iter_count = 0\n self.max_iterations = iterations\n if connect is not None:\n self.connect(connect)\n if start:\n self.start()\n\n @property\n def app(self):\n \"\"\" The vispy Application instance on which this Timer is based.\n \"\"\"\n return self._app\n\n @property\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, val):\n self._interval = val\n if self.running:\n self.stop()\n self.start()\n\n @property\n def running(self):\n return self._running\n\n def start(self, interval=None, iterations=None):\n \"\"\"Start the timer.\n\n A timeout event will be generated every *interval* seconds.\n If *interval* is None, then self.interval will be used.\n\n If *iterations* is specified, the timer will stop after\n emitting that number of events. If unspecified, then\n the previous value of self.iterations will be used. 
If the value is\n negative, then the timer will continue running until stop() is called.\n \"\"\"\n self.iter_count = 0\n if interval is not None:\n self.interval = interval\n if iterations is not None:\n self.max_iterations = iterations\n self._backend._vispy_start(self.interval)\n self._running = True\n self._last_emit_time = None\n self.events.start(type='timer_start')\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n self._backend._vispy_stop()\n self._running = False\n self.events.stop(type='timer_stop')\n\n # use timer.app.run() and .quit() instead.\n # def run_event_loop(self):\n #\"\"\"Execute the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_run()\n\n # def quit_event_loop(self):\n #\"\"\"Exit the event loop for this Timer's backend.\n #\"\"\"\n # return self._backend._vispy_quit()\n\n @property\n def native(self):\n \"\"\" The native timer on which this Timer is based.\n \"\"\"\n return self._backend._vispy_get_native_timer()\n\n def _timeout(self, *args):\n # called when the backend timer has triggered.\n if not self.running:\n return\n if self.max_iterations >= 0 and self.iter_count >= self.max_iterations:\n self.stop()\n return\n\n # compute dt since last event\n now = precision_time()\n if self._last_emit_time is None:\n dt = None\n else:\n dt = now - self._last_emit_time\n self._last_emit_time = now\n\n self.events.timeout(\n type='timer_timeout',\n iteration=self.iter_count,\n dt=dt)\n self.iter_count += 1\n\n def connect(self, callback):\n \"\"\" Alias for self.events.timeout.connect() \"\"\"\n return self.events.timeout.connect(callback)\n\n def disconnect(self, callback=None):\n \"\"\" Alias for self.events.timeout.disconnect() \"\"\"\n return self.events.timeout.disconnect(callback)\n", "path": "vispy/app/timer.py"}]} | 2,045 | 205 |
gh_patches_debug_24108 | rasdani/github-patches | git_diff | pypa__pip-11264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checking out Bazaar branch makes full clone
When checking out a Bazaar branch, pip currently makes a full clone of the branch history. This is unnecessary and much slower than just fetching the latest revision:
For example, performance on my system for 'bzr co --lightweight lp:bzr':
0.60s user 0.11s system 5% cpu 12.234 total
Performance on my system for 'bzr branch lp:bzr':
65.41s user 1.48s system 39% cpu 2:47.91 total
</issue>
<code>
[start of src/pip/_internal/vcs/bazaar.py]
1 import logging
2 from typing import List, Optional, Tuple
3
4 from pip._internal.utils.misc import HiddenText, display_path
5 from pip._internal.utils.subprocess import make_command
6 from pip._internal.utils.urls import path_to_url
7 from pip._internal.vcs.versioncontrol import (
8 AuthInfo,
9 RemoteNotFoundError,
10 RevOptions,
11 VersionControl,
12 vcs,
13 )
14
15 logger = logging.getLogger(__name__)
16
17
18 class Bazaar(VersionControl):
19 name = "bzr"
20 dirname = ".bzr"
21 repo_name = "branch"
22 schemes = (
23 "bzr+http",
24 "bzr+https",
25 "bzr+ssh",
26 "bzr+sftp",
27 "bzr+ftp",
28 "bzr+lp",
29 "bzr+file",
30 )
31
32 @staticmethod
33 def get_base_rev_args(rev: str) -> List[str]:
34 return ["-r", rev]
35
36 def fetch_new(
37 self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
38 ) -> None:
39 rev_display = rev_options.to_display()
40 logger.info(
41 "Checking out %s%s to %s",
42 url,
43 rev_display,
44 display_path(dest),
45 )
46 if verbosity <= 0:
47 flag = "--quiet"
48 elif verbosity == 1:
49 flag = ""
50 else:
51 flag = f"-{'v'*verbosity}"
52 cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest)
53 self.run_command(cmd_args)
54
55 def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
56 self.run_command(make_command("switch", url), cwd=dest)
57
58 def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
59 cmd_args = make_command("pull", "-q", rev_options.to_args())
60 self.run_command(cmd_args, cwd=dest)
61
62 @classmethod
63 def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
64 # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
65 url, rev, user_pass = super().get_url_rev_and_auth(url)
66 if url.startswith("ssh://"):
67 url = "bzr+" + url
68 return url, rev, user_pass
69
70 @classmethod
71 def get_remote_url(cls, location: str) -> str:
72 urls = cls.run_command(
73 ["info"], show_stdout=False, stdout_only=True, cwd=location
74 )
75 for line in urls.splitlines():
76 line = line.strip()
77 for x in ("checkout of branch: ", "parent branch: "):
78 if line.startswith(x):
79 repo = line.split(x)[1]
80 if cls._is_local_repository(repo):
81 return path_to_url(repo)
82 return repo
83 raise RemoteNotFoundError
84
85 @classmethod
86 def get_revision(cls, location: str) -> str:
87 revision = cls.run_command(
88 ["revno"],
89 show_stdout=False,
90 stdout_only=True,
91 cwd=location,
92 )
93 return revision.splitlines()[-1]
94
95 @classmethod
96 def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
97 """Always assume the versions don't match"""
98 return False
99
100
101 vcs.register(Bazaar)
102
[end of src/pip/_internal/vcs/bazaar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py
--- a/src/pip/_internal/vcs/bazaar.py
+++ b/src/pip/_internal/vcs/bazaar.py
@@ -49,14 +49,25 @@
flag = ""
else:
flag = f"-{'v'*verbosity}"
- cmd_args = make_command("branch", flag, rev_options.to_args(), url, dest)
+ cmd_args = make_command(
+ "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
+ )
self.run_command(cmd_args)
def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
self.run_command(make_command("switch", url), cwd=dest)
def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
- cmd_args = make_command("pull", "-q", rev_options.to_args())
+ output = self.run_command(
+ make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
+ )
+ if output.startswith("Standalone "):
+ # Older versions of pip used to create standalone branches.
+ # Convert the standalone branch to a checkout by calling "bzr bind".
+ cmd_args = make_command("bind", "-q", url)
+ self.run_command(cmd_args, cwd=dest)
+
+ cmd_args = make_command("update", "-q", rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
| {"golden_diff": "diff --git a/src/pip/_internal/vcs/bazaar.py b/src/pip/_internal/vcs/bazaar.py\n--- a/src/pip/_internal/vcs/bazaar.py\n+++ b/src/pip/_internal/vcs/bazaar.py\n@@ -49,14 +49,25 @@\n flag = \"\"\n else:\n flag = f\"-{'v'*verbosity}\"\n- cmd_args = make_command(\"branch\", flag, rev_options.to_args(), url, dest)\n+ cmd_args = make_command(\n+ \"checkout\", \"--lightweight\", flag, rev_options.to_args(), url, dest\n+ )\n self.run_command(cmd_args)\n \n def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n self.run_command(make_command(\"switch\", url), cwd=dest)\n \n def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n- cmd_args = make_command(\"pull\", \"-q\", rev_options.to_args())\n+ output = self.run_command(\n+ make_command(\"info\"), show_stdout=False, stdout_only=True, cwd=dest\n+ )\n+ if output.startswith(\"Standalone \"):\n+ # Older versions of pip used to create standalone branches.\n+ # Convert the standalone branch to a checkout by calling \"bzr bind\".\n+ cmd_args = make_command(\"bind\", \"-q\", url)\n+ self.run_command(cmd_args, cwd=dest)\n+\n+ cmd_args = make_command(\"update\", \"-q\", rev_options.to_args())\n self.run_command(cmd_args, cwd=dest)\n \n @classmethod\n", "issue": "Checking out Bazaar branch makes full clone\nWhen checking out a Bazaar branch, pip currently makes a full clone of the branch history. This is unnecessary and much slower than just fetching the latest revision:\r\n\r\nFor example, performance on my system for 'bzr co --lightweight lp:bzr':\r\n\r\n0.60s user 0.11s system 5% cpu 12.234 total\r\n\r\nPerformance on my system for 'bzr branch lp:bzr':\r\n\r\n65.41s user 1.48s system 39% cpu 2:47.91 total\r\n\n", "before_files": [{"content": "import logging\nfrom typing import List, Optional, Tuple\n\nfrom pip._internal.utils.misc import HiddenText, display_path\nfrom pip._internal.utils.subprocess import make_command\nfrom pip._internal.utils.urls import path_to_url\nfrom pip._internal.vcs.versioncontrol import (\n AuthInfo,\n RemoteNotFoundError,\n RevOptions,\n VersionControl,\n vcs,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Bazaar(VersionControl):\n name = \"bzr\"\n dirname = \".bzr\"\n repo_name = \"branch\"\n schemes = (\n \"bzr+http\",\n \"bzr+https\",\n \"bzr+ssh\",\n \"bzr+sftp\",\n \"bzr+ftp\",\n \"bzr+lp\",\n \"bzr+file\",\n )\n\n @staticmethod\n def get_base_rev_args(rev: str) -> List[str]:\n return [\"-r\", rev]\n\n def fetch_new(\n self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int\n ) -> None:\n rev_display = rev_options.to_display()\n logger.info(\n \"Checking out %s%s to %s\",\n url,\n rev_display,\n display_path(dest),\n )\n if verbosity <= 0:\n flag = \"--quiet\"\n elif verbosity == 1:\n flag = \"\"\n else:\n flag = f\"-{'v'*verbosity}\"\n cmd_args = make_command(\"branch\", flag, rev_options.to_args(), url, dest)\n self.run_command(cmd_args)\n\n def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n self.run_command(make_command(\"switch\", url), cwd=dest)\n\n def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:\n cmd_args = make_command(\"pull\", \"-q\", rev_options.to_args())\n self.run_command(cmd_args, cwd=dest)\n\n @classmethod\n def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:\n # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it\n url, rev, user_pass = super().get_url_rev_and_auth(url)\n if url.startswith(\"ssh://\"):\n url = 
\"bzr+\" + url\n return url, rev, user_pass\n\n @classmethod\n def get_remote_url(cls, location: str) -> str:\n urls = cls.run_command(\n [\"info\"], show_stdout=False, stdout_only=True, cwd=location\n )\n for line in urls.splitlines():\n line = line.strip()\n for x in (\"checkout of branch: \", \"parent branch: \"):\n if line.startswith(x):\n repo = line.split(x)[1]\n if cls._is_local_repository(repo):\n return path_to_url(repo)\n return repo\n raise RemoteNotFoundError\n\n @classmethod\n def get_revision(cls, location: str) -> str:\n revision = cls.run_command(\n [\"revno\"],\n show_stdout=False,\n stdout_only=True,\n cwd=location,\n )\n return revision.splitlines()[-1]\n\n @classmethod\n def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Bazaar)\n", "path": "src/pip/_internal/vcs/bazaar.py"}]} | 1,614 | 356 |
gh_patches_debug_5737 | rasdani/github-patches | git_diff | spesmilo__electrum-1738 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request: Label colour - History Window
The transaction amount of outgoing transactions, when viewed in the history window, is coloured red.
It would be nice if the label of these transactions could be coloured the same red colour so that they stand out more against the other incoming transactions.
Not a big issue but would be a 'nice to have'.
</issue>
<code>
[start of gui/qt/history_widget.py]
1 #!/usr/bin/env python
2 #
3 # Electrum - lightweight Bitcoin client
4 # Copyright (C) 2015 Thomas Voegtlin
5 #
6 # Permission is hereby granted, free of charge, to any person
7 # obtaining a copy of this software and associated documentation files
8 # (the "Software"), to deal in the Software without restriction,
9 # including without limitation the rights to use, copy, modify, merge,
10 # publish, distribute, sublicense, and/or sell copies of the Software,
11 # and to permit persons to whom the Software is furnished to do so,
12 # subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be
15 # included in all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
21 # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
22 # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 # SOFTWARE.
25
26
27 import webbrowser
28
29 from util import *
30 from electrum.i18n import _
31 from electrum.util import block_explorer_URL, format_satoshis, format_time
32 from electrum.plugins import run_hook
33
34
35 class HistoryWidget(MyTreeWidget):
36
37 def __init__(self, parent=None):
38 MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)
39 self.refresh_headers()
40 self.setColumnHidden(1, True)
41 self.config = self.parent.config
42
43 def refresh_headers(self):
44 headers = ['', '', _('Date'), _('Description') , _('Amount'),
45 _('Balance')]
46 run_hook('history_tab_headers', headers)
47 self.update_headers(headers)
48
49 def get_icon(self, conf, timestamp):
50 time_str = _("unknown")
51 if conf > 0:
52 time_str = format_time(timestamp)
53 if conf == -1:
54 time_str = _('Not Verified')
55 icon = QIcon(":icons/unconfirmed.png")
56 elif conf == 0:
57 time_str = _('Unconfirmed')
58 icon = QIcon(":icons/unconfirmed.png")
59 elif conf < 6:
60 icon = QIcon(":icons/clock%d.png"%conf)
61 else:
62 icon = QIcon(":icons/confirmed.png")
63 return icon, time_str
64
65 def get_domain(self):
66 '''Replaced in address_dialog.py'''
67 return self.wallet.get_account_addresses(self.parent.current_account)
68
69 def on_update(self):
70 self.wallet = self.parent.wallet
71 h = self.wallet.get_history(self.get_domain())
72
73 item = self.currentItem()
74 current_tx = item.data(0, Qt.UserRole).toString() if item else None
75 self.clear()
76 run_hook('history_tab_update_begin')
77 for tx in h:
78 tx_hash, conf, value, timestamp, balance = tx
79 if conf is None and timestamp is None:
80 continue # skip history in offline mode
81 icon, time_str = self.get_icon(conf, timestamp)
82 v_str = self.parent.format_amount(value, True, whitespaces=True)
83 balance_str = self.parent.format_amount(balance, whitespaces=True)
84 label = self.wallet.get_label(tx_hash)
85 entry = ['', tx_hash, time_str, label, v_str, balance_str]
86 run_hook('history_tab_update', tx, entry)
87 item = QTreeWidgetItem(entry)
88 item.setIcon(0, icon)
89 for i in range(len(entry)):
90 if i>3:
91 item.setTextAlignment(i, Qt.AlignRight)
92 if i!=2:
93 item.setFont(i, QFont(MONOSPACE_FONT))
94 if value < 0:
95 item.setForeground(4, QBrush(QColor("#BC1E1E")))
96 if tx_hash:
97 item.setData(0, Qt.UserRole, tx_hash)
98 self.insertTopLevelItem(0, item)
99 if current_tx == tx_hash:
100 self.setCurrentItem(item)
101
102 def update_item(self, tx_hash, conf, timestamp):
103 icon, time_str = self.get_icon(conf, timestamp)
104 items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)
105 if items:
106 item = items[0]
107 item.setIcon(0, icon)
108 item.setText(2, time_str)
109
110 def create_menu(self, position):
111 self.selectedIndexes()
112 item = self.currentItem()
113 if not item:
114 return
115 tx_hash = str(item.data(0, Qt.UserRole).toString())
116 if not tx_hash:
117 return
118 tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
119 if not tx_URL:
120 return
121 menu = QMenu()
122 menu.addAction(_("Copy ID to Clipboard"), lambda: self.parent.app.clipboard().setText(tx_hash))
123 menu.addAction(_("Details"), lambda: self.parent.show_transaction(self.wallet.transactions.get(tx_hash)))
124 menu.addAction(_("Edit description"), lambda: self.editItem(item, self.editable_columns[0]))
125 menu.addAction(_("View on block explorer"), lambda: webbrowser.open(tx_URL))
126 menu.exec_(self.viewport().mapToGlobal(position))
127
[end of gui/qt/history_widget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gui/qt/history_widget.py b/gui/qt/history_widget.py
--- a/gui/qt/history_widget.py
+++ b/gui/qt/history_widget.py
@@ -92,6 +92,7 @@
if i!=2:
item.setFont(i, QFont(MONOSPACE_FONT))
if value < 0:
+ item.setForeground(3, QBrush(QColor("#BC1E1E")))
item.setForeground(4, QBrush(QColor("#BC1E1E")))
if tx_hash:
item.setData(0, Qt.UserRole, tx_hash)
| {"golden_diff": "diff --git a/gui/qt/history_widget.py b/gui/qt/history_widget.py\n--- a/gui/qt/history_widget.py\n+++ b/gui/qt/history_widget.py\n@@ -92,6 +92,7 @@\n if i!=2:\n item.setFont(i, QFont(MONOSPACE_FONT))\n if value < 0:\n+ item.setForeground(3, QBrush(QColor(\"#BC1E1E\")))\n item.setForeground(4, QBrush(QColor(\"#BC1E1E\")))\n if tx_hash:\n item.setData(0, Qt.UserRole, tx_hash)\n", "issue": "Feature request: Label colour - History Window\nThe transaction amount of outgoing transactions, when viewed in the history window, is coloured red. \n\nIt would be a nice if the label of these transactions could be coloured the same red colour so that they stand out more against the other incoming transactions. \n\nNot a big issue but would be a 'nice to have'.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Electrum - lightweight Bitcoin client\n# Copyright (C) 2015 Thomas Voegtlin\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation files\n# (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge,\n# publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport webbrowser\n\nfrom util import *\nfrom electrum.i18n import _\nfrom electrum.util import block_explorer_URL, format_satoshis, format_time\nfrom electrum.plugins import run_hook\n\n\nclass HistoryWidget(MyTreeWidget):\n\n def __init__(self, parent=None):\n MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)\n self.refresh_headers()\n self.setColumnHidden(1, True)\n self.config = self.parent.config\n\n def refresh_headers(self):\n headers = ['', '', _('Date'), _('Description') , _('Amount'),\n _('Balance')]\n run_hook('history_tab_headers', headers)\n self.update_headers(headers)\n\n def get_icon(self, conf, timestamp):\n time_str = _(\"unknown\")\n if conf > 0:\n time_str = format_time(timestamp)\n if conf == -1:\n time_str = _('Not Verified')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf == 0:\n time_str = _('Unconfirmed')\n icon = QIcon(\":icons/unconfirmed.png\")\n elif conf < 6:\n icon = QIcon(\":icons/clock%d.png\"%conf)\n else:\n icon = QIcon(\":icons/confirmed.png\")\n return icon, time_str\n\n def get_domain(self):\n '''Replaced in address_dialog.py'''\n return self.wallet.get_account_addresses(self.parent.current_account)\n\n def on_update(self):\n self.wallet = self.parent.wallet\n h = self.wallet.get_history(self.get_domain())\n\n item = self.currentItem()\n current_tx = item.data(0, Qt.UserRole).toString() if item else None\n self.clear()\n run_hook('history_tab_update_begin')\n for tx in h:\n tx_hash, conf, value, timestamp, balance = tx\n if conf is None and timestamp is None:\n 
continue # skip history in offline mode\n icon, time_str = self.get_icon(conf, timestamp)\n v_str = self.parent.format_amount(value, True, whitespaces=True)\n balance_str = self.parent.format_amount(balance, whitespaces=True)\n label = self.wallet.get_label(tx_hash)\n entry = ['', tx_hash, time_str, label, v_str, balance_str]\n run_hook('history_tab_update', tx, entry)\n item = QTreeWidgetItem(entry)\n item.setIcon(0, icon)\n for i in range(len(entry)):\n if i>3:\n item.setTextAlignment(i, Qt.AlignRight)\n if i!=2:\n item.setFont(i, QFont(MONOSPACE_FONT))\n if value < 0:\n item.setForeground(4, QBrush(QColor(\"#BC1E1E\")))\n if tx_hash:\n item.setData(0, Qt.UserRole, tx_hash)\n self.insertTopLevelItem(0, item)\n if current_tx == tx_hash:\n self.setCurrentItem(item)\n\n def update_item(self, tx_hash, conf, timestamp):\n icon, time_str = self.get_icon(conf, timestamp)\n items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)\n if items:\n item = items[0]\n item.setIcon(0, icon)\n item.setText(2, time_str)\n\n def create_menu(self, position):\n self.selectedIndexes()\n item = self.currentItem()\n if not item:\n return\n tx_hash = str(item.data(0, Qt.UserRole).toString())\n if not tx_hash:\n return\n tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)\n if not tx_URL:\n return\n menu = QMenu()\n menu.addAction(_(\"Copy ID to Clipboard\"), lambda: self.parent.app.clipboard().setText(tx_hash))\n menu.addAction(_(\"Details\"), lambda: self.parent.show_transaction(self.wallet.transactions.get(tx_hash)))\n menu.addAction(_(\"Edit description\"), lambda: self.editItem(item, self.editable_columns[0]))\n menu.addAction(_(\"View on block explorer\"), lambda: webbrowser.open(tx_URL))\n menu.exec_(self.viewport().mapToGlobal(position))\n", "path": "gui/qt/history_widget.py"}]} | 2,004 | 123 |
gh_patches_debug_627 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Terraform parsing error string with escaped backslash at the end
**Describe the issue**
Checkov crashes if it encounters an escaped backslash (`"\\"`) at the end of a string.
**Examples**
Minimal example to reproduce the error:
```terraform
variable "slash" {
default = "\\"
}
output "slash" {
value = var.slash
}
```
`terraform validate` sees this configuration as valid, but checkov fails with a parsing error.
This only happens when the last character of the string is the escaped backslash, as the parser assumes the closing quotation mark is escaped. Adding any normal character at the end of the string doesn't trigger this error.
```terraform
variable "slash" {
default = "\\"
}
```
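To see why the trailing `"\\"` trips the parser, consider a naive quote scanner that treats any quote preceded by a backslash as escaped. This is only an illustrative sketch of that failure mode, not the actual hcl2 implementation:

```python
# The raw HCL text is:  default = "\\"   (two backslash characters between the quotes)
line = 'default = "\\\\"'

in_string = False
for i, ch in enumerate(line):
    # Naive rule: a quote is "escaped" whenever the previous char is a backslash.
    # It ignores that the backslash itself may be escaped, so the closing quote
    # after "\\" is wrongly skipped and the line looks unterminated.
    if ch == '"' and (i == 0 or line[i - 1] != "\\"):
        in_string = not in_string

print(in_string)  # True -> reported as "Line has unclosed quote marks"
```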
**Exception Trace**
Relevant traceback
```sh
> LOG_LEVEL=DEBUG checkov -d .
[...]
[MainThread ] [DEBUG] failed while parsing file /workdir/main.tf
Traceback (most recent call last):
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/checkov/terraform/parser.py", line 726, in _load_or_die_quietly
raw_data = hcl2.load(f)
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py", line 12, in load
return loads(file.read())
File "/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py", line 80, in loads
raise ValueError(f"Line has unclosed quote marks: {line}")
ValueError: Line has unclosed quote marks: default = "\\"
[...]
```
**Desktop (please complete the following information):**
- OS: MacOS 12.3.1 (Intel)
- Checkov Version: 2.0.1230
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2==0.3.42",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "argcomplete",
57 "detect-secrets",
58 "policyuniverse",
59 "typing-extensions>=4.1.0",
60 "cachetools",
61 "cyclonedx-python-lib>=2.4.0",
62 "click>=8.0.0",
63 "aiohttp",
64 "aiodns",
65 "aiomultiprocess",
66 "jsonpath_ng",
67 "jsonschema~=3.0",
68 "prettytable>=3.0.0",
69 "pycep-parser==0.3.7",
70 "charset-normalizer",
71 ],
72 license="Apache License 2.0",
73 name="checkov",
74 version=version,
75 python_requires=">=3.7",
76 description="Infrastructure as code static analysis",
77 author="bridgecrew",
78 author_email="[email protected]",
79 url="https://github.com/bridgecrewio/checkov",
80 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
81 include_package_data=True,
82 package_dir={
83 "checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
84 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
85 },
86 package_data={
87 "checkov": ["py.typed"],
88 "checkov.bicep.checks.graph_checks": ["*.yaml"],
89 "checkov.common.util.templates": ["*.jinja2"],
90 "checkov.terraform.checks.graph_checks": [
91 "aws/*.yaml",
92 "gcp/*.yaml",
93 "azure/*.yaml",
94 ],
95 },
96 scripts=["bin/checkov", "bin/checkov.cmd"],
97 long_description=long_description,
98 long_description_content_type="text/markdown",
99 classifiers=[
100 "Environment :: Console",
101 "Intended Audience :: Developers",
102 "Intended Audience :: System Administrators",
103 "License :: OSI Approved :: Apache Software License",
104 "Programming Language :: Python :: 3 :: Only",
105 "Programming Language :: Python :: 3.7",
106 "Programming Language :: Python :: 3.8",
107 "Programming Language :: Python :: 3.9",
108 "Programming Language :: Python :: 3.10",
109 "Topic :: Security",
110 "Topic :: Software Development :: Build Tools",
111 "Typing :: Typed",
112 ],
113 )
114
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
]
},
install_requires=[
- "bc-python-hcl2==0.3.42",
+ "bc-python-hcl2==0.3.44",
"cloudsplaining>=0.4.1",
"deep_merge",
"tabulate",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n ]\n },\n install_requires=[\n- \"bc-python-hcl2==0.3.42\",\n+ \"bc-python-hcl2==0.3.44\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n", "issue": "Terraform parsing error string with escaped backslash at the end\n**Describe the issue**\r\nCheckov crashes if it encounters an escaped backslash (`\"\\\\\"`) at the end of a string.\r\n\r\n**Examples**\r\nMinimal example to reproduce the error:\r\n```terraform\r\nvariable \"slash\" {\r\n default = \"\\\\\"\r\n}\r\n\r\noutput \"slash\" {\r\n value = var.slash\r\n}\r\n```\r\n`terraform validate` sees this configuration as valid, but checkov fails with a parsing error.\r\n\r\nThis only happens when the last character of the string is the escaped backslash, as the parser assumes the closing quotation mark is escaped. Adding any normal character at the end of the string doesn't trigger this error.\r\n```terraform\r\nvariable \"slash\" {\r\n default = \"\\\\\"\r\n}\r\n```\r\n\r\n**Exception Trace**\r\nRelevant traceback\r\n```sh\r\n> LOG_LEVEL=DEBUG checkov -d .\r\n[...]\r\n[MainThread ] [DEBUG] failed while parsing file /workdir/main.tf\r\nTraceback (most recent call last):\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/checkov/terraform/parser.py\", line 726, in _load_or_die_quietly\r\n raw_data = hcl2.load(f)\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py\", line 12, in load\r\n return loads(file.read())\r\n File \"/Users/user/.local/pipx/venvs/checkov/lib/python3.8/site-packages/hcl2/api.py\", line 80, in loads\r\n raise ValueError(f\"Line has unclosed quote marks: {line}\")\r\nValueError: Line has unclosed quote marks: default = \"\\\\\"\r\n[...]\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 12.3.1 (Intel)\r\n - Checkov Version: 2.0.1230\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.42\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.7\",\n 
\"charset-normalizer\",\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}]} | 2,039 | 95 |
gh_patches_debug_16829 | rasdani/github-patches | git_diff | pyload__pyload-1412 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Smoozed
Hello,
Smoozed is free at the moment for 10 GB a day, but the hook says that it is not a premium account.

It would be great if somebody could have a look at it.
Thanks
</issue>
<code>
[start of module/plugins/accounts/SmoozedCom.py]
1 # -*- coding: utf-8 -*-
2
3 import hashlib
4 import time
5
6 try:
7 from beaker.crypto.pbkdf2 import PBKDF2
8
9 except ImportError:
10 from beaker.crypto.pbkdf2 import pbkdf2
11 from binascii import b2a_hex
12
13 class PBKDF2(object):
14 def __init__(self, passphrase, salt, iterations=1000):
15 self.passphrase = passphrase
16 self.salt = salt
17 self.iterations = iterations
18
19 def hexread(self, octets):
20 return b2a_hex(pbkdf2(self.passphrase, self.salt, self.iterations, octets))
21
22 from module.common.json_layer import json_loads
23 from module.plugins.Account import Account
24
25
26 class SmoozedCom(Account):
27 __name__ = "SmoozedCom"
28 __type__ = "account"
29 __version__ = "0.04"
30
31 __description__ = """Smoozed.com account plugin"""
32 __license__ = "GPLv3"
33 __authors__ = [("", "")]
34
35
36 def loadAccountInfo(self, user, req):
37 # Get user data from premiumize.me
38 status = self.getAccountStatus(user, req)
39
40 self.logDebug(status)
41
42 if status['state'] != 'ok':
43 info = {'validuntil' : None,
44 'trafficleft': None,
45 'premium' : False}
46 else:
47 # Parse account info
48 info = {'validuntil' : float(status["data"]["user"]["user_premium"]),
49 'trafficleft': max(0, status["data"]["traffic"][1] - status["data"]["traffic"][0]),
50 'session' : status["data"]["session_key"],
51 'hosters' : [hoster["name"] for hoster in status["data"]["hoster"]]}
52
53 if info['validuntil'] < time.time():
54 info['premium'] = False
55 else:
56 info['premium'] = True
57
58 return info
59
60
61 def login(self, user, data, req):
62 # Get user data from premiumize.me
63 status = self.getAccountStatus(user, req)
64
65 # Check if user and password are valid
66 if status['state'] != 'ok':
67 self.wrongPassword()
68
69
70 def getAccountStatus(self, user, req):
71 password = self.getAccountData(user)['password']
72 salt = hashlib.sha256(password).hexdigest()
73 encrypted = PBKDF2(password, salt, iterations=1000).hexread(32)
74
75 return json_loads(req.load("http://www2.smoozed.com/api/login",
76 get={'auth': user, 'password': encrypted}))
77
[end of module/plugins/accounts/SmoozedCom.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/module/plugins/accounts/SmoozedCom.py b/module/plugins/accounts/SmoozedCom.py
--- a/module/plugins/accounts/SmoozedCom.py
+++ b/module/plugins/accounts/SmoozedCom.py
@@ -34,7 +34,6 @@
def loadAccountInfo(self, user, req):
- # Get user data from premiumize.me
status = self.getAccountStatus(user, req)
self.logDebug(status)
@@ -51,7 +50,10 @@
'hosters' : [hoster["name"] for hoster in status["data"]["hoster"]]}
if info['validuntil'] < time.time():
- info['premium'] = False
+ if float(status["data"]["user"].get("user_trial", 0)) > time.time():
+ info['premium'] = True
+ else:
+ info['premium'] = False
else:
info['premium'] = True
| {"golden_diff": "diff --git a/module/plugins/accounts/SmoozedCom.py b/module/plugins/accounts/SmoozedCom.py\n--- a/module/plugins/accounts/SmoozedCom.py\n+++ b/module/plugins/accounts/SmoozedCom.py\n@@ -34,7 +34,6 @@\n \n \n def loadAccountInfo(self, user, req):\n- # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n \n self.logDebug(status)\n@@ -51,7 +50,10 @@\n 'hosters' : [hoster[\"name\"] for hoster in status[\"data\"][\"hoster\"]]}\n \n if info['validuntil'] < time.time():\n- info['premium'] = False\n+ if float(status[\"data\"][\"user\"].get(\"user_trial\", 0)) > time.time():\n+ info['premium'] = True\n+ else:\n+ info['premium'] = False\n else:\n info['premium'] = True\n", "issue": "Smoozed\nHello,\n\nSmooth is free in the moment for 10GB a day but the hooks said that it is not a premium account.\n\nWould be great if somebody can have a look on it.\n\nThanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport hashlib\nimport time\n\ntry:\n from beaker.crypto.pbkdf2 import PBKDF2\n\nexcept ImportError:\n from beaker.crypto.pbkdf2 import pbkdf2\n from binascii import b2a_hex\n\n class PBKDF2(object):\n def __init__(self, passphrase, salt, iterations=1000):\n self.passphrase = passphrase\n self.salt = salt\n self.iterations = iterations\n\n def hexread(self, octets):\n return b2a_hex(pbkdf2(self.passphrase, self.salt, self.iterations, octets))\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.Account import Account\n\n\nclass SmoozedCom(Account):\n __name__ = \"SmoozedCom\"\n __type__ = \"account\"\n __version__ = \"0.04\"\n\n __description__ = \"\"\"Smoozed.com account plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"\", \"\")]\n\n\n def loadAccountInfo(self, user, req):\n # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n\n self.logDebug(status)\n\n if status['state'] != 'ok':\n info = {'validuntil' : None,\n 'trafficleft': None,\n 'premium' : False}\n else:\n # Parse account info\n info = {'validuntil' : float(status[\"data\"][\"user\"][\"user_premium\"]),\n 'trafficleft': max(0, status[\"data\"][\"traffic\"][1] - status[\"data\"][\"traffic\"][0]),\n 'session' : status[\"data\"][\"session_key\"],\n 'hosters' : [hoster[\"name\"] for hoster in status[\"data\"][\"hoster\"]]}\n\n if info['validuntil'] < time.time():\n info['premium'] = False\n else:\n info['premium'] = True\n\n return info\n\n\n def login(self, user, data, req):\n # Get user data from premiumize.me\n status = self.getAccountStatus(user, req)\n\n # Check if user and password are valid\n if status['state'] != 'ok':\n self.wrongPassword()\n\n\n def getAccountStatus(self, user, req):\n password = self.getAccountData(user)['password']\n salt = hashlib.sha256(password).hexdigest()\n encrypted = PBKDF2(password, salt, iterations=1000).hexread(32)\n\n return json_loads(req.load(\"http://www2.smoozed.com/api/login\",\n get={'auth': user, 'password': encrypted}))\n", "path": "module/plugins/accounts/SmoozedCom.py"}]} | 1,334 | 215 |
gh_patches_debug_6986 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1210 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MAE can't handle MPS backend
## 🐛 Bug
Currently, the mean absolute error can't handle the MPS backend. This is a simple fix and just requires casting to `.float()`
### To Reproduce
This works:
```python
import torchmetrics
import torch
a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
acc = torchmetrics.Accuracy().to(torch.device("mps"))
acc.update(a, a)
acc.compute()
```
this also works:
```python
a = torch.tensor([1, 2, 3])
mae = torchmetrics.MeanAbsoluteError()
mae.update(a, a)
mae.compute()
```
but this crashes
```python
a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
mae = torchmetrics.MeanAbsoluteError().to(torch.device("mps"))
mae.update(a, a)
mae.compute()
```
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Input In [12], in <module>
1 a = torch.tensor([1, 2, 3]).to(torch.device("mps"))
3 acc = torchmetrics.MeanAbsoluteError().to(torch.device("mps"))
----> 4 acc.update(a, a)
5 acc.compute()
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/metric.py:391, in Metric._wrap_update.<locals>.wrapped_func(*args, **kwargs)
389 with torch.set_grad_enabled(self._enable_grad):
390 try:
--> 391 update(*args, **kwargs)
392 except RuntimeError as err:
393 if "Expected all tensors to be on" in str(err):
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/regression/mae.py:63, in MeanAbsoluteError.update(self, preds, target)
56 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
57 """Update state with predictions and targets.
58
59 Args:
60 preds: Predictions from model
61 target: Ground truth values
62 """
---> 63 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
65 self.sum_abs_error += sum_abs_error
66 self.total += n_obs
File ~/miniforge3/lib/python3.9/site-packages/torchmetrics/functional/regression/mae.py:33, in _mean_absolute_error_update(preds, target)
23 """Updates and returns variables required to compute Mean Absolute Error.
24
25 Checks for same shape of input tensors.
(...)
29 target: Ground truth tensor
30 """
32 _check_same_shape(preds, target)
---> 33 sum_abs_error = torch.sum(torch.abs(preds - target))
34 n_obs = target.numel()
35 return sum_abs_error, n_obs
TypeError: Operation 'abs_out_mps()' does not support input type 'int64' in MPS backend.
```
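Until the metric handles integer inputs itself, a user-side workaround (just a sketch, not the library fix) is to cast the tensors to float before updating, which avoids the int64 `abs` limitation of the MPS backend:

```python
# Workaround sketch: cast int tensors to float before update() on MPS.
a = torch.tensor([1, 2, 3]).to(torch.device("mps"))

mae = torchmetrics.MeanAbsoluteError().to(torch.device("mps"))
mae.update(a.float(), a.float())
mae.compute()
```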
### Environment
```
torch : 1.12.1
lightning : 2022.9.8
torchmetrics: 0.9.3
```
</issue>
<code>
[start of src/torchmetrics/functional/regression/mae.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.utilities.checks import _check_same_shape
20
21
22 def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
23 """Updates and returns variables required to compute Mean Absolute Error.
24
25 Checks for same shape of input tensors.
26
27 Args:
28 preds: Predicted tensor
29 target: Ground truth tensor
30 """
31
32 _check_same_shape(preds, target)
33 sum_abs_error = torch.sum(torch.abs(preds - target))
34 n_obs = target.numel()
35 return sum_abs_error, n_obs
36
37
38 def _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor:
39 """Computes Mean Absolute Error.
40
41 Args:
42 sum_abs_error: Sum of absolute value of errors over all observations
43 n_obs: Number of predictions or observations
44
45 Example:
46 >>> preds = torch.tensor([0., 1, 2, 3])
47 >>> target = torch.tensor([0., 1, 2, 2])
48 >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
49 >>> _mean_absolute_error_compute(sum_abs_error, n_obs)
50 tensor(0.2500)
51 """
52
53 return sum_abs_error / n_obs
54
55
56 def mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:
57 """Computes mean absolute error.
58
59 Args:
60 preds: estimated labels
61 target: ground truth labels
62
63 Return:
64 Tensor with MAE
65
66 Example:
67 >>> from torchmetrics.functional import mean_absolute_error
68 >>> x = torch.tensor([0., 1, 2, 3])
69 >>> y = torch.tensor([0., 1, 2, 2])
70 >>> mean_absolute_error(x, y)
71 tensor(0.2500)
72 """
73 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)
74 return _mean_absolute_error_compute(sum_abs_error, n_obs)
75
[end of src/torchmetrics/functional/regression/mae.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/functional/regression/mae.py b/src/torchmetrics/functional/regression/mae.py
--- a/src/torchmetrics/functional/regression/mae.py
+++ b/src/torchmetrics/functional/regression/mae.py
@@ -28,8 +28,9 @@
preds: Predicted tensor
target: Ground truth tensor
"""
-
_check_same_shape(preds, target)
+ preds = preds if preds.is_floating_point else preds.float()
+ target = target if target.is_floating_point else target.float()
sum_abs_error = torch.sum(torch.abs(preds - target))
n_obs = target.numel()
return sum_abs_error, n_obs
| {"golden_diff": "diff --git a/src/torchmetrics/functional/regression/mae.py b/src/torchmetrics/functional/regression/mae.py\n--- a/src/torchmetrics/functional/regression/mae.py\n+++ b/src/torchmetrics/functional/regression/mae.py\n@@ -28,8 +28,9 @@\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n-\n _check_same_shape(preds, target)\n+ preds = preds if preds.is_floating_point else preds.float()\n+ target = target if target.is_floating_point else target.float()\n sum_abs_error = torch.sum(torch.abs(preds - target))\n n_obs = target.numel()\n return sum_abs_error, n_obs\n", "issue": "MAE can't handle MPS backend\n## \ud83d\udc1b Bug\r\n\r\nThe currently, the mean absolute error can't handle the MPS backend. This is a simple fix and just requires casting to .float()\r\n\r\n### To Reproduce\r\n\r\nThis works:\r\n\r\n```python\r\nimport torchmetrics\r\nimport torch\r\n\r\na = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n\r\nacc = torchmetrics.Accuracy().to(torch.device(\"mps\"))\r\nacc.update(a, a)\r\nacc.compute()\r\n```\r\n\r\nthis also works:\r\n\r\n```\r\na = torch.tensor([1, 2, 3])\r\n\r\nmae = torchmetrics.MeanAbsoluteError()\r\nmae.update(a, a)\r\nmae.compute()\r\n\r\n```\r\n\r\nbut this crashes\r\n\r\n```python\r\na = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n\r\nmae = torchmetrics.MeanAbsoluteError().to(torch.device(\"mps\"))\r\nmae.update(a, a)\r\nmae.compute()\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nInput In [12], in <module>\r\n 1 a = torch.tensor([1, 2, 3]).to(torch.device(\"mps\"))\r\n 3 acc = torchmetrics.MeanAbsoluteError().to(torch.device(\"mps\"))\r\n----> 4 acc.update(a, a)\r\n 5 acc.compute()\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/metric.py:391, in Metric._wrap_update.<locals>.wrapped_func(*args, **kwargs)\r\n 389 with torch.set_grad_enabled(self._enable_grad):\r\n 390 try:\r\n--> 391 update(*args, **kwargs)\r\n 392 except RuntimeError as err:\r\n 393 if \"Expected all tensors to be on\" in str(err):\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/regression/mae.py:63, in MeanAbsoluteError.update(self, preds, target)\r\n 56 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\r\n 57 \"\"\"Update state with predictions and targets.\r\n 58 \r\n 59 Args:\r\n 60 preds: Predictions from model\r\n 61 target: Ground truth values\r\n 62 \"\"\"\r\n---> 63 sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\r\n 65 self.sum_abs_error += sum_abs_error\r\n 66 self.total += n_obs\r\n\r\nFile ~/miniforge3/lib/python3.9/site-packages/torchmetrics/functional/regression/mae.py:33, in _mean_absolute_error_update(preds, target)\r\n 23 \"\"\"Updates and returns variables required to compute Mean Absolute Error.\r\n 24 \r\n 25 Checks for same shape of input tensors.\r\n (...)\r\n 29 target: Ground truth tensor\r\n 30 \"\"\"\r\n 32 _check_same_shape(preds, target)\r\n---> 33 sum_abs_error = torch.sum(torch.abs(preds - target))\r\n 34 n_obs = target.numel()\r\n 35 return sum_abs_error, n_obs\r\n\r\nTypeError: Operation 'abs_out_mps()' does not support input type 'int64' in MPS backend.\r\n```\r\n\r\n\r\n### Environment\r\n\r\n```\r\ntorch : 1.12.1\r\nlightning : 2022.9.8\r\ntorchmetrics: 0.9.3\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:\n \"\"\"Updates and returns variables required to compute Mean Absolute Error.\n\n Checks for same shape of input tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n \"\"\"\n\n _check_same_shape(preds, target)\n sum_abs_error = torch.sum(torch.abs(preds - target))\n n_obs = target.numel()\n return sum_abs_error, n_obs\n\n\ndef _mean_absolute_error_compute(sum_abs_error: Tensor, n_obs: int) -> Tensor:\n \"\"\"Computes Mean Absolute Error.\n\n Args:\n sum_abs_error: Sum of absolute value of errors over all observations\n n_obs: Number of predictions or observations\n\n Example:\n >>> preds = torch.tensor([0., 1, 2, 3])\n >>> target = torch.tensor([0., 1, 2, 2])\n >>> sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n >>> _mean_absolute_error_compute(sum_abs_error, n_obs)\n tensor(0.2500)\n \"\"\"\n\n return sum_abs_error / n_obs\n\n\ndef mean_absolute_error(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Computes mean absolute error.\n\n Args:\n preds: estimated labels\n target: ground truth labels\n\n Return:\n Tensor with MAE\n\n Example:\n >>> from torchmetrics.functional import mean_absolute_error\n >>> x = torch.tensor([0., 1, 2, 3])\n >>> y = torch.tensor([0., 1, 2, 2])\n >>> mean_absolute_error(x, y)\n tensor(0.2500)\n \"\"\"\n sum_abs_error, n_obs = _mean_absolute_error_update(preds, target)\n return _mean_absolute_error_compute(sum_abs_error, n_obs)\n", "path": "src/torchmetrics/functional/regression/mae.py"}]} | 2,031 | 161 |
gh_patches_debug_1424 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1891 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2.2.0 CLI reports version 2.1.2dev0
* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)
* Template project url: n/a
* Python version: 3.11
* Operating System: linux
### Description:
Get the accurate version of cookiecutter from the CLI
### What I've run:
```bash
cookiecutter --version
Cookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])
```
This would be a one-line fix, but ideally the version would always be sourced from exactly one place (see the sketch after this list):
- `setup.py` and `importlib_metadata`
- `__init__.py`
- a `VERSION` file
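As a sketch of the single-source idea (illustrative only, not necessarily the approach the project adopted), the package could declare its version once in the packaging metadata and read it back at runtime:

```python
# cookiecutter/__init__.py -- hypothetical sketch
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("cookiecutter")
except PackageNotFoundError:
    # Running from a source checkout without an installed distribution.
    __version__ = "0.0.0.dev0"
```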
</issue>
<code>
[start of setup.py]
1 """cookiecutter distutils configuration."""
2 from setuptools import setup
3
4 version = "2.2.2.dev0"
5
6 with open('README.md', encoding='utf-8') as readme_file:
7 readme = readme_file.read()
8
9 requirements = [
10 'binaryornot>=0.4.4',
11 'Jinja2>=2.7,<4.0.0',
12 'click>=7.0,<9.0.0',
13 'pyyaml>=5.3.1',
14 'python-slugify>=4.0.0',
15 'requests>=2.23.0',
16 'arrow',
17 ]
18
19 setup(
20 name='cookiecutter',
21 version=version,
22 description=(
23 'A command-line utility that creates projects from project '
24 'templates, e.g. creating a Python package project from a '
25 'Python package project template.'
26 ),
27 long_description=readme,
28 long_description_content_type='text/markdown',
29 author='Audrey Feldroy',
30 author_email='[email protected]',
31 url='https://github.com/cookiecutter/cookiecutter',
32 project_urls={
33 "Documentation": "https://cookiecutter.readthedocs.io",
34 "Issues": "https://github.com/cookiecutter/cookiecutter/issues",
35 "Discord": "https://discord.gg/9BrxzPKuEW",
36 },
37 packages=['cookiecutter'],
38 package_dir={'cookiecutter': 'cookiecutter'},
39 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
40 include_package_data=True,
41 python_requires='>=3.7',
42 install_requires=requirements,
43 license='BSD',
44 zip_safe=False,
45 classifiers=[
46 "Development Status :: 5 - Production/Stable",
47 "Environment :: Console",
48 "Intended Audience :: Developers",
49 "Natural Language :: English",
50 "License :: OSI Approved :: BSD License",
51 "Programming Language :: Python :: 3 :: Only",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.7",
54 "Programming Language :: Python :: 3.8",
55 "Programming Language :: Python :: 3.9",
56 "Programming Language :: Python :: 3.10",
57 "Programming Language :: Python :: 3.11",
58 "Programming Language :: Python :: Implementation :: CPython",
59 "Programming Language :: Python :: Implementation :: PyPy",
60 "Programming Language :: Python",
61 "Topic :: Software Development",
62 ],
63 keywords=[
64 "cookiecutter",
65 "Python",
66 "projects",
67 "project templates",
68 "Jinja2",
69 "skeleton",
70 "scaffolding",
71 "project directory",
72 "package",
73 "packaging",
74 ],
75 )
76
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
"""cookiecutter distutils configuration."""
from setuptools import setup
-version = "2.2.2.dev0"
+version = "2.2.2"
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,7 @@\n \"\"\"cookiecutter distutils configuration.\"\"\"\n from setuptools import setup\n \n-version = \"2.2.2.dev0\"\n+version = \"2.2.2\"\n \n with open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n", "issue": "2.2.0 CLI reports version 2.1.2dev0 \n* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)\r\n* Template project url: n/a\r\n* Python version: 3.11\r\n* Operating System: linux\r\n\r\n### Description:\r\n\r\nGet the accurate version of cookiecutter from the CLI\r\n\r\n### What I've run:\r\n\r\n```bash\r\ncookiecutter --version\r\nCookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])\r\n```\r\n\r\nWould be a one-line fix, but ideally would be always be sourced from exactly one place:\r\n- `setup.py` and `importlib_metadata`\r\n- `__init__.py`\r\n- a `VERSION` file\n", "before_files": [{"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.2.2.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'arrow',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 1,485 | 89 |
gh_patches_debug_33 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1864 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stop using external 'mock' dependency
As of Python 3.4, 'mock' is included in the standard library under the unittest module, and since the lowest supported version of Python is greater than that, we can remove the external dependency.
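In practice the migration is usually just an import change in the test code (illustrative sketch):

```python
# Before: third-party package
# import mock

# After: standard library
from unittest import mock

client = mock.MagicMock()
client.request.return_value = {"status": "200"}
assert client.request()["status"] == "200"
```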
</issue>
<code>
[start of noxfile.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import shutil
17
18 import nox
19
20 BLACK_VERSION = "black==22.3.0"
21 ISORT_VERSION = "isort==5.10.1"
22 BLACK_PATHS = [
23 "apiclient",
24 "googleapiclient",
25 "scripts",
26 "tests",
27 "describe.py",
28 "expandsymlinks.py",
29 "noxfile.py",
30 "owlbot.py",
31 "setup.py",
32 ]
33
34 test_dependencies = [
35 "django>=2.0.0",
36 "google-auth",
37 "google-auth-httplib2",
38 "mox",
39 "parameterized",
40 "pyopenssl",
41 "pytest",
42 "pytest-cov",
43 "webtest",
44 "coverage",
45 "mock",
46 ]
47
48
49 @nox.session(python=["3.7"])
50 def lint(session):
51 session.install("flake8")
52 session.run(
53 "flake8",
54 "googleapiclient",
55 "tests",
56 "--count",
57 "--select=E9,F63,F7,F82",
58 "--show-source",
59 "--statistics",
60 )
61
62
63 @nox.session(python="3.8")
64 def format(session):
65 """
66 Run isort to sort imports. Then run black
67 to format code to uniform standard.
68 """
69 session.install(BLACK_VERSION, ISORT_VERSION)
70 # Use the --fss option to sort imports using strict alphabetical order.
71 # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
72 session.run(
73 "isort",
74 "--fss",
75 *BLACK_PATHS,
76 )
77 session.run(
78 "black",
79 *BLACK_PATHS,
80 )
81
82
83 @nox.session(python=["3.7", "3.8", "3.9", "3.10"])
84 @nox.parametrize(
85 "oauth2client",
86 [
87 "oauth2client<2dev",
88 "oauth2client>=2,<=3dev",
89 "oauth2client>=3,<=4dev",
90 "oauth2client>=4,<=5dev",
91 ],
92 )
93 def unit(session, oauth2client):
94 # Clean up dist and build folders
95 shutil.rmtree("dist", ignore_errors=True)
96 shutil.rmtree("build", ignore_errors=True)
97
98 session.install(*test_dependencies)
99 session.install(oauth2client)
100
101 # Create and install wheels
102 session.run("python3", "setup.py", "bdist_wheel")
103 session.install(os.path.join("dist", os.listdir("dist").pop()))
104
105 # Run tests from a different directory to test the package artifacts
106 root_dir = os.path.dirname(os.path.realpath(__file__))
107 temp_dir = session.create_tmp()
108 session.chdir(temp_dir)
109 shutil.copytree(os.path.join(root_dir, "tests"), "tests")
110
111 # Run py.test against the unit tests.
112 session.run(
113 "py.test",
114 "--quiet",
115 "--cov=googleapiclient",
116 "--cov=tests",
117 "--cov-append",
118 "--cov-config=.coveragerc",
119 "--cov-report=",
120 "--cov-fail-under=85",
121 "tests",
122 *session.posargs,
123 )
124
125
126 @nox.session(python=["3.9"])
127 def scripts(session):
128 session.install(*test_dependencies)
129 session.install("-e", ".")
130 session.install("-r", "scripts/requirements.txt")
131
132 # Run py.test against the unit tests.
133 session.run(
134 "py.test",
135 "--quiet",
136 "--cov=scripts",
137 "--cov-config=.coveragerc",
138 "--cov-report=",
139 "--cov-fail-under=91",
140 "scripts",
141 *session.posargs,
142 )
143
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -42,7 +42,6 @@
"pytest-cov",
"webtest",
"coverage",
- "mock",
]
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -42,7 +42,6 @@\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n- \"mock\",\n ]\n", "issue": "Stop using external 'mock' dependency\nAs of Python 3.4, 'mock' is included in the standard library under the unittest module, and since the lowest supported version of Python is greater than that, we can remove the external dependency.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport nox\n\nBLACK_VERSION = \"black==22.3.0\"\nISORT_VERSION = \"isort==5.10.1\"\nBLACK_PATHS = [\n \"apiclient\",\n \"googleapiclient\",\n \"scripts\",\n \"tests\",\n \"describe.py\",\n \"expandsymlinks.py\",\n \"noxfile.py\",\n \"owlbot.py\",\n \"setup.py\",\n]\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=\"3.8\")\ndef format(session):\n \"\"\"\n Run isort to sort imports. 
Then run black\n to format code to uniform standard.\n \"\"\"\n session.install(BLACK_VERSION, ISORT_VERSION)\n # Use the --fss option to sort imports using strict alphabetical order.\n # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections\n session.run(\n \"isort\",\n \"--fss\",\n *BLACK_PATHS,\n )\n session.run(\n \"black\",\n *BLACK_PATHS,\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n # Clean up dist and build folders\n shutil.rmtree(\"dist\", ignore_errors=True)\n shutil.rmtree(\"build\", ignore_errors=True)\n\n session.install(*test_dependencies)\n session.install(oauth2client)\n\n # Create and install wheels\n session.run(\"python3\", \"setup.py\", \"bdist_wheel\")\n session.install(os.path.join(\"dist\", os.listdir(\"dist\").pop()))\n\n # Run tests from a different directory to test the package artifacts\n root_dir = os.path.dirname(os.path.realpath(__file__))\n temp_dir = session.create_tmp()\n session.chdir(temp_dir)\n shutil.copytree(os.path.join(root_dir, \"tests\"), \"tests\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=[\"3.9\"])\ndef scripts(session):\n session.install(*test_dependencies)\n session.install(\"-e\", \".\")\n session.install(\"-r\", \"scripts/requirements.txt\")\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=scripts\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=91\",\n \"scripts\",\n *session.posargs,\n )\n", "path": "noxfile.py"}]} | 1,834 | 62 |
gh_patches_debug_28626 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-1053 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Updating Server Example does not work
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* check the discussions forum https://github.com/riptideio/pymodbus/discussions
* prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus
-->
### Versions
* Python: 3.7, 3.8, 3.10
* OS: ubuntu bionic
* Pymodbus: 2.5.3
* Modbus Hardware (if used):
### Pymodbus Specific
* Server: tcp - sync/async
### Description
I try to run the example; the first error is
`TypeError: ModbusDeviceIdentification.__init__() got an unexpected keyword argument 'info_name'`
when I remove that info, it ends with
```
modbusupdater.py", line 81, in run_updating_server
loop.start(time, now=False) # initially delay by time
AttributeError: '_UnixSelectorEventLoop' object has no attribute 'start'
```
The async/sync servers have the same error with info_name, but run when it is removed. But I am unable to run the updating server example.
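For context, `loop.start(time, now=False)` appears to come from a Twisted-style `LoopingCall` API; a plain asyncio event loop has no `start` method. Below is a minimal sketch of scheduling the periodic update on asyncio instead (illustrative only; `updating_writer`, `context` and `identity` refer to the objects defined in the example further down, and this is not the project's official fix):
```python
import asyncio
from pymodbus.server.async_io import StartTcpServer

async def periodic_update(context, interval=5):
    # Replaces the Twisted-style loop.start(interval, now=False)
    while True:
        await asyncio.sleep(interval)
        updating_writer((context,))

async def main(context, identity):
    updater = asyncio.create_task(periodic_update(context))  # keep a reference to the task
    await StartTcpServer(context, identity=identity, address=("localhost", 5020))

# asyncio.run(main(context, identity))
```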
Thank you
</issue>
<code>
[start of examples/common/updating_server.py]
1 #!/usr/bin/env python3
2 # pylint: disable=missing-any-param-doc,differing-param-doc
3 """Pymodbus Server With Updating Thread.
4
5 This is an example of having a background thread updating the
6 context while the server is operating. This can also be done with
7 a python thread::
8
9 from threading import Thread
10 Thread(target=updating_writer, args=(context,)).start()
11 """
12 import logging
13 import asyncio
14
15 from pymodbus.datastore import (
16 ModbusSequentialDataBlock,
17 ModbusServerContext,
18 ModbusSlaveContext,
19 )
20 from pymodbus.device import ModbusDeviceIdentification
21 from pymodbus.server.async_io import StartTcpServer
22 from pymodbus.version import version
23
24 # --------------------------------------------------------------------------- #
25 # configure the service logging
26 # --------------------------------------------------------------------------- #
27 log = logging.getLogger()
28 log.setLevel(logging.DEBUG)
29
30 # --------------------------------------------------------------------------- #
31 # define your callback process
32 # --------------------------------------------------------------------------- #
33
34
35 def updating_writer(extra):
36 """Run every so often,
37
38 and updates live values of the context. It should be noted
39 that there is a race condition for the update.
40
41 :param arguments: The input arguments to the call
42 """
43 log.debug("updating the context")
44 context = extra[0]
45 register = 3
46 slave_id = 0x00
47 address = 0x10
48 values = context[slave_id].getValues(register, address, count=5)
49 values = [v + 1 for v in values]
50 txt = f"new values: {str(values)}"
51 log.debug(txt)
52 context[slave_id].setValues(register, address, values)
53
54
55 def run_updating_server():
56 """Run updating server."""
57 # ----------------------------------------------------------------------- #
58 # initialize your data store
59 # ----------------------------------------------------------------------- #
60
61 store = ModbusSlaveContext(
62 di=ModbusSequentialDataBlock(0, [17] * 100),
63 co=ModbusSequentialDataBlock(0, [17] * 100),
64 hr=ModbusSequentialDataBlock(0, [17] * 100),
65 ir=ModbusSequentialDataBlock(0, [17] * 100),
66 )
67 context = ModbusServerContext(slaves=store, single=True)
68
69 # ----------------------------------------------------------------------- #
70 # initialize the server information
71 # ----------------------------------------------------------------------- #
72 identity = ModbusDeviceIdentification(
73 info_name={
74 "VendorName": "pymodbus",
75 "ProductCode": "PM",
76 "VendorUrl": "https://github.com/riptideio/pymodbus/",
77 "ProductName": "pymodbus Server",
78 "ModelName": "pymodbus Server",
79 "MajorMinorRevision": version.short(),
80 }
81 )
82
83 # ----------------------------------------------------------------------- #
84 # run the server you want
85 # ----------------------------------------------------------------------- #
86 time = 5 # 5 seconds delay
87 loop = asyncio.get_event_loop()
88 loop.start(time, now=False) # initially delay by time
89 StartTcpServer(context, identity=identity, address=("localhost", 5020))
90
91
92 if __name__ == "__main__":
93 run_updating_server()
94
[end of examples/common/updating_server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/common/updating_server.py b/examples/common/updating_server.py
--- a/examples/common/updating_server.py
+++ b/examples/common/updating_server.py
@@ -36,7 +36,7 @@
"""Run every so often,
and updates live values of the context. It should be noted
- that there is a race condition for the update.
+ that there is a lrace condition for the update.
:param arguments: The input arguments to the call
"""
@@ -52,7 +52,7 @@
context[slave_id].setValues(register, address, values)
-def run_updating_server():
+async def run_updating_server():
"""Run updating server."""
# ----------------------------------------------------------------------- #
# initialize your data store
@@ -83,11 +83,15 @@
# ----------------------------------------------------------------------- #
# run the server you want
# ----------------------------------------------------------------------- #
- time = 5 # 5 seconds delay
- loop = asyncio.get_event_loop()
- loop.start(time, now=False) # initially delay by time
- StartTcpServer(context, identity=identity, address=("localhost", 5020))
+ log.debug("Start server")
+ await StartTcpServer(
+ context,
+ identity=identity,
+ address=("localhost", 5020),
+ defer_start=False
+ )
+ log.debug("Done")
if __name__ == "__main__":
- run_updating_server()
+ asyncio.run(run_updating_server())
| {"golden_diff": "diff --git a/examples/common/updating_server.py b/examples/common/updating_server.py\n--- a/examples/common/updating_server.py\n+++ b/examples/common/updating_server.py\n@@ -36,7 +36,7 @@\n \"\"\"Run every so often,\n \n and updates live values of the context. It should be noted\n- that there is a race condition for the update.\n+ that there is a lrace condition for the update.\n \n :param arguments: The input arguments to the call\n \"\"\"\n@@ -52,7 +52,7 @@\n context[slave_id].setValues(register, address, values)\n \n \n-def run_updating_server():\n+async def run_updating_server():\n \"\"\"Run updating server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data store\n@@ -83,11 +83,15 @@\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n- time = 5 # 5 seconds delay\n- loop = asyncio.get_event_loop()\n- loop.start(time, now=False) # initially delay by time\n- StartTcpServer(context, identity=identity, address=(\"localhost\", 5020))\n+ log.debug(\"Start server\")\n+ await StartTcpServer(\n+ context,\n+ identity=identity,\n+ address=(\"localhost\", 5020),\n+ defer_start=False\n+ )\n+ log.debug(\"Done\")\n \n \n if __name__ == \"__main__\":\n- run_updating_server()\n+ asyncio.run(run_updating_server())\n", "issue": "Updating Server Example does not work\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * check the discussions forum https://github.com/riptideio/pymodbus/discussions\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.7, 3.8, 3,10\r\n* OS: ubuntu bionic\r\n* Pymodbus: 2.5.3\r\n* Modbus Hardware (if used): \r\n\r\n### Pymodbus Specific\r\n* Server: tcp - sync/async\r\n\r\n\r\n### Description\r\n\r\nI try to run example, first error is\r\n`TypeError: ModbusDeviceIdentification.__init__() got an unexpected keyword argument 'info_name'`\r\n\r\nwhen i remove that info, it ends on\r\n\r\n```\r\nmodbusupdater.py\", line 81, in run_updating_server\r\n loop.start(time, now=False) # initially delay by time\r\nAttributeError: '_UnixSelectorEventLoop' object has no attribute 'start'\r\n```\r\n\r\nasync/sync server have same error with info_name, but run when removed. But i am unable to run updating server example.\r\n\r\nThank you\n", "before_files": [{"content": "#!/usr/bin/env python3\n# pylint: disable=missing-any-param-doc,differing-param-doc\n\"\"\"Pymodbus Server With Updating Thread.\n\nThis is an example of having a background thread updating the\ncontext while the server is operating. 
This can also be done with\na python thread::\n\n from threading import Thread\n Thread(target=updating_writer, args=(context,)).start()\n\"\"\"\nimport logging\nimport asyncio\n\nfrom pymodbus.datastore import (\n ModbusSequentialDataBlock,\n ModbusServerContext,\n ModbusSlaveContext,\n)\nfrom pymodbus.device import ModbusDeviceIdentification\nfrom pymodbus.server.async_io import StartTcpServer\nfrom pymodbus.version import version\n\n# --------------------------------------------------------------------------- #\n# configure the service logging\n# --------------------------------------------------------------------------- #\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n# --------------------------------------------------------------------------- #\n# define your callback process\n# --------------------------------------------------------------------------- #\n\n\ndef updating_writer(extra):\n \"\"\"Run every so often,\n\n and updates live values of the context. It should be noted\n that there is a race condition for the update.\n\n :param arguments: The input arguments to the call\n \"\"\"\n log.debug(\"updating the context\")\n context = extra[0]\n register = 3\n slave_id = 0x00\n address = 0x10\n values = context[slave_id].getValues(register, address, count=5)\n values = [v + 1 for v in values]\n txt = f\"new values: {str(values)}\"\n log.debug(txt)\n context[slave_id].setValues(register, address, values)\n\n\ndef run_updating_server():\n \"\"\"Run updating server.\"\"\"\n # ----------------------------------------------------------------------- #\n # initialize your data store\n # ----------------------------------------------------------------------- #\n\n store = ModbusSlaveContext(\n di=ModbusSequentialDataBlock(0, [17] * 100),\n co=ModbusSequentialDataBlock(0, [17] * 100),\n hr=ModbusSequentialDataBlock(0, [17] * 100),\n ir=ModbusSequentialDataBlock(0, [17] * 100),\n )\n context = ModbusServerContext(slaves=store, single=True)\n\n # ----------------------------------------------------------------------- #\n # initialize the server information\n # ----------------------------------------------------------------------- #\n identity = ModbusDeviceIdentification(\n info_name={\n \"VendorName\": \"pymodbus\",\n \"ProductCode\": \"PM\",\n \"VendorUrl\": \"https://github.com/riptideio/pymodbus/\",\n \"ProductName\": \"pymodbus Server\",\n \"ModelName\": \"pymodbus Server\",\n \"MajorMinorRevision\": version.short(),\n }\n )\n\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n time = 5 # 5 seconds delay\n loop = asyncio.get_event_loop()\n loop.start(time, now=False) # initially delay by time\n StartTcpServer(context, identity=identity, address=(\"localhost\", 5020))\n\n\nif __name__ == \"__main__\":\n run_updating_server()\n", "path": "examples/common/updating_server.py"}]} | 1,736 | 339 |
gh_patches_debug_3003 | rasdani/github-patches | git_diff | lightly-ai__lightly-656 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect input_size for BarlowTwins Lightning Example Code
Should the input_size in [1] be `32` instead of `224`?
In [2], we use `input_size=32`.
[1] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch_lightning/barlowtwins.py#L44
[2] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch/barlowtwins.py#L35
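For reference, the corresponding line in the plain PyTorch example [2], which the Lightning example would presumably need to mirror, since CIFAR-10 images are 32x32:
```python
# CIFAR-10 images are 32x32 pixels, so the collate crop size should match
collate_fn = ImageCollateFunction(input_size=32)
```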
</issue>
<code>
[start of examples/pytorch_lightning/barlowtwins.py]
1 import torch
2 from torch import nn
3 import torchvision
4 import pytorch_lightning as pl
5
6 from lightly.data import LightlyDataset
7 from lightly.data import ImageCollateFunction
8 from lightly.loss import BarlowTwinsLoss
9 from lightly.models.modules import BarlowTwinsProjectionHead
10
11
12 class BarlowTwins(pl.LightningModule):
13 def __init__(self):
14 super().__init__()
15 resnet = torchvision.models.resnet18()
16 self.backbone = nn.Sequential(*list(resnet.children())[:-1])
17 self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)
18 self.criterion = BarlowTwinsLoss()
19
20 def forward(self, x):
21 x = self.backbone(x).flatten(start_dim=1)
22 z = self.projection_head(x)
23 return z
24
25 def training_step(self, batch, batch_index):
26 (x0, x1), _, _ = batch
27 z0 = self.forward(x0)
28 z1 = self.forward(x1)
29 loss = self.criterion(z0, z1)
30 return loss
31
32 def configure_optimizers(self):
33 optim = torch.optim.SGD(self.parameters(), lr=0.06)
34 return optim
35
36
37 model = BarlowTwins()
38
39 cifar10 = torchvision.datasets.CIFAR10("datasets/cifar10", download=True)
40 dataset = LightlyDataset.from_torch_dataset(cifar10)
41 # or create a dataset from a folder containing images or videos:
42 # dataset = LightlyDataset("path/to/folder")
43
44 collate_fn = ImageCollateFunction(input_size=224)
45
46 dataloader = torch.utils.data.DataLoader(
47 dataset,
48 batch_size=256,
49 collate_fn=collate_fn,
50 shuffle=True,
51 drop_last=True,
52 num_workers=8,
53 )
54
55 gpus = 1 if torch.cuda.is_available() else 0
56
57 trainer = pl.Trainer(max_epochs=10, gpus=gpus)
58 trainer.fit(model=model, train_dataloaders=dataloader)
59
[end of examples/pytorch_lightning/barlowtwins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/pytorch_lightning/barlowtwins.py b/examples/pytorch_lightning/barlowtwins.py
--- a/examples/pytorch_lightning/barlowtwins.py
+++ b/examples/pytorch_lightning/barlowtwins.py
@@ -41,7 +41,7 @@
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder")
-collate_fn = ImageCollateFunction(input_size=224)
+collate_fn = ImageCollateFunction(input_size=32)
dataloader = torch.utils.data.DataLoader(
dataset,
| {"golden_diff": "diff --git a/examples/pytorch_lightning/barlowtwins.py b/examples/pytorch_lightning/barlowtwins.py\n--- a/examples/pytorch_lightning/barlowtwins.py\n+++ b/examples/pytorch_lightning/barlowtwins.py\n@@ -41,7 +41,7 @@\n # or create a dataset from a folder containing images or videos:\n # dataset = LightlyDataset(\"path/to/folder\")\n \n-collate_fn = ImageCollateFunction(input_size=224)\n+collate_fn = ImageCollateFunction(input_size=32)\n \n dataloader = torch.utils.data.DataLoader(\n dataset,\n", "issue": "Incorrect inputsize for BarlowTwins Lightning Example Code\nShould the input_size in [1] be `32` instead of `224`?\r\nIn [2], we use `input_size=32`.\r\n\r\n[1] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch_lightning/barlowtwins.py#L44\r\n[2] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch/barlowtwins.py#L35\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom torch import nn\nimport torchvision\nimport pytorch_lightning as pl\n\nfrom lightly.data import LightlyDataset\nfrom lightly.data import ImageCollateFunction\nfrom lightly.loss import BarlowTwinsLoss\nfrom lightly.models.modules import BarlowTwinsProjectionHead\n\n\nclass BarlowTwins(pl.LightningModule):\n def __init__(self):\n super().__init__()\n resnet = torchvision.models.resnet18()\n self.backbone = nn.Sequential(*list(resnet.children())[:-1])\n self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)\n self.criterion = BarlowTwinsLoss()\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(x)\n return z\n\n def training_step(self, batch, batch_index):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(self.parameters(), lr=0.06)\n return optim\n\n\nmodel = BarlowTwins()\n\ncifar10 = torchvision.datasets.CIFAR10(\"datasets/cifar10\", download=True)\ndataset = LightlyDataset.from_torch_dataset(cifar10)\n# or create a dataset from a folder containing images or videos:\n# dataset = LightlyDataset(\"path/to/folder\")\n\ncollate_fn = ImageCollateFunction(input_size=224)\n\ndataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=256,\n collate_fn=collate_fn,\n shuffle=True,\n drop_last=True,\n num_workers=8,\n)\n\ngpus = 1 if torch.cuda.is_available() else 0\n\ntrainer = pl.Trainer(max_epochs=10, gpus=gpus)\ntrainer.fit(model=model, train_dataloaders=dataloader)\n", "path": "examples/pytorch_lightning/barlowtwins.py"}]} | 1,216 | 136 |
gh_patches_debug_924 | rasdani/github-patches | git_diff | joke2k__faker-993 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
text-unidecode is released under the Artistic license
`text-unidecode` is released under the Artistic license v1.0, which is considered non-free by the FSF (and therefore not compatible with the GPL). I believe this clause is of concern to commercial users of faker too:
> 5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own.
Not being able to charge a fee for the software is problematic for those of us who are contractors, for example.
I realise there aren't really any good alternatives (`unidecode` is GPL licensed as pointed out in #628 , `isounidecode` doesn't support Python 3), so would a patch making `text-unidecode` an optional dependency be acceptable?
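As a purely hypothetical sketch of that last proposal (not an actual change in the project), the pin could move from `install_requires` into an extra in `setup.py`:
```python
# Hypothetical: drop "text-unidecode" from install_requires and expose it as an extra,
# so users who want transliteration opt in with `pip install Faker[unidecode]`.
extras_require={
    "unidecode": ["text-unidecode==1.2"],
},
```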
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import io
5 import os
6
7 from setuptools import find_packages, setup
8
9 here = os.path.abspath(os.path.dirname(__file__))
10 with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:
11 README = fp.read()
12
13 with io.open(os.path.join(here, 'VERSION')) as version_file:
14 VERSION = version_file.read().strip()
15
16
17 # this module can be zip-safe if the zipimporter implements iter_modules or if
18 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
19 try:
20 import pkgutil
21 import zipimport
22 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
23 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
24 except (ImportError, AttributeError):
25 zip_safe = False
26
27 setup(
28 name='Faker',
29 version=VERSION,
30 description="Faker is a Python package that generates fake data for you.",
31 long_description=README,
32 entry_points={
33 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
34 },
35 classifiers=[
36 # See https://pypi.org/pypi?%3Aaction=list_classifiers
37 'Development Status :: 5 - Production/Stable',
38 'Environment :: Console',
39 'Intended Audience :: Developers',
40 'Programming Language :: Python',
41 'Programming Language :: Python :: 2',
42 'Programming Language :: Python :: 2.7',
43 'Programming Language :: Python :: 3',
44 'Programming Language :: Python :: 3.4',
45 'Programming Language :: Python :: 3.5',
46 'Programming Language :: Python :: 3.6',
47 'Programming Language :: Python :: 3.7',
48 'Programming Language :: Python :: Implementation :: CPython',
49 'Programming Language :: Python :: Implementation :: PyPy',
50 'Topic :: Software Development :: Libraries :: Python Modules',
51 'Topic :: Software Development :: Testing',
52 'Topic :: Utilities',
53 'License :: OSI Approved :: MIT License',
54 ],
55 keywords='faker fixtures data test mock generator',
56 author='joke2k',
57 author_email='[email protected]',
58 url='https://github.com/joke2k/faker',
59 license='MIT License',
60 packages=find_packages(exclude=["docs", "tests", "tests.*"]),
61 platforms=["any"],
62 test_suite='tests',
63 zip_safe=zip_safe,
64 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
65 setup_requires=["pytest-runner"],
66 install_requires=[
67 "python-dateutil>=2.4",
68 "six>=1.10",
69 "text-unidecode==1.2",
70 ],
71 tests_require=[
72 "validators>=0.13.0",
73 "ukpostcodeparser>=1.1.1",
74 "mock ; python_version < '3.3'",
75 "pytest>=3.8.0,<3.9",
76 "more-itertools<6.0.0 ; python_version < '3.0'",
77 # restricted because they may drop python2 support in future versions
78 # https://github.com/joke2k/faker/issues/970
79 "random2<1.1",
80 "freezegun<0.4",
81 ],
82 extras_require={
83 ':python_version<"3.3"': [
84 'ipaddress',
85 ],
86 },
87 )
88
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
install_requires=[
"python-dateutil>=2.4",
"six>=1.10",
- "text-unidecode==1.2",
+ "text-unidecode==1.3",
],
tests_require=[
"validators>=0.13.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six>=1.10\",\n- \"text-unidecode==1.2\",\n+ \"text-unidecode==1.3\",\n ],\n tests_require=[\n \"validators>=0.13.0\",\n", "issue": "text-unidecode is released under the Artistic license\n`text-unidecode` is released under the Artistic license v1.0, which is considered non-free by the FSF (and therefore not compatible with the GPL). I believe this clause is also of concern to commercial users of faker too:\r\n\r\n> 5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own.\r\n\r\nNot being able to charge a fee for the software is problematic for those of us who are contractors, for example.\r\n\r\nI realise there aren't really any good alternatives (`unidecode` is GPL licensed as pointed out in #628 , `isounidecode` doesn't support Python 3), so would a patch making `text-unidecode` an optional dependency be acceptable?\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\n\nwith io.open(os.path.join(here, 'VERSION')) as version_file:\n VERSION = version_file.read().strip()\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=[\"docs\", \"tests\", \"tests.*\"]),\n platforms=[\"any\"],\n test_suite='tests',\n zip_safe=zip_safe,\n 
python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n setup_requires=[\"pytest-runner\"],\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six>=1.10\",\n \"text-unidecode==1.2\",\n ],\n tests_require=[\n \"validators>=0.13.0\",\n \"ukpostcodeparser>=1.1.1\",\n \"mock ; python_version < '3.3'\",\n \"pytest>=3.8.0,<3.9\",\n \"more-itertools<6.0.0 ; python_version < '3.0'\",\n # restricted because they may drop python2 support in future versions\n # https://github.com/joke2k/faker/issues/970\n \"random2<1.1\",\n \"freezegun<0.4\",\n ],\n extras_require={\n ':python_version<\"3.3\"': [\n 'ipaddress',\n ],\n },\n)\n", "path": "setup.py"}]} | 1,678 | 97 |
gh_patches_debug_22236 | rasdani/github-patches | git_diff | onnx__onnx-4386 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make C++ and Python checker API consistent
Python checker API supports `full_check` arg:
https://github.com/onnx/onnx/blob/fa6f8cfdce3d86346e8a7494f3062b98416c85fb/onnx/checker.py#L94
C++ does not.
It'd be nice for them to be consistent.
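For reference, a usage sketch of the Python-side behaviour being referred to (the model path is hypothetical):
```python
import onnx

model = onnx.load("model.onnx")  # hypothetical path
# full_check=True additionally runs shape inference with strict checking
onnx.checker.check_model(model, full_check=True)
```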
</issue>
<code>
[start of onnx/checker.py]
1 # SPDX-License-Identifier: Apache-2.0
2 """onnx checker
3
4 This implements graphalities that allows us to check whether a serialized
5 proto is legal.
6 """
7
8 import functools
9
10 from onnx import (ValueInfoProto,
11 AttributeProto,
12 TensorProto,
13 SparseTensorProto,
14 NodeProto,
15 ModelProto,
16 GraphProto,
17 IR_VERSION)
18 import onnx.onnx_cpp2py_export.checker as C
19 import onnx.defs
20 from google.protobuf.message import Message
21 from typing import TypeVar, Callable, Any, Type, cast, Union
22 import onnx.shape_inference
23 import sys
24
25
26 # Limitation of single protobuf file is 2GB
27 MAXIMUM_PROTOBUF = 2000000000
28
29 # TODO: This thing where we reserialize the protobuf back into the
30 # string, only to deserialize it at the call site, is really goofy.
31 # Stop doing that.
32
33
34 # NB: Please don't edit this context!
35 DEFAULT_CONTEXT = C.CheckerContext()
36 DEFAULT_CONTEXT.ir_version = IR_VERSION
37 # TODO: Maybe ONNX-ML should also be defaulted?
38 DEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}
39
40
41 FuncType = TypeVar('FuncType', bound=Callable[..., Any])
42
43
44 # TODO: This really doesn't seem worth the metaprogramming...
45 def _create_checker(proto_type: Type[Message]) -> Callable[[FuncType], FuncType]:
46 def decorator(py_func: FuncType) -> FuncType:
47 @functools.wraps(py_func)
48 def checker(proto: Message, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> Any:
49 if not isinstance(proto, proto_type):
50 raise RuntimeError(
51 'You cannot pass an object that is not of type {}'.format(
52 proto_type.__name__))
53 return getattr(C, py_func.__name__)(
54 proto.SerializeToString(), ctx)
55 return cast(FuncType, checker)
56 return decorator
57
58
59 @_create_checker(ValueInfoProto)
60 def check_value_info(value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
61 pass
62
63
64 @_create_checker(TensorProto)
65 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
66 pass
67
68
69 @_create_checker(AttributeProto)
70 def check_attribute(attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
71 pass
72
73
74 @_create_checker(NodeProto)
75 def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
76 pass
77
78
79 @_create_checker(GraphProto)
80 def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
81 pass
82
83
84 def check_sparse_tensor(sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
85 C.check_sparse_tensor(sparse.SerializeToString(), ctx)
86
87
88 def check_model(model: Union[ModelProto, str, bytes], full_check: bool = False) -> None:
89 """Check the consistency of a model. An exception is raised if the test fails.
90
91 Arguments:
92 model (ModelProto): model to check
93 full_check (bool): if True, the function checks shapes can be inferred
94 """
95 # If model is a path instead of ModelProto
96 if isinstance(model, str):
97 C.check_model_path(model)
98 if full_check:
99 onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)
100 else:
101 protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()
102 # If the protobuf is larger than 2GB,
103 # remind users should use the model path to check
104 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
105 raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')
106 C.check_model(protobuf_string)
107 if full_check:
108 onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
109
110
111 ValidationError = C.ValidationError
112
[end of onnx/checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/onnx/checker.py b/onnx/checker.py
--- a/onnx/checker.py
+++ b/onnx/checker.py
@@ -94,18 +94,14 @@
"""
# If model is a path instead of ModelProto
if isinstance(model, str):
- C.check_model_path(model)
- if full_check:
- onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)
+ C.check_model_path(model, full_check)
else:
protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()
# If the protobuf is larger than 2GB,
# remind users should use the model path to check
if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')
- C.check_model(protobuf_string)
- if full_check:
- onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
+ C.check_model(protobuf_string, full_check)
ValidationError = C.ValidationError
| {"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -94,18 +94,14 @@\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, str):\n- C.check_model_path(model)\n- if full_check:\n- onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)\n+ C.check_model_path(model, full_check)\n else:\n protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')\n- C.check_model(protobuf_string)\n- if full_check:\n- onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)\n+ C.check_model(protobuf_string, full_check)\n \n \n ValidationError = C.ValidationError\n", "issue": "Make C++ and Python checker API consistent\nPython checker API supports `full_check` arg:\r\nhttps://github.com/onnx/onnx/blob/fa6f8cfdce3d86346e8a7494f3062b98416c85fb/onnx/checker.py#L94\r\n\r\nC++ does not.\r\nIt'd be nice for them to be consistent.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n\"\"\"onnx checker\n\nThis implements graphalities that allows us to check whether a serialized\nproto is legal.\n\"\"\"\n\nimport functools\n\nfrom onnx import (ValueInfoProto,\n AttributeProto,\n TensorProto,\n SparseTensorProto,\n NodeProto,\n ModelProto,\n GraphProto,\n IR_VERSION)\nimport onnx.onnx_cpp2py_export.checker as C\nimport onnx.defs\nfrom google.protobuf.message import Message\nfrom typing import TypeVar, Callable, Any, Type, cast, Union\nimport onnx.shape_inference\nimport sys\n\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar('FuncType', bound=Callable[..., Any])\n\n\n# TODO: This really doesn't seem worth the metaprogramming...\ndef _create_checker(proto_type: Type[Message]) -> Callable[[FuncType], FuncType]:\n def decorator(py_func: FuncType) -> FuncType:\n @functools.wraps(py_func)\n def checker(proto: Message, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> Any:\n if not isinstance(proto, proto_type):\n raise RuntimeError(\n 'You cannot pass an object that is not of type {}'.format(\n proto_type.__name__))\n return getattr(C, py_func.__name__)(\n proto.SerializeToString(), ctx)\n return cast(FuncType, checker)\n return decorator\n\n\n@_create_checker(ValueInfoProto)\ndef check_value_info(value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(TensorProto)\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(AttributeProto)\ndef check_attribute(attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(NodeProto)\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(GraphProto)\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = 
DEFAULT_CONTEXT) -> None:\n pass\n\n\ndef check_sparse_tensor(sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(model: Union[ModelProto, str, bytes], full_check: bool = False) -> None:\n \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n\n Arguments:\n model (ModelProto): model to check\n full_check (bool): if True, the function checks shapes can be inferred\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, str):\n C.check_model_path(model)\n if full_check:\n onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)\n else:\n protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')\n C.check_model(protobuf_string)\n if full_check:\n onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]} | 1,727 | 250 |
gh_patches_debug_37440 | rasdani/github-patches | git_diff | arviz-devs__arviz-636 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow normalization in plot_parallel
It would be great if plot_parallel had a keyword arg `normalize` (or standardize) that centers and rescales the variables before plotting. That would make it easier to see things if some posteriors are much tighter than others:

</issue>
<code>
[start of arviz/plots/parallelplot.py]
1 """Parallel coordinates plot showing posterior points with and without divergences marked."""
2 import matplotlib.pyplot as plt
3 import numpy as np
4
5 from ..data import convert_to_dataset
6 from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
7 from ..utils import _var_names
8
9
10 def plot_parallel(
11 data,
12 var_names=None,
13 coords=None,
14 figsize=None,
15 textsize=None,
16 legend=True,
17 colornd="k",
18 colord="C1",
19 shadend=0.025,
20 ax=None,
21 ):
22 """
23 Plot parallel coordinates plot showing posterior points with and without divergences.
24
25 Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen
26
27 Parameters
28 ----------
29 data : obj
30 Any object that can be converted to an az.InferenceData object
31 Refer to documentation of az.convert_to_dataset for details
32 var_names : list of variable names
33 Variables to be plotted, if None all variable are plotted. Can be used to change the order
34 of the plotted variables
35 coords : mapping, optional
36 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
37 figsize : tuple
38 Figure size. If None it will be defined automatically.
39 textsize: float
40 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
41 on figsize.
42 legend : bool
43 Flag for plotting legend (defaults to True)
44 colornd : valid matplotlib color
45 color for non-divergent points. Defaults to 'k'
46 colord : valid matplotlib color
47 color for divergent points. Defaults to 'C1'
48 shadend : float
49 Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).
50 Defaults to .025
51 ax : axes
52 Matplotlib axes.
53
54 Returns
55 -------
56 ax : matplotlib axes
57 """
58 if coords is None:
59 coords = {}
60
61 # Get diverging draws and combine chains
62 divergent_data = convert_to_dataset(data, group="sample_stats")
63 _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=("diverging",), combined=True)
64 diverging_mask = np.squeeze(diverging_mask)
65
66 # Get posterior draws and combine chains
67 posterior_data = convert_to_dataset(data, group="posterior")
68 var_names = _var_names(var_names, posterior_data)
69 var_names, _posterior = xarray_to_ndarray(
70 get_coords(posterior_data, coords), var_names=var_names, combined=True
71 )
72
73 if len(var_names) < 2:
74 raise ValueError("This plot needs at least two variables")
75
76 figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
77
78 if ax is None:
79 _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
80
81 ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)
82
83 if np.any(diverging_mask):
84 ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)
85
86 ax.tick_params(labelsize=textsize)
87 ax.set_xticks(range(len(var_names)))
88 ax.set_xticklabels(var_names)
89
90 if legend:
91 ax.plot([], color=colornd, label="non-divergent")
92 if np.any(diverging_mask):
93 ax.plot([], color=colord, label="divergent")
94 ax.legend(fontsize=xt_labelsize)
95
96 return ax
97
[end of arviz/plots/parallelplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py
--- a/arviz/plots/parallelplot.py
+++ b/arviz/plots/parallelplot.py
@@ -2,6 +2,7 @@
import matplotlib.pyplot as plt
import numpy as np
+from scipy.stats.mstats import rankdata
from ..data import convert_to_dataset
from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
from ..utils import _var_names
@@ -18,6 +19,7 @@
colord="C1",
shadend=0.025,
ax=None,
+ norm_method=None,
):
"""
Plot parallel coordinates plot showing posterior points with and without divergences.
@@ -50,10 +52,33 @@
Defaults to .025
ax : axes
Matplotlib axes.
+ norm_method : str
+ Method for normalizing the data. Methods include normal, minmax and rank.
+ Defaults to none.
Returns
-------
ax : matplotlib axes
+
+ Examples
+ --------
+ Plot default parallel plot
+
+ .. plot::
+ :context: close-figs
+
+ >>> import arviz as az
+ >>> data = az.load_arviz_data('centered_eight')
+ >>> az.plot_parallel(data, var_names=["mu", "tau"])
+
+
+ Plot parallel plot with normalization
+
+ .. plot::
+ :context: close-figs
+
+ >>> az.plot_parallel(data, var_names=["mu", "tau"], norm_method='normal')
+
"""
if coords is None:
coords = {}
@@ -69,9 +94,23 @@
var_names, _posterior = xarray_to_ndarray(
get_coords(posterior_data, coords), var_names=var_names, combined=True
)
-
if len(var_names) < 2:
raise ValueError("This plot needs at least two variables")
+ if norm_method is not None:
+ if norm_method == "normal":
+ mean = np.mean(_posterior, axis=1)
+ standard_deviation = np.std(_posterior, axis=1)
+ for i in range(0, np.shape(mean)[0]):
+ _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]
+ elif norm_method == "minmax":
+ min_elem = np.min(_posterior, axis=1)
+ max_elem = np.max(_posterior, axis=1)
+ for i in range(0, np.shape(min_elem)[0]):
+ _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])
+ elif norm_method == "rank":
+ _posterior = rankdata(_posterior, axis=1)
+ else:
+ raise ValueError("{} is not supported. Use normal, minmax or rank.".format(norm_method))
figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
| {"golden_diff": "diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py\n--- a/arviz/plots/parallelplot.py\n+++ b/arviz/plots/parallelplot.py\n@@ -2,6 +2,7 @@\n import matplotlib.pyplot as plt\n import numpy as np\n \n+from scipy.stats.mstats import rankdata\n from ..data import convert_to_dataset\n from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\n from ..utils import _var_names\n@@ -18,6 +19,7 @@\n colord=\"C1\",\n shadend=0.025,\n ax=None,\n+ norm_method=None,\n ):\n \"\"\"\n Plot parallel coordinates plot showing posterior points with and without divergences.\n@@ -50,10 +52,33 @@\n Defaults to .025\n ax : axes\n Matplotlib axes.\n+ norm_method : str\n+ Method for normalizing the data. Methods include normal, minmax and rank.\n+ Defaults to none.\n \n Returns\n -------\n ax : matplotlib axes\n+\n+ Examples\n+ --------\n+ Plot default parallel plot\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> import arviz as az\n+ >>> data = az.load_arviz_data('centered_eight')\n+ >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"])\n+\n+\n+ Plot parallel plot with normalization\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"], norm_method='normal')\n+\n \"\"\"\n if coords is None:\n coords = {}\n@@ -69,9 +94,23 @@\n var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n-\n if len(var_names) < 2:\n raise ValueError(\"This plot needs at least two variables\")\n+ if norm_method is not None:\n+ if norm_method == \"normal\":\n+ mean = np.mean(_posterior, axis=1)\n+ standard_deviation = np.std(_posterior, axis=1)\n+ for i in range(0, np.shape(mean)[0]):\n+ _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]\n+ elif norm_method == \"minmax\":\n+ min_elem = np.min(_posterior, axis=1)\n+ max_elem = np.max(_posterior, axis=1)\n+ for i in range(0, np.shape(min_elem)[0]):\n+ _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])\n+ elif norm_method == \"rank\":\n+ _posterior = rankdata(_posterior, axis=1)\n+ else:\n+ raise ValueError(\"{} is not supported. Use normal, minmax or rank.\".format(norm_method))\n \n figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)\n", "issue": "Allow normalization in plot_parallel\nIt would be great if plot_parallel had a keyword arg `normalize` (or standardize), that centers and rescales the variables before plotting. 
That would make it easier to see things if some posteriors are much more tight than others:\r\n\n", "before_files": [{"content": "\"\"\"Parallel coordinates plot showing posterior points with and without divergences marked.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ..data import convert_to_dataset\nfrom .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\nfrom ..utils import _var_names\n\n\ndef plot_parallel(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n legend=True,\n colornd=\"k\",\n colord=\"C1\",\n shadend=0.025,\n ax=None,\n):\n \"\"\"\n Plot parallel coordinates plot showing posterior points with and without divergences.\n\n Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : list of variable names\n Variables to be plotted, if None all variable are plotted. Can be used to change the order\n of the plotted variables\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n legend : bool\n Flag for plotting legend (defaults to True)\n colornd : valid matplotlib color\n color for non-divergent points. Defaults to 'k'\n colord : valid matplotlib color\n color for divergent points. Defaults to 'C1'\n shadend : float\n Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).\n Defaults to .025\n ax : axes\n Matplotlib axes.\n\n Returns\n -------\n ax : matplotlib axes\n \"\"\"\n if coords is None:\n coords = {}\n\n # Get diverging draws and combine chains\n divergent_data = convert_to_dataset(data, group=\"sample_stats\")\n _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=(\"diverging\",), combined=True)\n diverging_mask = np.squeeze(diverging_mask)\n\n # Get posterior draws and combine chains\n posterior_data = convert_to_dataset(data, group=\"posterior\")\n var_names = _var_names(var_names, posterior_data)\n var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n\n if len(var_names) < 2:\n raise ValueError(\"This plot needs at least two variables\")\n\n figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n if ax is None:\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)\n\n if np.any(diverging_mask):\n ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)\n\n ax.tick_params(labelsize=textsize)\n ax.set_xticks(range(len(var_names)))\n ax.set_xticklabels(var_names)\n\n if legend:\n ax.plot([], color=colornd, label=\"non-divergent\")\n if np.any(diverging_mask):\n ax.plot([], color=colord, label=\"divergent\")\n ax.legend(fontsize=xt_labelsize)\n\n return ax\n", "path": "arviz/plots/parallelplot.py"}]} | 1,632 | 703 |
gh_patches_debug_30482 | rasdani/github-patches | git_diff | SeldonIO__MLServer-605 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add docker build option to not use cache
To ensure a fresh environment, and while potentially developing a new model version, users will not want to use cached layers in the Docker build for their image.
In Docker this is the --no-cache option. I would make it a boolean option within the CLI, and also add the --rm option (although its default is true, it might be worth making sure) to remove any intermediate containers after success, to again ensure a clean environment in CI/CD as well as locally.
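A hypothetical sketch of how the flag could translate into the underlying docker invocation (illustrative only; it mirrors the subprocess-based build command the CLI already constructs):
```python
def docker_build_cmd(folder, dockerfile_path, image_tag, no_cache=False):
    # --rm removes intermediate containers after a successful build;
    # --no-cache forces every layer to be rebuilt from scratch
    flags = "--rm" + (" --no-cache" if no_cache else "")
    return f"docker build {flags} {folder} -f {dockerfile_path} -t {image_tag}"
```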
</issue>
<code>
[start of mlserver/cli/build.py]
1 import subprocess
2 import os
3
4 from tempfile import TemporaryDirectory
5
6 from .. import __version__
7 from ..logging import logger
8
9 from .constants import (
10 DockerfileName,
11 DockerfileTemplate,
12 DockerignoreName,
13 Dockerignore,
14 )
15
16
17 def generate_dockerfile() -> str:
18 return DockerfileTemplate.format(version=__version__)
19
20
21 def write_dockerfile(
22 folder: str, dockerfile: str, include_dockerignore: bool = True
23 ) -> str:
24 dockerfile_path = os.path.join(folder, DockerfileName)
25 with open(dockerfile_path, "w") as dockerfile_handler:
26 logger.info(f"Writing Dockerfile in {dockerfile_path}")
27 dockerfile_handler.write(dockerfile)
28
29 if include_dockerignore:
30 # Point to our own .dockerignore
31 # https://docs.docker.com/engine/reference/commandline/build/#use-a-dockerignore-file
32 dockerignore_path = dockerfile_path + DockerignoreName
33 with open(dockerignore_path, "w") as dockerignore_handler:
34 logger.info(f"Writing .dockerignore in {dockerignore_path}")
35 dockerignore_handler.write(Dockerignore)
36
37 return dockerfile_path
38
39
40 def build_image(folder: str, dockerfile: str, image_tag: str) -> str:
41 logger.info(f"Building Docker image with tag {image_tag}")
42 with TemporaryDirectory() as tmp_dir:
43 dockerfile_path = write_dockerfile(tmp_dir, dockerfile)
44
45 build_cmd = f"docker build {folder} -f {dockerfile_path} -t {image_tag}"
46 build_env = os.environ.copy()
47 build_env["DOCKER_BUILDKIT"] = "1"
48 subprocess.run(build_cmd, check=True, shell=True, env=build_env)
49
50 return image_tag
51
[end of mlserver/cli/build.py]
[start of mlserver/cli/main.py]
1 """
2 Command-line interface to manage MLServer models.
3 """
4 import click
5 import asyncio
6
7 from functools import wraps
8
9 from ..server import MLServer
10 from ..logging import logger, configure_logger
11 from ..utils import install_uvloop_event_loop
12
13 from .build import generate_dockerfile, build_image, write_dockerfile
14 from .serve import load_settings
15
16
17 def click_async(f):
18 @wraps(f)
19 def wrapper(*args, **kwargs):
20 return asyncio.run(f(*args, **kwargs))
21
22 return wrapper
23
24
25 @click.group()
26 @click.version_option()
27 def root():
28 """
29 Command-line interface to manage MLServer models.
30 """
31 pass
32
33
34 @root.command("start")
35 @click.argument("folder", nargs=1)
36 @click_async
37 async def start(folder: str):
38 """
39 Start serving a machine learning model with MLServer.
40 """
41 settings, models_settings = await load_settings(folder)
42
43 server = MLServer(settings)
44 await server.start(models_settings)
45
46
47 @root.command("build")
48 @click.argument("folder", nargs=1)
49 @click.option("-t", "--tag", type=str)
50 @click_async
51 async def build(folder: str, tag: str):
52 """
53 Build a Docker image for a custom MLServer runtime.
54 """
55 dockerfile = generate_dockerfile()
56 build_image(folder, dockerfile, tag)
57 logger.info(f"Successfully built custom Docker image with tag {tag}")
58
59
60 @root.command("dockerfile")
61 @click.argument("folder", nargs=1)
62 @click.option("-i", "--include-dockerignore", is_flag=True)
63 @click_async
64 async def dockerfile(folder: str, include_dockerignore: bool):
65 """
66 Generate a Dockerfile
67 """
68 dockerfile = generate_dockerfile()
69 dockerfile_path = write_dockerfile(
70 folder, dockerfile, include_dockerignore=include_dockerignore
71 )
72 logger.info(f"Successfully written Dockerfile in {dockerfile_path}")
73
74
75 def main():
76 configure_logger()
77 install_uvloop_event_loop()
78 root()
79
80
81 if __name__ == "__main__":
82 main()
83
[end of mlserver/cli/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/cli/build.py b/mlserver/cli/build.py
--- a/mlserver/cli/build.py
+++ b/mlserver/cli/build.py
@@ -37,12 +37,18 @@
return dockerfile_path
-def build_image(folder: str, dockerfile: str, image_tag: str) -> str:
+def build_image(
+ folder: str, dockerfile: str, image_tag: str, no_cache: bool = False
+) -> str:
logger.info(f"Building Docker image with tag {image_tag}")
+ _docker_command_prefix = "docker build --rm "
with TemporaryDirectory() as tmp_dir:
dockerfile_path = write_dockerfile(tmp_dir, dockerfile)
-
- build_cmd = f"docker build {folder} -f {dockerfile_path} -t {image_tag}"
+ _docker_command_suffix = f"{folder} -f {dockerfile_path} -t {image_tag}"
+ if no_cache:
+ build_cmd = _docker_command_prefix + "--no-cache " + _docker_command_suffix
+ else:
+ build_cmd = _docker_command_prefix + _docker_command_suffix
build_env = os.environ.copy()
build_env["DOCKER_BUILDKIT"] = "1"
subprocess.run(build_cmd, check=True, shell=True, env=build_env)
diff --git a/mlserver/cli/main.py b/mlserver/cli/main.py
--- a/mlserver/cli/main.py
+++ b/mlserver/cli/main.py
@@ -47,13 +47,14 @@
@root.command("build")
@click.argument("folder", nargs=1)
@click.option("-t", "--tag", type=str)
[email protected]("--no-cache", default=False, is_flag=True)
@click_async
-async def build(folder: str, tag: str):
+async def build(folder: str, tag: str, no_cache: bool = False):
"""
Build a Docker image for a custom MLServer runtime.
"""
dockerfile = generate_dockerfile()
- build_image(folder, dockerfile, tag)
+ build_image(folder, dockerfile, tag, no_cache=no_cache)
logger.info(f"Successfully built custom Docker image with tag {tag}")
| {"golden_diff": "diff --git a/mlserver/cli/build.py b/mlserver/cli/build.py\n--- a/mlserver/cli/build.py\n+++ b/mlserver/cli/build.py\n@@ -37,12 +37,18 @@\n return dockerfile_path\n \n \n-def build_image(folder: str, dockerfile: str, image_tag: str) -> str:\n+def build_image(\n+ folder: str, dockerfile: str, image_tag: str, no_cache: bool = False\n+) -> str:\n logger.info(f\"Building Docker image with tag {image_tag}\")\n+ _docker_command_prefix = \"docker build --rm \"\n with TemporaryDirectory() as tmp_dir:\n dockerfile_path = write_dockerfile(tmp_dir, dockerfile)\n-\n- build_cmd = f\"docker build {folder} -f {dockerfile_path} -t {image_tag}\"\n+ _docker_command_suffix = f\"{folder} -f {dockerfile_path} -t {image_tag}\"\n+ if no_cache:\n+ build_cmd = _docker_command_prefix + \"--no-cache \" + _docker_command_suffix\n+ else:\n+ build_cmd = _docker_command_prefix + _docker_command_suffix\n build_env = os.environ.copy()\n build_env[\"DOCKER_BUILDKIT\"] = \"1\"\n subprocess.run(build_cmd, check=True, shell=True, env=build_env)\ndiff --git a/mlserver/cli/main.py b/mlserver/cli/main.py\n--- a/mlserver/cli/main.py\n+++ b/mlserver/cli/main.py\n@@ -47,13 +47,14 @@\n @root.command(\"build\")\n @click.argument(\"folder\", nargs=1)\n @click.option(\"-t\", \"--tag\", type=str)\[email protected](\"--no-cache\", default=False, is_flag=True)\n @click_async\n-async def build(folder: str, tag: str):\n+async def build(folder: str, tag: str, no_cache: bool = False):\n \"\"\"\n Build a Docker image for a custom MLServer runtime.\n \"\"\"\n dockerfile = generate_dockerfile()\n- build_image(folder, dockerfile, tag)\n+ build_image(folder, dockerfile, tag, no_cache=no_cache)\n logger.info(f\"Successfully built custom Docker image with tag {tag}\")\n", "issue": "Add docker build option to not use cache\nTo ensure fresh environment and while potentially developing a new model version users will not want to use cached layers in the docker build for their image.\r\n\r\nIn docker this is the --no-cache option, I would make it a boolean option within the cli and also add the --rm option (although default is true might be worth making sure) to remove any intermediate containers after success to again ensure a clean environment in CI/CD as well as locally. \n", "before_files": [{"content": "import subprocess\nimport os\n\nfrom tempfile import TemporaryDirectory\n\nfrom .. 
import __version__\nfrom ..logging import logger\n\nfrom .constants import (\n DockerfileName,\n DockerfileTemplate,\n DockerignoreName,\n Dockerignore,\n)\n\n\ndef generate_dockerfile() -> str:\n return DockerfileTemplate.format(version=__version__)\n\n\ndef write_dockerfile(\n folder: str, dockerfile: str, include_dockerignore: bool = True\n) -> str:\n dockerfile_path = os.path.join(folder, DockerfileName)\n with open(dockerfile_path, \"w\") as dockerfile_handler:\n logger.info(f\"Writing Dockerfile in {dockerfile_path}\")\n dockerfile_handler.write(dockerfile)\n\n if include_dockerignore:\n # Point to our own .dockerignore\n # https://docs.docker.com/engine/reference/commandline/build/#use-a-dockerignore-file\n dockerignore_path = dockerfile_path + DockerignoreName\n with open(dockerignore_path, \"w\") as dockerignore_handler:\n logger.info(f\"Writing .dockerignore in {dockerignore_path}\")\n dockerignore_handler.write(Dockerignore)\n\n return dockerfile_path\n\n\ndef build_image(folder: str, dockerfile: str, image_tag: str) -> str:\n logger.info(f\"Building Docker image with tag {image_tag}\")\n with TemporaryDirectory() as tmp_dir:\n dockerfile_path = write_dockerfile(tmp_dir, dockerfile)\n\n build_cmd = f\"docker build {folder} -f {dockerfile_path} -t {image_tag}\"\n build_env = os.environ.copy()\n build_env[\"DOCKER_BUILDKIT\"] = \"1\"\n subprocess.run(build_cmd, check=True, shell=True, env=build_env)\n\n return image_tag\n", "path": "mlserver/cli/build.py"}, {"content": "\"\"\"\nCommand-line interface to manage MLServer models.\n\"\"\"\nimport click\nimport asyncio\n\nfrom functools import wraps\n\nfrom ..server import MLServer\nfrom ..logging import logger, configure_logger\nfrom ..utils import install_uvloop_event_loop\n\nfrom .build import generate_dockerfile, build_image, write_dockerfile\nfrom .serve import load_settings\n\n\ndef click_async(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n return asyncio.run(f(*args, **kwargs))\n\n return wrapper\n\n\[email protected]()\[email protected]_option()\ndef root():\n \"\"\"\n Command-line interface to manage MLServer models.\n \"\"\"\n pass\n\n\[email protected](\"start\")\[email protected](\"folder\", nargs=1)\n@click_async\nasync def start(folder: str):\n \"\"\"\n Start serving a machine learning model with MLServer.\n \"\"\"\n settings, models_settings = await load_settings(folder)\n\n server = MLServer(settings)\n await server.start(models_settings)\n\n\[email protected](\"build\")\[email protected](\"folder\", nargs=1)\[email protected](\"-t\", \"--tag\", type=str)\n@click_async\nasync def build(folder: str, tag: str):\n \"\"\"\n Build a Docker image for a custom MLServer runtime.\n \"\"\"\n dockerfile = generate_dockerfile()\n build_image(folder, dockerfile, tag)\n logger.info(f\"Successfully built custom Docker image with tag {tag}\")\n\n\[email protected](\"dockerfile\")\[email protected](\"folder\", nargs=1)\[email protected](\"-i\", \"--include-dockerignore\", is_flag=True)\n@click_async\nasync def dockerfile(folder: str, include_dockerignore: bool):\n \"\"\"\n Generate a Dockerfile\n \"\"\"\n dockerfile = generate_dockerfile()\n dockerfile_path = write_dockerfile(\n folder, dockerfile, include_dockerignore=include_dockerignore\n )\n logger.info(f\"Successfully written Dockerfile in {dockerfile_path}\")\n\n\ndef main():\n configure_logger()\n install_uvloop_event_loop()\n root()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mlserver/cli/main.py"}]} | 1,739 | 480 |
gh_patches_debug_10612 | rasdani/github-patches | git_diff | fedora-infra__bodhi-1450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
There are multiple alembic heads on the develop branch
The migrations can't be applied on the develop branch because there are multiple alembic heads:
```
[vagrant@bodhi-dev bodhi]$ alembic upgrade head
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib64/python2.7/site-packages/zope': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/zope': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/paste': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/moksha': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/repoze': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/home/vagrant/bodhi/bodhi/server/__init__.py:26: DeprecationWarning: unauthenticated_userid: As of Pyramid 1.5 the "pyramid.security.unauthenticated_userid" API is now deprecated. It will be removed in Pyramd 1.8. Use the "unauthenticated_userid" attribute of the Pyramid request instead.
from pyramid.security import unauthenticated_userid
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads
FAILED: Multiple head revisions are present for given argument 'head'; please specify a specific target revision,
'<branchname>@head' to narrow to a specific head, or 'heads' for all heads
```
</issue>
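Editor's note (illustrative, not part of the record): alembic builds its migration graph from each version file's `revision` / `down_revision` pair, so two files that point at the same parent create two heads. The sketch below only shows the chain shape; the first file name is invented, while the revision ids are the ones appearing in this record and its fix.

```
# Two revision files both declaring down_revision = "fc6b0169c596" fork the
# history into two heads. Re-pointing one of them at the other's revision id
# (which is what the accompanying golden diff does) restores a single chain.

# alembic/versions/12d3e8695f90_... .py   (invented file name for illustration)
revision = "12d3e8695f90"
down_revision = "fc6b0169c596"

# alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
revision = "9241378c92ab"
down_revision = "12d3e8695f90"  # previously "fc6b0169c596", the cause of the second head
```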
<code>
[start of alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py]
1 """Convert the builds table to be polymorphic.
2
3 Revision ID: 9241378c92ab
4 Revises: fc6b0169c596
5 Create Date: 2017-04-06 20:37:24.766366
6 """
7 from alembic import op
8 import sqlalchemy as sa
9
10
11 # revision identifiers, used by Alembic.
12 revision = '9241378c92ab'
13 down_revision = 'fc6b0169c596'
14
15
16 def upgrade():
17 """Add the type column to the builds table."""
18 # The default of ``1`` is the RPM Build type.
19 op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))
20 op.alter_column('builds', 'type', server_default=None)
21
22
23 def downgrade():
24 """Remove the type column from the builds table."""
25 op.drop_column('builds', 'type')
26
[end of alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
@@ -1,7 +1,7 @@
"""Convert the builds table to be polymorphic.
Revision ID: 9241378c92ab
-Revises: fc6b0169c596
+Revises: 12d3e8695f90
Create Date: 2017-04-06 20:37:24.766366
"""
from alembic import op
@@ -10,7 +10,7 @@
# revision identifiers, used by Alembic.
revision = '9241378c92ab'
-down_revision = 'fc6b0169c596'
+down_revision = '12d3e8695f90'
def upgrade():
| {"golden_diff": "diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n@@ -1,7 +1,7 @@\n \"\"\"Convert the builds table to be polymorphic.\n \n Revision ID: 9241378c92ab\n-Revises: fc6b0169c596\n+Revises: 12d3e8695f90\n Create Date: 2017-04-06 20:37:24.766366\n \"\"\"\n from alembic import op\n@@ -10,7 +10,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '9241378c92ab'\n-down_revision = 'fc6b0169c596'\n+down_revision = '12d3e8695f90'\n \n \n def upgrade():\n", "issue": "There are multiple alembic heads on the develop branch\nThe migrations can't be applied on the develop branch because there are multiple alembic heads:\r\n\r\n```\r\n[vagrant@bodhi-dev bodhi]$ alembic upgrade head\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib64/python2.7/site-packages/zope': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/zope': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/paste': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/moksha': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/repoze': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/home/vagrant/bodhi/bodhi/server/__init__.py:26: DeprecationWarning: unauthenticated_userid: As of Pyramid 1.5 the \"pyramid.security.unauthenticated_userid\" API is now deprecated. It will be removed in Pyramd 1.8. 
Use the \"unauthenticated_userid\" attribute of the Pyramid request instead.\r\n from pyramid.security import unauthenticated_userid\r\nINFO [alembic.runtime.migration] Context impl PostgresqlImpl.\r\nINFO [alembic.runtime.migration] Will assume transactional DDL.\r\nERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n FAILED: Multiple head revisions are present for given argument 'head'; please specify a specific target revision,\r\n '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n```\n", "before_files": [{"content": "\"\"\"Convert the builds table to be polymorphic.\n\nRevision ID: 9241378c92ab\nRevises: fc6b0169c596\nCreate Date: 2017-04-06 20:37:24.766366\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9241378c92ab'\ndown_revision = 'fc6b0169c596'\n\n\ndef upgrade():\n \"\"\"Add the type column to the builds table.\"\"\"\n # The default of ``1`` is the RPM Build type.\n op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))\n op.alter_column('builds', 'type', server_default=None)\n\n\ndef downgrade():\n \"\"\"Remove the type column from the builds table.\"\"\"\n op.drop_column('builds', 'type')\n", "path": "alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py"}]} | 1,388 | 308 |
gh_patches_debug_57587 | rasdani/github-patches | git_diff | joke2k__faker-262 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
en_US SSN provider generates invalid SSNs
There's a few limitations on United States' SSNs that prevent it from being completely random.
- No group can be all 0s
- The SSN cannot start with 666
- The SSN cannot start with a number >= 900
See http://www.ssa.gov/employer/randomization.html
Could you modify the SSN generator to avoid these issues?
</issue>
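Editor's illustration (not part of the record): the randomization rules listed above fit in a few lines. This sketch uses Python's `random` module directly, so it is a standalone example of the constraints rather than Faker's provider implementation.

```
# Constraints: area 001-899 excluding 666; group 01-99; serial 0001-9999.
import random


def valid_ssn() -> str:
    area = random.choice([n for n in range(1, 900) if n != 666])
    group = random.randint(1, 99)
    serial = random.randint(1, 9999)
    return "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial)


print(valid_ssn())  # e.g. 123-45-6789
```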
<code>
[start of faker/providers/ssn/en_US/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as BaseProvider
4
5
6 class Provider(BaseProvider):
7 pass
8
[end of faker/providers/ssn/en_US/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/ssn/en_US/__init__.py b/faker/providers/ssn/en_US/__init__.py
--- a/faker/providers/ssn/en_US/__init__.py
+++ b/faker/providers/ssn/en_US/__init__.py
@@ -4,4 +4,17 @@
class Provider(BaseProvider):
- pass
+
+ @classmethod
+ def ssn(cls):
+ # Certain numbers are invalid for U.S. SSNs. The area (first 3 digits)
+ # cannot be 666 or 900-999. The group number (middle digits) cannot be
+ # 00. The serial (last 4 digits) cannot be 0000
+ area = BaseProvider.random_int(min=1, max=899)
+ if area == 666:
+ area += 1
+ group = BaseProvider.random_int(1, 99)
+ serial = BaseProvider.random_int(1, 9999)
+
+ ssn = "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial)
+ return ssn
| {"golden_diff": "diff --git a/faker/providers/ssn/en_US/__init__.py b/faker/providers/ssn/en_US/__init__.py\n--- a/faker/providers/ssn/en_US/__init__.py\n+++ b/faker/providers/ssn/en_US/__init__.py\n@@ -4,4 +4,17 @@\n \n \n class Provider(BaseProvider):\n- pass\n+\n+ @classmethod\n+ def ssn(cls):\n+ # Certain numbers are invalid for U.S. SSNs. The area (first 3 digits)\n+ # cannot be 666 or 900-999. The group number (middle digits) cannot be\n+ # 00. The serial (last 4 digits) cannot be 0000\n+ area = BaseProvider.random_int(min=1, max=899)\n+ if area == 666:\n+ area += 1\n+ group = BaseProvider.random_int(1, 99)\n+ serial = BaseProvider.random_int(1, 9999)\n+\n+ ssn = \"{0:03d}-{1:02d}-{2:04d}\".format(area, group, serial)\n+ return ssn\n", "issue": "en_US SSN provider generates invalid SSNs\nThere's a few limitations on United States' SSNs that prevent it from being completely random.\n- No group can be all 0s\n- The SSN cannot start with 666\n- The SSN cannot start with a number >= 900\n\nSee http://www.ssa.gov/employer/randomization.html\n\nCould you modify the SSN generator to avoid these issues?\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as BaseProvider\n\n\nclass Provider(BaseProvider):\n pass\n", "path": "faker/providers/ssn/en_US/__init__.py"}]} | 677 | 270 |
gh_patches_debug_57082 | rasdani/github-patches | git_diff | SeldonIO__MLServer-1171 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add OS constraint in PyPI
Mention MLServer's OS constraints as metadata in `setup.py`, so that it becomes visible in pypi.org.
```
setup(...,
classifiers=[
'Operating System :: POSIX',
],
)
```
_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_
</issue>
<code>
[start of setup.py]
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 env_marker_cpython = (
29 "sys_platform != 'win32'"
30 " and (sys_platform != 'cygwin'"
31 " and platform_python_implementation != 'PyPy')"
32 )
33
34 setup(
35 name=PKG_NAME,
36 version=_load_version(),
37 url="https://github.com/SeldonIO/MLServer.git",
38 author="Seldon Technologies Ltd.",
39 author_email="[email protected]",
40 description="ML server",
41 include_package_data=True,
42 packages=find_packages(exclude=["tests", "tests.*"]),
43 install_requires=[
44 "click",
45 # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861
46 "fastapi >=0.88.0, <=0.89.1, !=0.89.0",
47 "python-dotenv",
48 "grpcio",
49 # The importlib-resources backport is required to use some
50 # functionality added in Python 3.10
51 # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime
52 "importlib-resources",
53 "numpy",
54 "pandas",
55 "protobuf",
56 "uvicorn",
57 "starlette_exporter",
58 "py-grpc-prometheus",
59 "uvloop;" + env_marker_cpython,
60 "aiokafka",
61 "tritonclient[http]>=2.24",
62 "aiofiles",
63 "orjson",
64 ],
65 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
66 long_description=_load_description(),
67 long_description_content_type="text/markdown",
68 license="Apache 2.0",
69 )
70
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@
url="https://github.com/SeldonIO/MLServer.git",
author="Seldon Technologies Ltd.",
author_email="[email protected]",
+ classifiers=["Operating System :: POSIX", "Operating System :: MacOS"],
description="ML server",
include_package_data=True,
packages=find_packages(exclude=["tests", "tests.*"]),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,6 +37,7 @@\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n+ classifiers=[\"Operating System :: POSIX\", \"Operating System :: MacOS\"],\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n", "issue": "Add OS constraint in PyPI\nMention MLServer's OS constraints as metadata in `setup.py`, so that it becomes visible in pypi.org. \r\n\r\n```\r\nsetup(...,\r\n classifiers=[\r\n 'Operating System :: POSIX',\r\n ],\r\n )\r\n```\r\n\r\n_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_\r\n \n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi >=0.88.0, <=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n # The importlib-resources backport is required to use some\n # functionality added in Python 3.10\n # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime\n \"importlib-resources\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]} | 1,283 | 105 |
gh_patches_debug_41531 | rasdani/github-patches | git_diff | deepset-ai__haystack-7247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docstrings - `haystack.components.caching`
</issue>
<code>
[start of haystack/components/caching/cache_checker.py]
1 from typing import List, Dict, Any
2
3 import importlib
4
5 import logging
6
7 from haystack import component, Document, default_from_dict, default_to_dict, DeserializationError
8 from haystack.document_stores.types import DocumentStore
9
10
11 logger = logging.getLogger(__name__)
12
13
14 @component
15 class CacheChecker:
16 """
17 CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified
18 cache field.
19 """
20
21 def __init__(self, document_store: DocumentStore, cache_field: str):
22 """
23 Create a UrlCacheChecker component.
24 """
25 self.document_store = document_store
26 self.cache_field = cache_field
27
28 def to_dict(self) -> Dict[str, Any]:
29 """
30 Serialize this component to a dictionary.
31 """
32 return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)
33
34 @classmethod
35 def from_dict(cls, data: Dict[str, Any]) -> "CacheChecker":
36 """
37 Deserialize this component from a dictionary.
38 """
39 init_params = data.get("init_parameters", {})
40 if "document_store" not in init_params:
41 raise DeserializationError("Missing 'document_store' in serialization data")
42 if "type" not in init_params["document_store"]:
43 raise DeserializationError("Missing 'type' in document store's serialization data")
44
45 try:
46 module_name, type_ = init_params["document_store"]["type"].rsplit(".", 1)
47 logger.debug("Trying to import %s", module_name)
48 module = importlib.import_module(module_name)
49 except (ImportError, DeserializationError) as e:
50 raise DeserializationError(
51 f"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported"
52 ) from e
53
54 docstore_class = getattr(module, type_)
55 docstore = docstore_class.from_dict(init_params["document_store"])
56
57 data["init_parameters"]["document_store"] = docstore
58 return default_from_dict(cls, data)
59
60 @component.output_types(hits=List[Document], misses=List)
61 def run(self, items: List[Any]):
62 """
63 Checks if any document associated with the specified field is already present in the store. If matching documents
64 are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.
65
66 :param items: A list of values associated with the cache_field to be checked against the cache.
67 :return: A dictionary with two keys: "hits" and "misses". The values are lists of documents that were found in
68 the cache and items that were not, respectively.
69 """
70 found_documents = []
71 misses = []
72
73 for item in items:
74 filters = {self.cache_field: item}
75 found = self.document_store.filter_documents(filters=filters)
76 if found:
77 found_documents.extend(found)
78 else:
79 misses.append(item)
80 return {"hits": found_documents, "misses": misses}
81
[end of haystack/components/caching/cache_checker.py]
</code>
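Editor's illustration (not part of the record): the hits/misses contract described in `run()`'s docstring above, as a short usage sketch. It assumes an in-memory `DocumentStore` backend is available and that cache lookups go through document metadata, as the component's filters imply.

```
# Usage sketch for CacheChecker.run(): items found via the cache_field become
# hits (full Documents), everything else comes back in misses.
from haystack import Document
from haystack.components.caching.cache_checker import CacheChecker
from haystack.document_stores.in_memory import InMemoryDocumentStore

store = InMemoryDocumentStore()
store.write_documents([
    Document(content="doc1", meta={"url": "https://example.com/1"}),
    Document(content="doc2", meta={"url": "https://example.com/2"}),
])

checker = CacheChecker(document_store=store, cache_field="url")
result = checker.run(items=["https://example.com/1", "https://example.com/3"])
# result["hits"]   -> the Document whose meta["url"] is "https://example.com/1"
# result["misses"] -> ["https://example.com/3"]
```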
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/components/caching/cache_checker.py b/haystack/components/caching/cache_checker.py
--- a/haystack/components/caching/cache_checker.py
+++ b/haystack/components/caching/cache_checker.py
@@ -14,27 +14,63 @@
@component
class CacheChecker:
"""
- CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified
- cache field.
+ Checks for the presence of documents in a Document Store based on a specified
+ field in each document's metadata.
+
+ If matching documents are found, they are returned as hits. If not, the items
+ are returned as misses, indicating they are not in the cache.
+
+ Usage example:
+ ```python
+ from haystack import Document
+ from haystack.document_stores.in_memory import InMemoryDocumentStore
+ from haystack.components.caching.cache_checker import CacheChecker
+
+ docstore = InMemoryDocumentStore()
+ documents = [
+ Document(content="doc1", meta={"url": "https://example.com/1"}),
+ Document(content="doc2", meta={"url": "https://example.com/2"}),
+ Document(content="doc3", meta={"url": "https://example.com/1"}),
+ Document(content="doc4", meta={"url": "https://example.com/2"}),
+ ]
+ docstore.write_documents(documents)
+ checker = CacheChecker(docstore, cache_field="url")
+ results = checker.run(items=["https://example.com/1", "https://example.com/5"])
+ assert results == {"hits": [documents[0], documents[2]], "misses": ["https://example.com/5"]}
+ ```
"""
def __init__(self, document_store: DocumentStore, cache_field: str):
"""
- Create a UrlCacheChecker component.
+ Create a CacheChecker component.
+
+ :param document_store:
+ Document store to check.
+ :param cache_field:
+ Name of the Document metadata field
+ to check for cache hits.
"""
self.document_store = document_store
self.cache_field = cache_field
def to_dict(self) -> Dict[str, Any]:
"""
- Serialize this component to a dictionary.
+ Serializes the component to a dictionary.
+
+ :returns:
+ Dictionary with serialized data.
"""
return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "CacheChecker":
"""
- Deserialize this component from a dictionary.
+ Deserializes the component from a dictionary.
+
+ :param data:
+ Dictionary to deserialize from.
+ :returns:
+ Deserialized component.
"""
init_params = data.get("init_parameters", {})
if "document_store" not in init_params:
@@ -60,12 +96,15 @@
@component.output_types(hits=List[Document], misses=List)
def run(self, items: List[Any]):
"""
- Checks if any document associated with the specified field is already present in the store. If matching documents
- are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.
-
- :param items: A list of values associated with the cache_field to be checked against the cache.
- :return: A dictionary with two keys: "hits" and "misses". The values are lists of documents that were found in
- the cache and items that were not, respectively.
+ Checks if any document associated with the specified cache field
+ is already present in the store.
+
+ :param items:
+ Values to be checked against the cache field.
+ :return:
+ A dictionary with two keys:
+ - `hits` - Documents that matched with any of the items.
+ - `misses` - Items that were not present in any documents.
"""
found_documents = []
misses = []
| {"golden_diff": "diff --git a/haystack/components/caching/cache_checker.py b/haystack/components/caching/cache_checker.py\n--- a/haystack/components/caching/cache_checker.py\n+++ b/haystack/components/caching/cache_checker.py\n@@ -14,27 +14,63 @@\n @component\n class CacheChecker:\n \"\"\"\n- CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified\n- cache field.\n+ Checks for the presence of documents in a Document Store based on a specified\n+ field in each document's metadata.\n+\n+ If matching documents are found, they are returned as hits. If not, the items\n+ are returned as misses, indicating they are not in the cache.\n+\n+ Usage example:\n+ ```python\n+ from haystack import Document\n+ from haystack.document_stores.in_memory import InMemoryDocumentStore\n+ from haystack.components.caching.cache_checker import CacheChecker\n+\n+ docstore = InMemoryDocumentStore()\n+ documents = [\n+ Document(content=\"doc1\", meta={\"url\": \"https://example.com/1\"}),\n+ Document(content=\"doc2\", meta={\"url\": \"https://example.com/2\"}),\n+ Document(content=\"doc3\", meta={\"url\": \"https://example.com/1\"}),\n+ Document(content=\"doc4\", meta={\"url\": \"https://example.com/2\"}),\n+ ]\n+ docstore.write_documents(documents)\n+ checker = CacheChecker(docstore, cache_field=\"url\")\n+ results = checker.run(items=[\"https://example.com/1\", \"https://example.com/5\"])\n+ assert results == {\"hits\": [documents[0], documents[2]], \"misses\": [\"https://example.com/5\"]}\n+ ```\n \"\"\"\n \n def __init__(self, document_store: DocumentStore, cache_field: str):\n \"\"\"\n- Create a UrlCacheChecker component.\n+ Create a CacheChecker component.\n+\n+ :param document_store:\n+ Document store to check.\n+ :param cache_field:\n+ Name of the Document metadata field\n+ to check for cache hits.\n \"\"\"\n self.document_store = document_store\n self.cache_field = cache_field\n \n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n- Serialize this component to a dictionary.\n+ Serializes the component to a dictionary.\n+\n+ :returns:\n+ Dictionary with serialized data.\n \"\"\"\n return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)\n \n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"CacheChecker\":\n \"\"\"\n- Deserialize this component from a dictionary.\n+ Deserializes the component from a dictionary.\n+\n+ :param data:\n+ Dictionary to deserialize from.\n+ :returns:\n+ Deserialized component.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n@@ -60,12 +96,15 @@\n @component.output_types(hits=List[Document], misses=List)\n def run(self, items: List[Any]):\n \"\"\"\n- Checks if any document associated with the specified field is already present in the store. If matching documents\n- are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.\n-\n- :param items: A list of values associated with the cache_field to be checked against the cache.\n- :return: A dictionary with two keys: \"hits\" and \"misses\". 
The values are lists of documents that were found in\n- the cache and items that were not, respectively.\n+ Checks if any document associated with the specified cache field\n+ is already present in the store.\n+\n+ :param items:\n+ Values to be checked against the cache field.\n+ :return:\n+ A dictionary with two keys:\n+ - `hits` - Documents that matched with any of the items.\n+ - `misses` - Items that were not present in any documents.\n \"\"\"\n found_documents = []\n misses = []\n", "issue": "Docstrings - `haystack.components.caching`\n\n", "before_files": [{"content": "from typing import List, Dict, Any\n\nimport importlib\n\nimport logging\n\nfrom haystack import component, Document, default_from_dict, default_to_dict, DeserializationError\nfrom haystack.document_stores.types import DocumentStore\n\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass CacheChecker:\n \"\"\"\n CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified\n cache field.\n \"\"\"\n\n def __init__(self, document_store: DocumentStore, cache_field: str):\n \"\"\"\n Create a UrlCacheChecker component.\n \"\"\"\n self.document_store = document_store\n self.cache_field = cache_field\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"CacheChecker\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n\n try:\n module_name, type_ = init_params[\"document_store\"][\"type\"].rsplit(\".\", 1)\n logger.debug(\"Trying to import %s\", module_name)\n module = importlib.import_module(module_name)\n except (ImportError, DeserializationError) as e:\n raise DeserializationError(\n f\"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported\"\n ) from e\n\n docstore_class = getattr(module, type_)\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(hits=List[Document], misses=List)\n def run(self, items: List[Any]):\n \"\"\"\n Checks if any document associated with the specified field is already present in the store. If matching documents\n are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.\n\n :param items: A list of values associated with the cache_field to be checked against the cache.\n :return: A dictionary with two keys: \"hits\" and \"misses\". The values are lists of documents that were found in\n the cache and items that were not, respectively.\n \"\"\"\n found_documents = []\n misses = []\n\n for item in items:\n filters = {self.cache_field: item}\n found = self.document_store.filter_documents(filters=filters)\n if found:\n found_documents.extend(found)\n else:\n misses.append(item)\n return {\"hits\": found_documents, \"misses\": misses}\n", "path": "haystack/components/caching/cache_checker.py"}]} | 1,348 | 900 |
gh_patches_debug_19709 | rasdani/github-patches | git_diff | fossasia__open-event-server-5615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to login to the Admin Panel.
**Description:**
When we try to login to admin panel, even if the credentials belong to super-admin, it returns "Credential incorrect"
**Steps to reproduce the behavior:**
1. Go to 127.0.0.1:5000/admin
2. Enter the admin credentials
3. Click on login button
4. See error "Credential incorrect"
**Expected Behaviour:**
It should login the user to the admin panel if credentials are correct and the user is an admin.
**Screenshots**

</issue>
<code>
[start of app/views/__init__.py]
1 import flask_login as login
2 import requests
3 from flask import url_for, redirect, Blueprint, request, make_response
4 from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers
5 from flask_admin.contrib.sqla import ModelView
6 from flask_scrypt import generate_password_hash
7 from wtforms import form, fields, validators
8
9 from app.models import db
10 from app.models.user import User
11
12
13 class AdminModelView(ModelView):
14 def is_accessible(self):
15 return login.current_user.is_authenticated
16
17 def inaccessible_callback(self, name, **kwargs):
18 # redirect to login page if user doesn't have access
19 return redirect(url_for('admin.index', next=request.url))
20
21
22 class LoginForm(form.Form):
23 login = fields.TextField(validators=[validators.required(), validators.email()], render_kw={"placeholder": "[email protected]"})
24 password = fields.PasswordField(validators=[validators.required()], render_kw={"placeholder": "xyzzy"})
25
26 def validate_login(self, field):
27 """
28 validate login
29 :param field:
30 :return:
31 """
32 user = self.get_user()
33
34 if user is None:
35 raise validators.ValidationError('User does not exist.')
36
37 if user.password != generate_password_hash(self.password.data, user.salt):
38 raise validators.ValidationError('Credentials incorrect.')
39
40 if not user.is_admin and not user.is_super_admin:
41 raise validators.ValidationError('Access Forbidden. Admin Rights Required')
42
43 def get_user(self):
44 return User.query.filter_by(email=self.login.data).first()
45
46
47 class MyAdminIndexView(AdminIndexView):
48 @expose('/')
49 def index(self):
50 """
51 /admin
52 :return:
53 """
54 if not login.current_user.is_authenticated:
55 return redirect(url_for('.login_view'))
56 return super(MyAdminIndexView, self).index()
57
58 @expose('/login/', methods=('GET', 'POST'))
59 def login_view(self):
60 """
61 login view for flask-admin
62 :return:
63 """
64 # handle user login
65 form = LoginForm(request.form)
66 if admin_helpers.validate_form_on_submit(form):
67 user = form.get_user()
68 login.login_user(user)
69
70 if login.current_user.is_authenticated:
71 return redirect(url_for('.index'))
72 self._template_args['form'] = form
73 return super(MyAdminIndexView, self).index()
74
75 @expose('/logout/')
76 def logout_view(self):
77 login.logout_user()
78 return redirect(url_for('.index'))
79
80
81 home_routes = Blueprint('home', __name__)
82
83
84 # Flask views
85 @home_routes.route('/')
86 def index():
87 """
88 Index route
89 :return:
90 """
91 r = requests.get('https://raw.githubusercontent.com/fossasia/open-event-server/gh-pages/api/v1/index.html')
92 response = make_response(r.content)
93 response.headers["Content-Type"] = "text/html"
94 return response
95
96
97 class BlueprintsManager:
98 def __init__(self):
99 pass
100
101 @staticmethod
102 def register(app):
103 """
104 Register blueprints
105 :param app: a flask app instance
106 :return:
107 """
108 app.register_blueprint(home_routes)
109 admin = Admin(app, name='Open Event API', template_mode='bootstrap3', index_view=MyAdminIndexView(),
110 base_template='admin_base.html')
111
112 # Get all the models in the db, all models should have a explicit __tablename__
113 classes, models, table_names = [], [], []
114 # noinspection PyProtectedMember
115 for class_ in list(db.Model._decl_class_registry.values()):
116 try:
117 table_names.append(class_.__tablename__)
118 classes.append(class_)
119 except:
120 pass
121 for table in list(db.metadata.tables.items()):
122 if table[0] in table_names:
123 models.append(classes[table_names.index(table[0])])
124
125 for model in models:
126 admin.add_view(AdminModelView(model, db.session))
127
[end of app/views/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/views/__init__.py b/app/views/__init__.py
--- a/app/views/__init__.py
+++ b/app/views/__init__.py
@@ -3,7 +3,6 @@
from flask import url_for, redirect, Blueprint, request, make_response
from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers
from flask_admin.contrib.sqla import ModelView
-from flask_scrypt import generate_password_hash
from wtforms import form, fields, validators
from app.models import db
@@ -34,7 +33,7 @@
if user is None:
raise validators.ValidationError('User does not exist.')
- if user.password != generate_password_hash(self.password.data, user.salt):
+ if not user.is_correct_password(self.password.data):
raise validators.ValidationError('Credentials incorrect.')
if not user.is_admin and not user.is_super_admin:
| {"golden_diff": "diff --git a/app/views/__init__.py b/app/views/__init__.py\n--- a/app/views/__init__.py\n+++ b/app/views/__init__.py\n@@ -3,7 +3,6 @@\n from flask import url_for, redirect, Blueprint, request, make_response\n from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers\n from flask_admin.contrib.sqla import ModelView\n-from flask_scrypt import generate_password_hash\n from wtforms import form, fields, validators\n \n from app.models import db\n@@ -34,7 +33,7 @@\n if user is None:\n raise validators.ValidationError('User does not exist.')\n \n- if user.password != generate_password_hash(self.password.data, user.salt):\n+ if not user.is_correct_password(self.password.data):\n raise validators.ValidationError('Credentials incorrect.')\n \n if not user.is_admin and not user.is_super_admin:\n", "issue": "Unable to login to the Admin Panel.\n**Description:**\r\nWhen we try to login to admin panel, even if the credentials belong to super-admin, it returns \"Credential incorrect\"\r\n\r\n**Steps to reproduce the behavior:**\r\n1. Go to 127.0.0.1:5000/admin\r\n2. Enter the admin credentials\r\n3. Click on login button\r\n4. See error \"Credential incorrect\"\r\n\r\n**Expected Behaviour:**\r\nIt should login the user to the admin panel if credentials are correct and the user is an admin.\r\n\r\n**Screenshots**\r\n\r\n\n", "before_files": [{"content": "import flask_login as login\nimport requests\nfrom flask import url_for, redirect, Blueprint, request, make_response\nfrom flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_scrypt import generate_password_hash\nfrom wtforms import form, fields, validators\n\nfrom app.models import db\nfrom app.models.user import User\n\n\nclass AdminModelView(ModelView):\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def inaccessible_callback(self, name, **kwargs):\n # redirect to login page if user doesn't have access\n return redirect(url_for('admin.index', next=request.url))\n\n\nclass LoginForm(form.Form):\n login = fields.TextField(validators=[validators.required(), validators.email()], render_kw={\"placeholder\": \"[email protected]\"})\n password = fields.PasswordField(validators=[validators.required()], render_kw={\"placeholder\": \"xyzzy\"})\n\n def validate_login(self, field):\n \"\"\"\n validate login\n :param field:\n :return:\n \"\"\"\n user = self.get_user()\n\n if user is None:\n raise validators.ValidationError('User does not exist.')\n\n if user.password != generate_password_hash(self.password.data, user.salt):\n raise validators.ValidationError('Credentials incorrect.')\n\n if not user.is_admin and not user.is_super_admin:\n raise validators.ValidationError('Access Forbidden. 
Admin Rights Required')\n\n def get_user(self):\n return User.query.filter_by(email=self.login.data).first()\n\n\nclass MyAdminIndexView(AdminIndexView):\n @expose('/')\n def index(self):\n \"\"\"\n /admin\n :return:\n \"\"\"\n if not login.current_user.is_authenticated:\n return redirect(url_for('.login_view'))\n return super(MyAdminIndexView, self).index()\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n \"\"\"\n login view for flask-admin\n :return:\n \"\"\"\n # handle user login\n form = LoginForm(request.form)\n if admin_helpers.validate_form_on_submit(form):\n user = form.get_user()\n login.login_user(user)\n\n if login.current_user.is_authenticated:\n return redirect(url_for('.index'))\n self._template_args['form'] = form\n return super(MyAdminIndexView, self).index()\n\n @expose('/logout/')\n def logout_view(self):\n login.logout_user()\n return redirect(url_for('.index'))\n\n\nhome_routes = Blueprint('home', __name__)\n\n\n# Flask views\n@home_routes.route('/')\ndef index():\n \"\"\"\n Index route\n :return:\n \"\"\"\n r = requests.get('https://raw.githubusercontent.com/fossasia/open-event-server/gh-pages/api/v1/index.html')\n response = make_response(r.content)\n response.headers[\"Content-Type\"] = \"text/html\"\n return response\n\n\nclass BlueprintsManager:\n def __init__(self):\n pass\n\n @staticmethod\n def register(app):\n \"\"\"\n Register blueprints\n :param app: a flask app instance\n :return:\n \"\"\"\n app.register_blueprint(home_routes)\n admin = Admin(app, name='Open Event API', template_mode='bootstrap3', index_view=MyAdminIndexView(),\n base_template='admin_base.html')\n\n # Get all the models in the db, all models should have a explicit __tablename__\n classes, models, table_names = [], [], []\n # noinspection PyProtectedMember\n for class_ in list(db.Model._decl_class_registry.values()):\n try:\n table_names.append(class_.__tablename__)\n classes.append(class_)\n except:\n pass\n for table in list(db.metadata.tables.items()):\n if table[0] in table_names:\n models.append(classes[table_names.index(table[0])])\n\n for model in models:\n admin.add_view(AdminModelView(model, db.session))\n", "path": "app/views/__init__.py"}]} | 1,824 | 194 |
gh_patches_debug_10505 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-1371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ZeroMean for Batch Independent Multioutput GP
I'm following the Batch Independent Multioutput GP example, but instead of using a constant mean, I would like each dimension to use a zero mean (maybe this is a bad idea?).
```
class ZeroMeanIndependentMultitaskGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood, nx):
super().__init__(train_x, train_y, likelihood)
self.n = nx #output dimension
#self.mean_module = gpytorch.means.MultitaskMean([gpytorch.means.ZeroMean()]*self.n,
# num_tasks=self.n)
#self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([self.n]))
self.mean_module = gpytorch.means.ZeroMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(batch_shape=torch.Size([self.n])),
batch_shape=torch.Size([self.n])
)
def forward(self, x):
mean_x = self.mean_module(x) # is this needed for ZeroMean?
covar_x = self.covar_module(x)
return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
)
```
When training with this, I get the error `RuntimeError: mean should be a matrix or a batch matrix (batch mode)`. It works as intended with constant mean. As you can see, I've tried a couple different things, but they don't seem to work either. I can't seem to find other people with the same issue online. Is it possible to do this with ZeroMean?
</issue>
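Editor's note (shape illustration only, plain `torch`, no gpytorch internals): with `b` independent output dimensions and `N` training points, the batched multivariate normal needs a mean of shape `(b, N)` alongside a `(b, N, N)` covariance, while a zero mean computed as `zeros(x.shape[:-1])` only has shape `(N,)`. That mismatch is, on this reading, what triggers the "mean should be a matrix or a batch matrix" error; the record's fix later gives `ZeroMean` a `batch_shape` argument so it can expand its output itself.

```
# Assumption-level sketch of the shape mismatch, using plain torch.
import torch

b, N, d = 2, 5, 3                              # tasks, training points, input features
x = torch.randn(N, d)

mean_unbatched = torch.zeros(x.shape[:-1])     # shape (N,): what a plain zero mean yields
mean_batched = torch.zeros(b, N)               # shape (b, N): what the batched MVN expects
covar_batched = torch.eye(N).expand(b, N, N)   # shape (b, N, N)

print(mean_unbatched.shape, mean_batched.shape, covar_batched.shape)
```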
<code>
[start of gpytorch/means/zero_mean.py]
1 #!/usr/bin/env python3
2
3 import torch
4
5 from .mean import Mean
6
7
8 class ZeroMean(Mean):
9 def forward(self, input):
10 return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)
11
[end of gpytorch/means/zero_mean.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/means/zero_mean.py b/gpytorch/means/zero_mean.py
--- a/gpytorch/means/zero_mean.py
+++ b/gpytorch/means/zero_mean.py
@@ -2,9 +2,18 @@
import torch
+from ..utils.broadcasting import _mul_broadcast_shape
from .mean import Mean
class ZeroMean(Mean):
+ def __init__(self, batch_shape=torch.Size(), **kwargs):
+ super(ZeroMean, self).__init__()
+ self.batch_shape = batch_shape
+
def forward(self, input):
- return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)
+ mean = torch.zeros(*self.batch_shape, 1, dtype=input.dtype, device=input.device)
+ if input.shape[:-2] == self.batch_shape:
+ return mean.expand(input.shape[:-1])
+ else:
+ return mean.expand(_mul_broadcast_shape(input.shape[:-1], mean.shape))
| {"golden_diff": "diff --git a/gpytorch/means/zero_mean.py b/gpytorch/means/zero_mean.py\n--- a/gpytorch/means/zero_mean.py\n+++ b/gpytorch/means/zero_mean.py\n@@ -2,9 +2,18 @@\n \n import torch\n \n+from ..utils.broadcasting import _mul_broadcast_shape\n from .mean import Mean\n \n \n class ZeroMean(Mean):\n+ def __init__(self, batch_shape=torch.Size(), **kwargs):\n+ super(ZeroMean, self).__init__()\n+ self.batch_shape = batch_shape\n+\n def forward(self, input):\n- return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)\n+ mean = torch.zeros(*self.batch_shape, 1, dtype=input.dtype, device=input.device)\n+ if input.shape[:-2] == self.batch_shape:\n+ return mean.expand(input.shape[:-1])\n+ else:\n+ return mean.expand(_mul_broadcast_shape(input.shape[:-1], mean.shape))\n", "issue": "ZeroMean for Batch Independent Multioutput GP\nI'm following the Batch Independent Multioutput GP example, but instead of using a constant mean, I would like each dimension to use a zero mean (maybe this is a bad idea?).\r\n\r\n```\r\nclass ZeroMeanIndependentMultitaskGPModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood, nx):\r\n super().__init__(train_x, train_y, likelihood)\r\n self.n = nx #output dimension\r\n #self.mean_module = gpytorch.means.MultitaskMean([gpytorch.means.ZeroMean()]*self.n,\r\n # num_tasks=self.n)\r\n #self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([self.n]))\r\n self.mean_module = gpytorch.means.ZeroMean()\r\n self.covar_module = gpytorch.kernels.ScaleKernel(\r\n gpytorch.kernels.RBFKernel(batch_shape=torch.Size([self.n])),\r\n batch_shape=torch.Size([self.n])\r\n )\r\n\r\n def forward(self, x):\r\n mean_x = self.mean_module(x) # is this needed for ZeroMean?\r\n covar_x = self.covar_module(x)\r\n return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(\r\n gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\r\n )\r\n```\r\n\r\n\r\nWhen training with this, I get the error `RuntimeError: mean should be a matrix or a batch matrix (batch mode)`. It works as intended with constant mean. As you can see, I've tried a couple different things, but they don't seem to work either. I can't seem to find other people with the same issue online. Is it possible to do this with ZeroMean?\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport torch\n\nfrom .mean import Mean\n\n\nclass ZeroMean(Mean):\n def forward(self, input):\n return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)\n", "path": "gpytorch/means/zero_mean.py"}]} | 985 | 217 |
gh_patches_debug_1971 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1994 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Postponed annotation evaluation causes `Annotated` to break
When using postponed annotation evaluation, annotating resolver arguments no longer works:
```python
from __future__ import annotations
import random
from typing import Annotated
import strawberry
@strawberry.type
class Query:
@strawberry.field
def dice_roll(
self,
sides: Annotated[
int,
strawberry.argument(description="Number of sides the die should have."),
] = 6,
) -> int:
return random.randint(1, sides)
strawberry.Schema(query=Query)
```
The example above raises this TypeError:
```
TypeError: Query fields cannot be resolved. Unexpected type 'typing.Annotated[int, <strawberry.arguments.StrawberryArgumentAnnotation object at 0x7fd12e130d00>]'
```
When the first line (`from __future__ import annotations`) is left out, everything works as intended. This will probably also break once Python 3.11 lands, since the behavior will become mandatory then. #1586 refers to a somewhat related issue.
</issue>
<code>
[start of strawberry/auto.py]
1 from __future__ import annotations
2
3 from typing import Any, Optional, Union, cast
4
5 from typing_extensions import Annotated, get_args, get_origin
6
7 from strawberry.type import StrawberryType
8
9 from .annotation import StrawberryAnnotation
10
11
12 class StrawberryAutoMeta(type):
13 """Metaclass for StrawberryAuto.
14
15 This is used to make sure StrawberryAuto is a singleton and also to
16 override the behavior of `isinstance` so that it consider the following
17 cases:
18
19 >> isinstance(StrawberryAuto(), StrawberryAuto)
20 True
21 >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)
22 True
23 >> isinstance(Annotated[StrawberryAuto(), object()), StrawberryAuto)
24 True
25
26 """
27
28 def __init__(self, *args, **kwargs):
29 self._instance: Optional[StrawberryAuto] = None
30 super().__init__(*args, **kwargs)
31
32 def __call__(cls, *args, **kwargs):
33 if cls._instance is None:
34 cls._instance = super().__call__(*args, **kwargs)
35
36 return cls._instance
37
38 def __instancecheck__(
39 self,
40 instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],
41 ):
42 if isinstance(instance, StrawberryAnnotation):
43 resolved = instance.annotation
44 if isinstance(resolved, str):
45 namespace = instance.namespace
46 resolved = namespace and namespace.get(resolved)
47
48 if resolved is not None:
49 instance = cast(type, resolved)
50
51 if instance is auto:
52 return True
53
54 # Support uses of Annotated[auto, something()]
55 if get_origin(instance) is Annotated:
56 args = get_args(instance)
57 if args[0] is Any:
58 return any(isinstance(arg, StrawberryAuto) for arg in args[1:])
59
60 return False
61
62
63 class StrawberryAuto(metaclass=StrawberryAutoMeta):
64 def __str__(self):
65 return "auto"
66
67 def __repr__(self):
68 return "<auto>"
69
70
71 auto = Annotated[Any, StrawberryAuto()]
72
[end of strawberry/auto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/auto.py b/strawberry/auto.py
--- a/strawberry/auto.py
+++ b/strawberry/auto.py
@@ -57,7 +57,7 @@
if args[0] is Any:
return any(isinstance(arg, StrawberryAuto) for arg in args[1:])
- return False
+ return instance == "strawberry.auto"
class StrawberryAuto(metaclass=StrawberryAutoMeta):
| {"golden_diff": "diff --git a/strawberry/auto.py b/strawberry/auto.py\n--- a/strawberry/auto.py\n+++ b/strawberry/auto.py\n@@ -57,7 +57,7 @@\n if args[0] is Any:\n return any(isinstance(arg, StrawberryAuto) for arg in args[1:])\n \n- return False\n+ return instance == \"strawberry.auto\"\n \n \n class StrawberryAuto(metaclass=StrawberryAutoMeta):\n", "issue": "Postponed annotation evaluation causes `Annotated` to break\nWhen using postponed annotation evaluation, annotating resolver arguments no longer works:\r\n\r\n```python\r\nfrom __future__ import annotations\r\n\r\nimport random\r\nfrom typing import Annotated\r\n\r\nimport strawberry\r\n\r\n\r\[email protected]\r\nclass Query:\r\n @strawberry.field\r\n def dice_roll(\r\n self,\r\n sides: Annotated[\r\n int,\r\n strawberry.argument(description=\"Number of sides the die should have.\"),\r\n ] = 6,\r\n ) -> int:\r\n return random.randint(1, sides)\r\n\r\n\r\nstrawberry.Schema(query=Query)\r\n```\r\n\r\nThe example above raises this TypeError:\r\n\r\n```\r\nTypeError: Query fields cannot be resolved. Unexpected type 'typing.Annotated[int, <strawberry.arguments.StrawberryArgumentAnnotation object at 0x7fd12e130d00>]'\r\n```\r\n\r\nWhen the first line (`from __future__ import annotations`) is left out, everything works as intended. This will probably also break once Python 3.11 lands, since the behavior will become mandatory then. #1586 refers to a somewhat related issue.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Optional, Union, cast\n\nfrom typing_extensions import Annotated, get_args, get_origin\n\nfrom strawberry.type import StrawberryType\n\nfrom .annotation import StrawberryAnnotation\n\n\nclass StrawberryAutoMeta(type):\n \"\"\"Metaclass for StrawberryAuto.\n\n This is used to make sure StrawberryAuto is a singleton and also to\n override the behavior of `isinstance` so that it consider the following\n cases:\n\n >> isinstance(StrawberryAuto(), StrawberryAuto)\n True\n >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)\n True\n >> isinstance(Annotated[StrawberryAuto(), object()), StrawberryAuto)\n True\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._instance: Optional[StrawberryAuto] = None\n super().__init__(*args, **kwargs)\n\n def __call__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super().__call__(*args, **kwargs)\n\n return cls._instance\n\n def __instancecheck__(\n self,\n instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],\n ):\n if isinstance(instance, StrawberryAnnotation):\n resolved = instance.annotation\n if isinstance(resolved, str):\n namespace = instance.namespace\n resolved = namespace and namespace.get(resolved)\n\n if resolved is not None:\n instance = cast(type, resolved)\n\n if instance is auto:\n return True\n\n # Support uses of Annotated[auto, something()]\n if get_origin(instance) is Annotated:\n args = get_args(instance)\n if args[0] is Any:\n return any(isinstance(arg, StrawberryAuto) for arg in args[1:])\n\n return False\n\n\nclass StrawberryAuto(metaclass=StrawberryAutoMeta):\n def __str__(self):\n return \"auto\"\n\n def __repr__(self):\n return \"<auto>\"\n\n\nauto = Annotated[Any, StrawberryAuto()]\n", "path": "strawberry/auto.py"}]} | 1,349 | 105 |
gh_patches_debug_41072 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSeg-1746 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
paddleseg/models/hrnet_contrast.py 中没有执行 init_weight
paddleseg/models/hrnet_contrast.py 中__init__()没有执行 init_weight,导致hrnet_w48_contrast 没法加载完整的模型
</issue>
<code>
[start of paddleseg/models/hrnet_contrast.py]
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import paddle
16 import paddle.nn as nn
17 import paddle.nn.functional as F
18
19 from paddleseg.cvlibs import manager
20 from paddleseg.models import layers
21 from paddleseg.utils import utils
22
23
24 @manager.MODELS.add_component
25 class HRNetW48Contrast(nn.Layer):
26 """
27 The HRNetW48Contrast implementation based on PaddlePaddle.
28
29 The original article refers to
30 Wenguan Wang, Tianfei Zhou, et al. "Exploring Cross-Image Pixel Contrast for Semantic Segmentation"
31 (https://arxiv.org/abs/2101.11939).
32
33 Args:
34 in_channels (int): The output dimensions of backbone.
35 num_classes (int): The unique number of target classes.
36 backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.
37 drop_prob (float): The probability of dropout.
38 proj_dim (int): The projection dimensions.
39 align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
40 e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
41 pretrained (str, optional): The path or url of pretrained model. Default: None.
42 """
43 def __init__(self,
44 in_channels,
45 num_classes,
46 backbone,
47 drop_prob,
48 proj_dim,
49 align_corners=False,
50 pretrained=None):
51 super().__init__()
52 self.in_channels = in_channels
53 self.backbone = backbone
54 self.num_classes = num_classes
55 self.proj_dim = proj_dim
56 self.align_corners = align_corners
57 self.pretrained = pretrained
58
59 self.cls_head = nn.Sequential(
60 layers.ConvBNReLU(in_channels,
61 in_channels,
62 kernel_size=3,
63 stride=1,
64 padding=1),
65 nn.Dropout2D(drop_prob),
66 nn.Conv2D(in_channels,
67 num_classes,
68 kernel_size=1,
69 stride=1,
70 bias_attr=False),
71 )
72 self.proj_head = ProjectionHead(dim_in=in_channels,
73 proj_dim=self.proj_dim)
74
75 def init_weight(self):
76 if self.pretrained is not None:
77 utils.load_entire_model(self, self.pretrained)
78
79 def forward(self, x):
80 feats = self.backbone(x)[0]
81 out = self.cls_head(feats)
82 logit_list = []
83 if self.training:
84 emb = self.proj_head(feats)
85 logit_list.append(
86 F.interpolate(out,
87 paddle.shape(x)[2:],
88 mode='bilinear',
89 align_corners=self.align_corners))
90 logit_list.append({'seg': out, 'embed': emb})
91 else:
92 logit_list.append(
93 F.interpolate(out,
94 paddle.shape(x)[2:],
95 mode='bilinear',
96 align_corners=self.align_corners))
97 return logit_list
98
99
100 class ProjectionHead(nn.Layer):
101 """
102 The projection head used by contrast learning.
103 Args:
104 dim_in (int): The dimensions of input features.
105 proj_dim (int, optional): The output dimensions of projection head. Default: 256.
106 proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
107 """
108 def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
109 super(ProjectionHead, self).__init__()
110 if proj == 'linear':
111 self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)
112 elif proj == 'convmlp':
113 self.proj = nn.Sequential(
114 layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),
115 nn.Conv2D(dim_in, proj_dim, kernel_size=1),
116 )
117 else:
118 raise ValueError(
119 "The type of project head only support 'linear' and 'convmlp', but got {}."
120 .format(proj))
121
122 def forward(self, x):
123 return F.normalize(self.proj(x), p=2, axis=1)
124
[end of paddleseg/models/hrnet_contrast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py
--- a/paddleseg/models/hrnet_contrast.py
+++ b/paddleseg/models/hrnet_contrast.py
@@ -40,6 +40,7 @@
e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
pretrained (str, optional): The path or url of pretrained model. Default: None.
"""
+
def __init__(self,
in_channels,
num_classes,
@@ -54,23 +55,23 @@
self.num_classes = num_classes
self.proj_dim = proj_dim
self.align_corners = align_corners
- self.pretrained = pretrained
self.cls_head = nn.Sequential(
- layers.ConvBNReLU(in_channels,
- in_channels,
- kernel_size=3,
- stride=1,
- padding=1),
+ layers.ConvBNReLU(
+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),
nn.Dropout2D(drop_prob),
- nn.Conv2D(in_channels,
- num_classes,
- kernel_size=1,
- stride=1,
- bias_attr=False),
+ nn.Conv2D(
+ in_channels,
+ num_classes,
+ kernel_size=1,
+ stride=1,
+ bias_attr=False),
)
- self.proj_head = ProjectionHead(dim_in=in_channels,
- proj_dim=self.proj_dim)
+ self.proj_head = ProjectionHead(
+ dim_in=in_channels, proj_dim=self.proj_dim)
+
+ self.pretrained = pretrained
+ self.init_weight()
def init_weight(self):
if self.pretrained is not None:
@@ -83,17 +84,19 @@
if self.training:
emb = self.proj_head(feats)
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
logit_list.append({'seg': out, 'embed': emb})
else:
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
return logit_list
@@ -105,6 +108,7 @@
proj_dim (int, optional): The output dimensions of projection head. Default: 256.
proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
"""
+
def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
super(ProjectionHead, self).__init__()
if proj == 'linear':
| {"golden_diff": "diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py\n--- a/paddleseg/models/hrnet_contrast.py\n+++ b/paddleseg/models/hrnet_contrast.py\n@@ -40,6 +40,7 @@\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n+\n def __init__(self,\n in_channels,\n num_classes,\n@@ -54,23 +55,23 @@\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n- self.pretrained = pretrained\n \n self.cls_head = nn.Sequential(\n- layers.ConvBNReLU(in_channels,\n- in_channels,\n- kernel_size=3,\n- stride=1,\n- padding=1),\n+ layers.ConvBNReLU(\n+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),\n nn.Dropout2D(drop_prob),\n- nn.Conv2D(in_channels,\n- num_classes,\n- kernel_size=1,\n- stride=1,\n- bias_attr=False),\n+ nn.Conv2D(\n+ in_channels,\n+ num_classes,\n+ kernel_size=1,\n+ stride=1,\n+ bias_attr=False),\n )\n- self.proj_head = ProjectionHead(dim_in=in_channels,\n- proj_dim=self.proj_dim)\n+ self.proj_head = ProjectionHead(\n+ dim_in=in_channels, proj_dim=self.proj_dim)\n+\n+ self.pretrained = pretrained\n+ self.init_weight()\n \n def init_weight(self):\n if self.pretrained is not None:\n@@ -83,17 +84,19 @@\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n return logit_list\n \n \n@@ -105,6 +108,7 @@\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.\n \"\"\"\n+\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n", "issue": "paddleseg/models/hrnet_contrast.py \u4e2d\u6ca1\u6709\u6267\u884c init_weight\npaddleseg/models/hrnet_contrast.py \u4e2d__init__()\u6ca1\u6709\u6267\u884c init_weight\uff0c\u5bfc\u81f4hrnet_w48_contrast \u6ca1\u6cd5\u52a0\u8f7d\u5b8c\u6574\u7684\u6a21\u578b\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n\[email protected]_component\nclass HRNetW48Contrast(nn.Layer):\n \"\"\"\n The HRNetW48Contrast implementation based on PaddlePaddle.\n\n The original article refers to\n Wenguan Wang, Tianfei Zhou, et al. \"Exploring Cross-Image Pixel Contrast for Semantic Segmentation\"\n (https://arxiv.org/abs/2101.11939).\n\n Args:\n in_channels (int): The output dimensions of backbone.\n num_classes (int): The unique number of target classes.\n backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.\n drop_prob (float): The probability of dropout.\n proj_dim (int): The projection dimensions.\n align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n def __init__(self,\n in_channels,\n num_classes,\n backbone,\n drop_prob,\n proj_dim,\n align_corners=False,\n pretrained=None):\n super().__init__()\n self.in_channels = in_channels\n self.backbone = backbone\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n self.pretrained = pretrained\n\n self.cls_head = nn.Sequential(\n layers.ConvBNReLU(in_channels,\n in_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.Dropout2D(drop_prob),\n nn.Conv2D(in_channels,\n num_classes,\n kernel_size=1,\n stride=1,\n bias_attr=False),\n )\n self.proj_head = ProjectionHead(dim_in=in_channels,\n proj_dim=self.proj_dim)\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n feats = self.backbone(x)[0]\n out = self.cls_head(feats)\n logit_list = []\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n return logit_list\n\n\nclass ProjectionHead(nn.Layer):\n \"\"\"\n The projection head used by contrast learning.\n Args:\n dim_in (int): The dimensions of input features.\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. 
Default: 'convmlp'.\n \"\"\"\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)\n elif proj == 'convmlp':\n self.proj = nn.Sequential(\n layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),\n nn.Conv2D(dim_in, proj_dim, kernel_size=1),\n )\n else:\n raise ValueError(\n \"The type of project head only support 'linear' and 'convmlp', but got {}.\"\n .format(proj))\n\n def forward(self, x):\n return F.normalize(self.proj(x), p=2, axis=1)\n", "path": "paddleseg/models/hrnet_contrast.py"}]} | 1,881 | 702 |
gh_patches_debug_1640 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
measure.label is documented under morphology.label
In the [measure API reference](http://scikit-image.org/docs/stable/api/skimage.measure.html) label is not documented, but it is [documented under morphology module](http://scikit-image.org/docs/stable/api/skimage.morphology.html#label) (which is depreciated).
</issue>
<code>
[start of skimage/measure/__init__.py]
1 from ._find_contours import find_contours
2 from ._marching_cubes import (marching_cubes, mesh_surface_area,
3 correct_mesh_orientation)
4 from ._regionprops import regionprops, perimeter
5 from ._structural_similarity import structural_similarity
6 from ._polygon import approximate_polygon, subdivide_polygon
7 from ._pnpoly import points_in_poly, grid_points_in_poly
8 from ._moments import moments, moments_central, moments_normalized, moments_hu
9 from .profile import profile_line
10 from .fit import LineModel, CircleModel, EllipseModel, ransac
11 from .block import block_reduce
12 from ._ccomp import label
13
14
15 __all__ = ['find_contours',
16 'regionprops',
17 'perimeter',
18 'structural_similarity',
19 'approximate_polygon',
20 'subdivide_polygon',
21 'LineModel',
22 'CircleModel',
23 'EllipseModel',
24 'ransac',
25 'block_reduce',
26 'moments',
27 'moments_central',
28 'moments_normalized',
29 'moments_hu',
30 'marching_cubes',
31 'mesh_surface_area',
32 'correct_mesh_orientation',
33 'profile_line',
34 'label',
35 'points_in_poly',
36 'grid_points_in_poly']
37
[end of skimage/measure/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/measure/__init__.py b/skimage/measure/__init__.py
--- a/skimage/measure/__init__.py
+++ b/skimage/measure/__init__.py
@@ -9,7 +9,7 @@
from .profile import profile_line
from .fit import LineModel, CircleModel, EllipseModel, ransac
from .block import block_reduce
-from ._ccomp import label
+from ._label import label
__all__ = ['find_contours',
| {"golden_diff": "diff --git a/skimage/measure/__init__.py b/skimage/measure/__init__.py\n--- a/skimage/measure/__init__.py\n+++ b/skimage/measure/__init__.py\n@@ -9,7 +9,7 @@\n from .profile import profile_line\n from .fit import LineModel, CircleModel, EllipseModel, ransac\n from .block import block_reduce\n-from ._ccomp import label\n+from ._label import label\n \n \n __all__ = ['find_contours',\n", "issue": "measure.label is documented under morphology.label\nIn the [measure API reference](http://scikit-image.org/docs/stable/api/skimage.measure.html) label is not documented, but it is [documented under morphology module](http://scikit-image.org/docs/stable/api/skimage.morphology.html#label) (which is depreciated).\n\n", "before_files": [{"content": "from ._find_contours import find_contours\nfrom ._marching_cubes import (marching_cubes, mesh_surface_area,\n correct_mesh_orientation)\nfrom ._regionprops import regionprops, perimeter\nfrom ._structural_similarity import structural_similarity\nfrom ._polygon import approximate_polygon, subdivide_polygon\nfrom ._pnpoly import points_in_poly, grid_points_in_poly\nfrom ._moments import moments, moments_central, moments_normalized, moments_hu\nfrom .profile import profile_line\nfrom .fit import LineModel, CircleModel, EllipseModel, ransac\nfrom .block import block_reduce\nfrom ._ccomp import label\n\n\n__all__ = ['find_contours',\n 'regionprops',\n 'perimeter',\n 'structural_similarity',\n 'approximate_polygon',\n 'subdivide_polygon',\n 'LineModel',\n 'CircleModel',\n 'EllipseModel',\n 'ransac',\n 'block_reduce',\n 'moments',\n 'moments_central',\n 'moments_normalized',\n 'moments_hu',\n 'marching_cubes',\n 'mesh_surface_area',\n 'correct_mesh_orientation',\n 'profile_line',\n 'label',\n 'points_in_poly',\n 'grid_points_in_poly']\n", "path": "skimage/measure/__init__.py"}]} | 939 | 115 |
gh_patches_debug_128 | rasdani/github-patches | git_diff | opsdroid__opsdroid-28 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Regex case sensitive
The regex match is currently case insensitive. It shouldn't be.
https://github.com/opsdroid/opsdroid/blob/master/opsdroid/helper.py#L30
</issue>
<code>
[start of opsdroid/helper.py]
1 """Helper functions to use within OpsDroid."""
2
3 import logging
4 import re
5
6
7 def set_logging_level(logging_level):
8 """Set the logger level based on the user configuration."""
9 logger = logging.getLogger()
10 if logging_level == 'critical':
11 logger.setLevel(logging.CRITICAL)
12 elif logging_level == 'error':
13 logger.setLevel(logging.ERROR)
14 elif logging_level == 'warning':
15 logger.setLevel(logging.WARNING)
16 elif logging_level == 'info':
17 logger.setLevel(logging.INFO)
18 elif logging_level == 'debug':
19 logger.setLevel(logging.DEBUG)
20 # No need to log the others as they'll never be seen
21 logging.debug("Set log level to debug")
22 else:
23 logger.setLevel(logging.INFO)
24 logging.warning("Log level '" + logging_level +
25 "' unknown, defaulting to 'info'")
26
27
28 def match(regex, message):
29 """Regex match a string."""
30 return re.match(regex, message, re.M | re.I)
31
[end of opsdroid/helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/helper.py b/opsdroid/helper.py
--- a/opsdroid/helper.py
+++ b/opsdroid/helper.py
@@ -27,4 +27,4 @@
def match(regex, message):
"""Regex match a string."""
- return re.match(regex, message, re.M | re.I)
+ return re.match(regex, message)
| {"golden_diff": "diff --git a/opsdroid/helper.py b/opsdroid/helper.py\n--- a/opsdroid/helper.py\n+++ b/opsdroid/helper.py\n@@ -27,4 +27,4 @@\n \n def match(regex, message):\n \"\"\"Regex match a string.\"\"\"\n- return re.match(regex, message, re.M | re.I)\n+ return re.match(regex, message)\n", "issue": "Regex case sensitive\nThe regex match is currently case insensitive. It shouldn't be.\n\nhttps://github.com/opsdroid/opsdroid/blob/master/opsdroid/helper.py#L30\n\n", "before_files": [{"content": "\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport logging\nimport re\n\n\ndef set_logging_level(logging_level):\n \"\"\"Set the logger level based on the user configuration.\"\"\"\n logger = logging.getLogger()\n if logging_level == 'critical':\n logger.setLevel(logging.CRITICAL)\n elif logging_level == 'error':\n logger.setLevel(logging.ERROR)\n elif logging_level == 'warning':\n logger.setLevel(logging.WARNING)\n elif logging_level == 'info':\n logger.setLevel(logging.INFO)\n elif logging_level == 'debug':\n logger.setLevel(logging.DEBUG)\n # No need to log the others as they'll never be seen\n logging.debug(\"Set log level to debug\")\n else:\n logger.setLevel(logging.INFO)\n logging.warning(\"Log level '\" + logging_level +\n \"' unknown, defaulting to 'info'\")\n\n\ndef match(regex, message):\n \"\"\"Regex match a string.\"\"\"\n return re.match(regex, message, re.M | re.I)\n", "path": "opsdroid/helper.py"}]} | 830 | 87 |
gh_patches_debug_10841 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-435 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Manual creation of Languages results in Bugsplash at page view.
### Describe the Bug
If a system is set up manually without the test data. The manual language setup results in a error when opening the list view of the pages.

### Steps to Reproduce
1. Create Languages (Like German with code DE_de)
2. Create Region (Like Berlin)
3. Add Language Tree model like German -> English
4. Click on Pages and see error
### Expected Behavior
The usual list should be displayed.
### Actual Behavior
Error message from Django.
### Additional Information
I guess this has something to do with manually setting the language code and this can't be matched by django.
Manual creation of Languages results in Bugsplash at page view.
### Describe the Bug
If a system is set up manually without the test data. The manual language setup results in a error when opening the list view of the pages.

### Steps to Reproduce
1. Create Languages (Like German with code DE_de)
2. Create Region (Like Berlin)
3. Add Language Tree model like German -> English
4. Click on Pages and see error
### Expected Behavior
The usual list should be displayed.
### Actual Behavior
Error message from Django.
### Additional Information
I guess this has something to do with manually setting the language code and this can't be matched by django.
</issue>
<code>
[start of src/cms/templatetags/content_filters.py]
1 import logging
2
3 from django import template
4
5 from ..models import Language
6
7 logger = logging.getLogger(__name__)
8 register = template.Library()
9
10
11 @register.simple_tag
12 def get_translation(instance, language_code):
13 return instance.translations.filter(language__code=language_code).first()
14
15
16 @register.simple_tag
17 def translated_language_name(language_code):
18 return Language.objects.get(code=language_code).translated_name
19
20 @register.simple_tag
21 def get_language(language_code):
22 return Language.objects.get(code=language_code)
23
24 # Unify the language codes of backend and content languages
25 @register.simple_tag
26 def unify_language_code(language_code):
27 if language_code == 'en-gb':
28 return 'en-us'
29 return language_code
30
31
32 @register.filter
33 def get_int_list(data, list_name):
34 return [int(item) for item in data.getlist(list_name)]
35
36
37 @register.filter
38 def is_empty(iterable):
39 return not bool(iterable)
40
[end of src/cms/templatetags/content_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/templatetags/content_filters.py b/src/cms/templatetags/content_filters.py
--- a/src/cms/templatetags/content_filters.py
+++ b/src/cms/templatetags/content_filters.py
@@ -15,11 +15,14 @@
@register.simple_tag
def translated_language_name(language_code):
- return Language.objects.get(code=language_code).translated_name
+ language = Language.objects.filter(code=language_code)
+ if language.exists():
+ return language.first().translated_name
+ return ''
@register.simple_tag
def get_language(language_code):
- return Language.objects.get(code=language_code)
+ return Language.objects.filter(code=language_code).first()
# Unify the language codes of backend and content languages
@register.simple_tag
| {"golden_diff": "diff --git a/src/cms/templatetags/content_filters.py b/src/cms/templatetags/content_filters.py\n--- a/src/cms/templatetags/content_filters.py\n+++ b/src/cms/templatetags/content_filters.py\n@@ -15,11 +15,14 @@\n \n @register.simple_tag\n def translated_language_name(language_code):\n- return Language.objects.get(code=language_code).translated_name\n+ language = Language.objects.filter(code=language_code)\n+ if language.exists():\n+ return language.first().translated_name\n+ return ''\n \n @register.simple_tag\n def get_language(language_code):\n- return Language.objects.get(code=language_code)\n+ return Language.objects.filter(code=language_code).first()\n \n # Unify the language codes of backend and content languages\n @register.simple_tag\n", "issue": "Manual creation of Languages results in Bugsplash at page view.\n### Describe the Bug\r\nIf a system is set up manually without the test data. The manual language setup results in a error when opening the list view of the pages.\r\n\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create Languages (Like German with code DE_de)\r\n2. Create Region (Like Berlin)\r\n3. Add Language Tree model like German -> English\r\n4. Click on Pages and see error\r\n\r\n### Expected Behavior\r\nThe usual list should be displayed. \r\n\r\n\r\n### Actual Behavior\r\nError message from Django.\r\n\r\n\r\n### Additional Information\r\nI guess this has something to do with manually setting the language code and this can't be matched by django.\r\n\r\n\nManual creation of Languages results in Bugsplash at page view.\n### Describe the Bug\r\nIf a system is set up manually without the test data. The manual language setup results in a error when opening the list view of the pages.\r\n\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create Languages (Like German with code DE_de)\r\n2. Create Region (Like Berlin)\r\n3. Add Language Tree model like German -> English\r\n4. Click on Pages and see error\r\n\r\n### Expected Behavior\r\nThe usual list should be displayed. \r\n\r\n\r\n### Actual Behavior\r\nError message from Django.\r\n\r\n\r\n### Additional Information\r\nI guess this has something to do with manually setting the language code and this can't be matched by django.\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django import template\n\nfrom ..models import Language\n\nlogger = logging.getLogger(__name__)\nregister = template.Library()\n\n\[email protected]_tag\ndef get_translation(instance, language_code):\n return instance.translations.filter(language__code=language_code).first()\n\n\[email protected]_tag\ndef translated_language_name(language_code):\n return Language.objects.get(code=language_code).translated_name\n\[email protected]_tag\ndef get_language(language_code):\n return Language.objects.get(code=language_code)\n\n# Unify the language codes of backend and content languages\[email protected]_tag\ndef unify_language_code(language_code):\n if language_code == 'en-gb':\n return 'en-us'\n return language_code\n\n\[email protected]\ndef get_int_list(data, list_name):\n return [int(item) for item in data.getlist(list_name)]\n\n\[email protected]\ndef is_empty(iterable):\n return not bool(iterable)\n", "path": "src/cms/templatetags/content_filters.py"}]} | 1,231 | 179 |
gh_patches_debug_229 | rasdani/github-patches | git_diff | facebookresearch__hydra-1808 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] hydra-optuna-sweeper 1.1.0 requires numpy<1.20.0
# 🐛 Bug
## Description
<!-- A clear and concise description of what the bug is. -->
I used the guide from
https://hydra.cc/docs/plugins/optuna_sweeper/
And install hydra-optuna-sweeper:
```bash
pip install hydra-optuna-sweeper --upgrade
```
But it seems this plugin requires numpy<1.20.0:

**Edit:**
I searched for optuna's requirements, found this:
https://github.com/optuna/optuna/blob/cbae80476c15b6d39e1d8851dc6a501c63c3ca92/setup.py#L35
Why hydra-optuna-sweeper need to use numpy<1.20.0?
</issue>
<code>
[start of plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
3 __version__ = "1.1.0"
4
[end of plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py
--- a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py
+++ b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py
@@ -1,3 +1,3 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-__version__ = "1.1.0"
+__version__ = "1.1.1"
| {"golden_diff": "diff --git a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py\n--- a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py\n+++ b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py\n@@ -1,3 +1,3 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n \n-__version__ = \"1.1.0\"\n+__version__ = \"1.1.1\"\n", "issue": "[Bug] hydra-optuna-sweeper 1.1.0 requires numpy<1.20.0\n# \ud83d\udc1b Bug\r\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nI used the guide from\r\nhttps://hydra.cc/docs/plugins/optuna_sweeper/\r\n\r\nAnd install hydra-optuna-sweeper:\r\n\r\n```bash \r\npip install hydra-optuna-sweeper --upgrade\r\n```\r\n\r\nBut it seems this plugin requires numpy<1.20.0:\r\n\r\n\r\n\r\n**Edit:**\r\n\r\nI searched for optuna's requirements, found this:\r\n\r\nhttps://github.com/optuna/optuna/blob/cbae80476c15b6d39e1d8851dc6a501c63c3ca92/setup.py#L35\r\n\r\nWhy hydra-optuna-sweeper need to use numpy<1.20.0?\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n__version__ = \"1.1.0\"\n", "path": "plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py"}]} | 874 | 164 |
gh_patches_debug_43516 | rasdani/github-patches | git_diff | ResonantGeoData__ResonantGeoData-577 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Client: API token saving bug on MacOS
@banesullivan was experiencing the following issue:
When calling `create_rgd_client`, if there isn't already a token stored locally (in `$HOME/.rgd/token`), the client is supposed to make a request to the server to create it, and save it in that file. On MacOS, it seems this might not be occurring. The file doesn't appear to ever be created (notably though, the `.rgd` folder _is_ present). Furthermore, if you try to manually populate that file with your token, it will correctly read it, but the file will then be gone afterwards.
This doesn't actually affect authorization, as it still just fetches the token from the API and stores it in memory, but the storage issue needs to be looked into.
</issue>
<code>
[start of django-rgd/client/rgd_client/client.py]
1 import getpass
2 import os
3 from typing import List, Optional, Type
4
5 import requests
6
7 from .plugin import CorePlugin
8 from .session import RgdClientSession, clone_session
9 from .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API
10
11
12 class RgdClient:
13 def __init__(
14 self,
15 api_url: str = DEFAULT_RGD_API,
16 username: Optional[str] = None,
17 password: Optional[str] = None,
18 save: Optional[bool] = True,
19 ) -> None:
20 """
21 Initialize the base RGD Client.
22
23 Args:
24 api_url: The base url of the RGD API instance.
25 username: The username to authenticate to the instance with, if any.
26 password: The password associated with the provided username. If None, a prompt will be provided.
27 save: Whether or not to save the logged-in user's API key to disk for future use.
28
29 Returns:
30 A base RgdClient instance.
31 """
32 # Look for an API key in the environment. If it's not there, check username/password
33 api_key = _read_api_key()
34 if api_key is None:
35 if username is not None and password is None:
36 password = getpass.getpass()
37
38 # Get an API key for this user and save it to disk
39 if username and password:
40 api_key = _get_api_key(api_url, username, password, save)
41
42 auth_header = f'Token {api_key}'
43
44 self.session = RgdClientSession(base_url=api_url, auth_header=auth_header)
45 self.rgd = CorePlugin(clone_session(self.session))
46
47 def clear_token(self):
48 """Delete a locally-stored API key."""
49 (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)
50
51
52 def _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:
53 """Get an RGD API Key for the given user from the server, and save it if requested."""
54 resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})
55 resp.raise_for_status()
56 token = resp.json()['token']
57 if save:
58 API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)
59 with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:
60 fd.write(token)
61 return token
62
63
64 def _read_api_key() -> Optional[str]:
65 """
66 Retrieve an RGD API Key from the users environment.
67
68 This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.
69 If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.
70 """
71 token = os.getenv('RGD_API_TOKEN', None)
72 if token is not None:
73 return token
74
75 try:
76 # read the first line of the text file at ~/.rgd/token
77 with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:
78 return fd.readline().strip()
79 except FileNotFoundError:
80 return None
81
82
83 def create_rgd_client(
84 api_url: str = DEFAULT_RGD_API,
85 username: Optional[str] = None,
86 password: Optional[str] = None,
87 save: Optional[bool] = True,
88 extra_plugins: Optional[List[Type]] = None,
89 ):
90 # Avoid circular import
91 from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances
92
93 # Create initial client
94 client = RgdClient(api_url, username, password, save)
95
96 # Perform plugin initialization
97 plugin_classes = _plugin_classes(extra_plugins=extra_plugins)
98 plugin_instances = _plugin_instances(client, plugin_classes)
99 _inject_plugin_deps(plugin_instances)
100
101 return client
102
[end of django-rgd/client/rgd_client/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django-rgd/client/rgd_client/client.py b/django-rgd/client/rgd_client/client.py
--- a/django-rgd/client/rgd_client/client.py
+++ b/django-rgd/client/rgd_client/client.py
@@ -1,4 +1,5 @@
import getpass
+import logging
import os
from typing import List, Optional, Type
@@ -8,6 +9,8 @@
from .session import RgdClientSession, clone_session
from .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API
+logger = logging.getLogger(__name__)
+
class RgdClient:
def __init__(
@@ -30,7 +33,7 @@
A base RgdClient instance.
"""
# Look for an API key in the environment. If it's not there, check username/password
- api_key = _read_api_key()
+ api_key = _read_api_key(api_url=api_url, username=username, password=password)
if api_key is None:
if username is not None and password is None:
password = getpass.getpass()
@@ -38,6 +41,10 @@
# Get an API key for this user and save it to disk
if username and password:
api_key = _get_api_key(api_url, username, password, save)
+ if api_key is None:
+ logger.error(
+ 'Failed to retrieve API key; are your username and password correct?'
+ )
auth_header = f'Token {api_key}'
@@ -49,11 +56,12 @@
(API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)
-def _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:
+def _get_api_key(api_url: str, username: str, password: str, save: bool) -> Optional[str]:
"""Get an RGD API Key for the given user from the server, and save it if requested."""
resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})
- resp.raise_for_status()
- token = resp.json()['token']
+ token = resp.json().get('token')
+ if token is None:
+ return None
if save:
API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)
with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:
@@ -61,7 +69,7 @@
return token
-def _read_api_key() -> Optional[str]:
+def _read_api_key(api_url: str, username: str = None, password: str = None) -> Optional[str]:
"""
Retrieve an RGD API Key from the users environment.
@@ -75,10 +83,29 @@
try:
# read the first line of the text file at ~/.rgd/token
with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:
- return fd.readline().strip()
+ api_key = fd.readline().strip()
except FileNotFoundError:
return None
+ # Make sure API key works by hitting a protected endpoint
+ resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})
+
+ # If it doesn't, try to get a new one and save it to ~/.rgd/token, as the current one is corrupted
+ if resp.status_code == 401:
+ logger.error('API key is invalid.')
+ # If username + password were provided, try to get a new API key with them
+ if username is not None and password is not None:
+ logger.warning('Attempting to fetch a new API key...')
+ api_key = _get_api_key(api_url, username, password, save=True)
+ if api_key is not None:
+ logger.warning('Succeeded.')
+ return api_key
+ else:
+ logger.error('Provide your username and password next time to fetch a new one.')
+ return None
+
+ return api_key
+
def create_rgd_client(
api_url: str = DEFAULT_RGD_API,
| {"golden_diff": "diff --git a/django-rgd/client/rgd_client/client.py b/django-rgd/client/rgd_client/client.py\n--- a/django-rgd/client/rgd_client/client.py\n+++ b/django-rgd/client/rgd_client/client.py\n@@ -1,4 +1,5 @@\n import getpass\n+import logging\n import os\n from typing import List, Optional, Type\n \n@@ -8,6 +9,8 @@\n from .session import RgdClientSession, clone_session\n from .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API\n \n+logger = logging.getLogger(__name__)\n+\n \n class RgdClient:\n def __init__(\n@@ -30,7 +33,7 @@\n A base RgdClient instance.\n \"\"\"\n # Look for an API key in the environment. If it's not there, check username/password\n- api_key = _read_api_key()\n+ api_key = _read_api_key(api_url=api_url, username=username, password=password)\n if api_key is None:\n if username is not None and password is None:\n password = getpass.getpass()\n@@ -38,6 +41,10 @@\n # Get an API key for this user and save it to disk\n if username and password:\n api_key = _get_api_key(api_url, username, password, save)\n+ if api_key is None:\n+ logger.error(\n+ 'Failed to retrieve API key; are your username and password correct?'\n+ )\n \n auth_header = f'Token {api_key}'\n \n@@ -49,11 +56,12 @@\n (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)\n \n \n-def _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:\n+def _get_api_key(api_url: str, username: str, password: str, save: bool) -> Optional[str]:\n \"\"\"Get an RGD API Key for the given user from the server, and save it if requested.\"\"\"\n resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})\n- resp.raise_for_status()\n- token = resp.json()['token']\n+ token = resp.json().get('token')\n+ if token is None:\n+ return None\n if save:\n API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:\n@@ -61,7 +69,7 @@\n return token\n \n \n-def _read_api_key() -> Optional[str]:\n+def _read_api_key(api_url: str, username: str = None, password: str = None) -> Optional[str]:\n \"\"\"\n Retrieve an RGD API Key from the users environment.\n \n@@ -75,10 +83,29 @@\n try:\n # read the first line of the text file at ~/.rgd/token\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:\n- return fd.readline().strip()\n+ api_key = fd.readline().strip()\n except FileNotFoundError:\n return None\n \n+ # Make sure API key works by hitting a protected endpoint\n+ resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})\n+\n+ # If it doesn't, try to get a new one and save it to ~/.rgd/token, as the current one is corrupted\n+ if resp.status_code == 401:\n+ logger.error('API key is invalid.')\n+ # If username + password were provided, try to get a new API key with them\n+ if username is not None and password is not None:\n+ logger.warning('Attempting to fetch a new API key...')\n+ api_key = _get_api_key(api_url, username, password, save=True)\n+ if api_key is not None:\n+ logger.warning('Succeeded.')\n+ return api_key\n+ else:\n+ logger.error('Provide your username and password next time to fetch a new one.')\n+ return None\n+\n+ return api_key\n+\n \n def create_rgd_client(\n api_url: str = DEFAULT_RGD_API,\n", "issue": "Client: API token saving bug on MacOS\n@banesullivan was experiencing the following issue:\r\n\r\nWhen calling `create_rgd_client`, if there isn't already a token stored locally (in `$HOME/.rgd/token`), the client is supposed to make a 
request to the server to create it, and save it in that file. On MacOS, it seems this might not be occurring. The file doesn't appear to ever be created (notably though, the `.rgd` folder _is_ present). Furthermore, if you try to manually populate that file with your token, it will correctly read it, but the file will then be gone afterwards.\r\n\r\nThis doesn't actually affect authorization, as it still just fetches the token from the API and stores it in memory, but the storage issue needs to be looked into.\n", "before_files": [{"content": "import getpass\nimport os\nfrom typing import List, Optional, Type\n\nimport requests\n\nfrom .plugin import CorePlugin\nfrom .session import RgdClientSession, clone_session\nfrom .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API\n\n\nclass RgdClient:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n save: Optional[bool] = True,\n ) -> None:\n \"\"\"\n Initialize the base RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n save: Whether or not to save the logged-in user's API key to disk for future use.\n\n Returns:\n A base RgdClient instance.\n \"\"\"\n # Look for an API key in the environment. If it's not there, check username/password\n api_key = _read_api_key()\n if api_key is None:\n if username is not None and password is None:\n password = getpass.getpass()\n\n # Get an API key for this user and save it to disk\n if username and password:\n api_key = _get_api_key(api_url, username, password, save)\n\n auth_header = f'Token {api_key}'\n\n self.session = RgdClientSession(base_url=api_url, auth_header=auth_header)\n self.rgd = CorePlugin(clone_session(self.session))\n\n def clear_token(self):\n \"\"\"Delete a locally-stored API key.\"\"\"\n (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)\n\n\ndef _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:\n \"\"\"Get an RGD API Key for the given user from the server, and save it if requested.\"\"\"\n resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})\n resp.raise_for_status()\n token = resp.json()['token']\n if save:\n API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:\n fd.write(token)\n return token\n\n\ndef _read_api_key() -> Optional[str]:\n \"\"\"\n Retrieve an RGD API Key from the users environment.\n\n This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.\n If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.\n \"\"\"\n token = os.getenv('RGD_API_TOKEN', None)\n if token is not None:\n return token\n\n try:\n # read the first line of the text file at ~/.rgd/token\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:\n return fd.readline().strip()\n except FileNotFoundError:\n return None\n\n\ndef create_rgd_client(\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n save: Optional[bool] = True,\n extra_plugins: Optional[List[Type]] = None,\n):\n # Avoid circular import\n from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances\n\n # Create initial client\n client = RgdClient(api_url, username, password, 
save)\n\n # Perform plugin initialization\n plugin_classes = _plugin_classes(extra_plugins=extra_plugins)\n plugin_instances = _plugin_instances(client, plugin_classes)\n _inject_plugin_deps(plugin_instances)\n\n return client\n", "path": "django-rgd/client/rgd_client/client.py"}]} | 1,739 | 939 |
gh_patches_debug_18713 | rasdani/github-patches | git_diff | pypi__warehouse-3396 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing Purges
Noticed this while setting up a new mirror. We don't seem to be purging the `project/<normalized_name>` key when projects are deleted.
This leads bandersnatch to get confused and fall behind until the key is purged so that the JSON API returns a 404.
</issue>
<code>
[start of warehouse/packaging/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from celery.schedules import crontab
14 from sqlalchemy.orm.base import NO_VALUE
15
16 from warehouse import db
17 from warehouse.accounts.models import User, Email
18 from warehouse.cache.origin import key_factory, receive_set
19 from warehouse.packaging.interfaces import IFileStorage
20 from warehouse.packaging.models import Project, Release
21 from warehouse.packaging.tasks import compute_trending
22
23
24 @db.listens_for(User.name, 'set')
25 def user_name_receive_set(config, target, value, oldvalue, initiator):
26 if oldvalue is not NO_VALUE:
27 receive_set(User.name, config, target)
28
29
30 @db.listens_for(Email.primary, 'set')
31 def email_primary_receive_set(config, target, value, oldvalue, initiator):
32 if oldvalue is not NO_VALUE:
33 receive_set(Email.primary, config, target)
34
35
36 def includeme(config):
37 # Register whatever file storage backend has been configured for storing
38 # our package files.
39 storage_class = config.maybe_dotted(
40 config.registry.settings["files.backend"],
41 )
42 config.register_service_factory(storage_class.create_service, IFileStorage)
43
44 # Register our origin cache keys
45 config.register_origin_cache_keys(
46 Project,
47 cache_keys=["project/{obj.normalized_name}"],
48 purge_keys=[
49 key_factory("project/{obj.normalized_name}"),
50 key_factory("user/{itr.username}", iterate_on='users'),
51 key_factory("all-projects"),
52 ],
53 )
54 config.register_origin_cache_keys(
55 Release,
56 cache_keys=["project/{obj.project.normalized_name}"],
57 purge_keys=[
58 key_factory("project/{obj.project.normalized_name}"),
59 key_factory("user/{itr.username}", iterate_on='project.users'),
60 key_factory("all-projects"),
61 ],
62 )
63 config.register_origin_cache_keys(
64 User,
65 cache_keys=["user/{obj.username}"],
66 )
67 config.register_origin_cache_keys(
68 User.name,
69 purge_keys=[
70 key_factory("user/{obj.username}"),
71 key_factory("project/{itr.normalized_name}", iterate_on='projects')
72 ],
73 )
74 config.register_origin_cache_keys(
75 Email.primary,
76 purge_keys=[
77 key_factory("user/{obj.user.username}"),
78 key_factory(
79 "project/{itr.normalized_name}",
80 iterate_on='user.projects',
81 )
82 ],
83 )
84
85 # Add a periodic task to compute trending once a day, assuming we have
86 # been configured to be able to access BigQuery.
87 if config.get_settings().get("warehouse.trending_table"):
88 config.add_periodic_task(crontab(minute=0, hour=3), compute_trending)
89
[end of warehouse/packaging/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py
--- a/warehouse/packaging/__init__.py
+++ b/warehouse/packaging/__init__.py
@@ -17,7 +17,7 @@
from warehouse.accounts.models import User, Email
from warehouse.cache.origin import key_factory, receive_set
from warehouse.packaging.interfaces import IFileStorage
-from warehouse.packaging.models import Project, Release
+from warehouse.packaging.models import Project, Release, Role
from warehouse.packaging.tasks import compute_trending
@@ -60,6 +60,13 @@
key_factory("all-projects"),
],
)
+ config.register_origin_cache_keys(
+ Role,
+ purge_keys=[
+ key_factory("user/{obj.user.username}"),
+ key_factory("project/{obj.project.normalized_name}")
+ ],
+ )
config.register_origin_cache_keys(
User,
cache_keys=["user/{obj.username}"],
| {"golden_diff": "diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py\n--- a/warehouse/packaging/__init__.py\n+++ b/warehouse/packaging/__init__.py\n@@ -17,7 +17,7 @@\n from warehouse.accounts.models import User, Email\n from warehouse.cache.origin import key_factory, receive_set\n from warehouse.packaging.interfaces import IFileStorage\n-from warehouse.packaging.models import Project, Release\n+from warehouse.packaging.models import Project, Release, Role\n from warehouse.packaging.tasks import compute_trending\n \n \n@@ -60,6 +60,13 @@\n key_factory(\"all-projects\"),\n ],\n )\n+ config.register_origin_cache_keys(\n+ Role,\n+ purge_keys=[\n+ key_factory(\"user/{obj.user.username}\"),\n+ key_factory(\"project/{obj.project.normalized_name}\")\n+ ],\n+ )\n config.register_origin_cache_keys(\n User,\n cache_keys=[\"user/{obj.username}\"],\n", "issue": "Missing Purges\nNoticed this while setting up new mirror. We don't seem to be purging `project/<normalized_name>` key when projects are deleted.\r\n\r\nThis leads bandersnatch to get confused and fall behind until the key is purged so the JSON api returns a 404\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom celery.schedules import crontab\nfrom sqlalchemy.orm.base import NO_VALUE\n\nfrom warehouse import db\nfrom warehouse.accounts.models import User, Email\nfrom warehouse.cache.origin import key_factory, receive_set\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.packaging.tasks import compute_trending\n\n\[email protected]_for(User.name, 'set')\ndef user_name_receive_set(config, target, value, oldvalue, initiator):\n if oldvalue is not NO_VALUE:\n receive_set(User.name, config, target)\n\n\[email protected]_for(Email.primary, 'set')\ndef email_primary_receive_set(config, target, value, oldvalue, initiator):\n if oldvalue is not NO_VALUE:\n receive_set(Email.primary, config, target)\n\n\ndef includeme(config):\n # Register whatever file storage backend has been configured for storing\n # our package files.\n storage_class = config.maybe_dotted(\n config.registry.settings[\"files.backend\"],\n )\n config.register_service_factory(storage_class.create_service, IFileStorage)\n\n # Register our origin cache keys\n config.register_origin_cache_keys(\n Project,\n cache_keys=[\"project/{obj.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n Release,\n cache_keys=[\"project/{obj.project.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.project.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='project.users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n User,\n cache_keys=[\"user/{obj.username}\"],\n )\n config.register_origin_cache_keys(\n User.name,\n 
purge_keys=[\n key_factory(\"user/{obj.username}\"),\n key_factory(\"project/{itr.normalized_name}\", iterate_on='projects')\n ],\n )\n config.register_origin_cache_keys(\n Email.primary,\n purge_keys=[\n key_factory(\"user/{obj.user.username}\"),\n key_factory(\n \"project/{itr.normalized_name}\",\n iterate_on='user.projects',\n )\n ],\n )\n\n # Add a periodic task to compute trending once a day, assuming we have\n # been configured to be able to access BigQuery.\n if config.get_settings().get(\"warehouse.trending_table\"):\n config.add_periodic_task(crontab(minute=0, hour=3), compute_trending)\n", "path": "warehouse/packaging/__init__.py"}]} | 1,441 | 217 |
gh_patches_debug_21636 | rasdani/github-patches | git_diff | cloudtools__troposphere-1775 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add AdvancedSecurityOptions property to AWS ElasticSearch Domain
**Description:**
AWS Elasticsearch now supports fine-grained access control with CloudFormation. Need to add AdvancedSecurityOptions and MasterUserOptions to the AWS::Elasticsearch::Domain object to enable this new functionality.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-advancedsecurityoptions
</issue>
<code>
[start of troposphere/elasticsearch.py]
1 # Copyright (c) 2012-2015, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSProperty, AWSObject, Tags
7 from .compat import policytypes
8 from .validators import boolean, integer, integer_range, positive_integer
9
10 VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')
11
12
13 def validate_volume_type(volume_type):
14 """Validate VolumeType for ElasticsearchDomain"""
15 if volume_type not in VALID_VOLUME_TYPES:
16 raise ValueError("Elasticsearch Domain VolumeType must be one of: %s" %
17 ", ".join(VALID_VOLUME_TYPES))
18 return volume_type
19
20
21 class CognitoOptions(AWSProperty):
22 props = {
23 'Enabled': (boolean, False),
24 'IdentityPoolId': (basestring, False),
25 'RoleArn': (basestring, False),
26 'UserPoolId': (basestring, False),
27 }
28
29
30 class EBSOptions(AWSProperty):
31 props = {
32 'EBSEnabled': (boolean, False),
33 'Iops': (positive_integer, False),
34 'VolumeSize': (integer, False),
35 'VolumeType': (validate_volume_type, False)
36 }
37
38 def validate(self):
39 volume_type = self.properties.get('VolumeType')
40 iops = self.properties.get('Iops')
41 if volume_type == 'io1' and not iops:
42 raise ValueError("Must specify Iops if VolumeType is 'io1'.")
43
44
45 class ZoneAwarenessConfig(AWSProperty):
46 props = {
47 'AvailabilityZoneCount': (integer, False),
48 }
49
50
51 class ElasticsearchClusterConfig(AWSProperty):
52 props = {
53 'DedicatedMasterCount': (integer, False),
54 'DedicatedMasterEnabled': (boolean, False),
55 'DedicatedMasterType': (basestring, False),
56 'InstanceCount': (integer, False),
57 'InstanceType': (basestring, False),
58 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),
59 'ZoneAwarenessEnabled': (boolean, False)
60 }
61
62
63 class EncryptionAtRestOptions(AWSProperty):
64 props = {
65 'Enabled': (boolean, False),
66 'KmsKeyId': (basestring, False),
67 }
68
69
70 class NodeToNodeEncryptionOptions(AWSProperty):
71 props = {
72 'Enabled': (boolean, False),
73 }
74
75
76 class SnapshotOptions(AWSProperty):
77 props = {
78 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)
79 }
80
81
82 class VPCOptions(AWSProperty):
83 props = {
84 "SecurityGroupIds": ([basestring], False),
85 "SubnetIds": ([basestring], False)
86 }
87
88
89 class Domain(AWSObject):
90 resource_type = "AWS::Elasticsearch::Domain"
91
92 props = {
93 'AccessPolicies': (policytypes, False),
94 'AdvancedOptions': (dict, False),
95 'CognitoOptions': (CognitoOptions, False),
96 'DomainName': (basestring, False),
97 'EBSOptions': (EBSOptions, False),
98 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
99 'ElasticsearchVersion': (basestring, False),
100 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),
101 'LogPublishingOptions': (dict, False),
102 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),
103 'SnapshotOptions': (SnapshotOptions, False),
104 'Tags': ((Tags, list), False),
105 'VPCOptions': (VPCOptions, False),
106 }
107
108
109 # Backward compatibility
110 ElasticsearchDomain = Domain
111
[end of troposphere/elasticsearch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py
--- a/troposphere/elasticsearch.py
+++ b/troposphere/elasticsearch.py
@@ -81,8 +81,24 @@
class VPCOptions(AWSProperty):
props = {
- "SecurityGroupIds": ([basestring], False),
- "SubnetIds": ([basestring], False)
+ 'SecurityGroupIds': ([basestring], False),
+ 'SubnetIds': ([basestring], False)
+ }
+
+
+class MasterUserOptions(AWSProperty):
+ props = {
+ 'MasterUserARN': (basestring, False),
+ 'MasterUserName': (basestring, False),
+ 'MasterUserPassword': (basestring, False),
+ }
+
+
+class AdvancedSecurityOptionsInput(AWSProperty):
+ props = {
+ 'Enabled': (boolean, False),
+ 'InternalUserDatabaseEnabled': (boolean, False),
+ 'MasterUserOptions': (MasterUserOptions, False),
}
@@ -92,6 +108,7 @@
props = {
'AccessPolicies': (policytypes, False),
'AdvancedOptions': (dict, False),
+ 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),
'CognitoOptions': (CognitoOptions, False),
'DomainName': (basestring, False),
'EBSOptions': (EBSOptions, False),
| {"golden_diff": "diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py\n--- a/troposphere/elasticsearch.py\n+++ b/troposphere/elasticsearch.py\n@@ -81,8 +81,24 @@\n \n class VPCOptions(AWSProperty):\n props = {\n- \"SecurityGroupIds\": ([basestring], False),\n- \"SubnetIds\": ([basestring], False)\n+ 'SecurityGroupIds': ([basestring], False),\n+ 'SubnetIds': ([basestring], False)\n+ }\n+\n+\n+class MasterUserOptions(AWSProperty):\n+ props = {\n+ 'MasterUserARN': (basestring, False),\n+ 'MasterUserName': (basestring, False),\n+ 'MasterUserPassword': (basestring, False),\n+ }\n+\n+\n+class AdvancedSecurityOptionsInput(AWSProperty):\n+ props = {\n+ 'Enabled': (boolean, False),\n+ 'InternalUserDatabaseEnabled': (boolean, False),\n+ 'MasterUserOptions': (MasterUserOptions, False),\n }\n \n \n@@ -92,6 +108,7 @@\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n+ 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n", "issue": "Add AdvancedSecurityOptions property to AWS ElasticSearch Domain\n**Description:**\r\n\r\nAWS Elasticsearch now supports fine-grained access control with Cloudformation. Need to add AdvancedSecurityOptions and MasterUserOptions to AWS::Elasticsearch::Domain object to enable this new functionality. \r\n\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-advancedsecurityoptions\n", "before_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSProperty, AWSObject, Tags\nfrom .compat import policytypes\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\nclass CognitoOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'IdentityPoolId': (basestring, False),\n 'RoleArn': (basestring, False),\n 'UserPoolId': (basestring, False),\n }\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ZoneAwarenessConfig(AWSProperty):\n props = {\n 'AvailabilityZoneCount': (integer, False),\n }\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass EncryptionAtRestOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'KmsKeyId': (basestring, False),\n }\n\n\nclass NodeToNodeEncryptionOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, 
False),\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass VPCOptions(AWSProperty):\n props = {\n \"SecurityGroupIds\": ([basestring], False),\n \"SubnetIds\": ([basestring], False)\n }\n\n\nclass Domain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'ElasticsearchVersion': (basestring, False),\n 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),\n 'LogPublishingOptions': (dict, False),\n 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': ((Tags, list), False),\n 'VPCOptions': (VPCOptions, False),\n }\n\n\n# Backward compatibility\nElasticsearchDomain = Domain\n", "path": "troposphere/elasticsearch.py"}]} | 1,652 | 323 |
gh_patches_debug_25159 | rasdani/github-patches | git_diff | mlflow__mlflow-9258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo fix
https://github.com/mlflow/mlflow/blob/9724c83bd8f0100c465e68e30651a9727de42ce0/dev/show_package_release_dates.py#L49
`package_legnth` -> `package_length`
</issue>
<code>
[start of dev/show_package_release_dates.py]
1 import os
2 import json
3 import sys
4 import subprocess
5 import requests
6 from concurrent.futures import ThreadPoolExecutor
7 import traceback
8
9
10 def get_distributions():
11 res = subprocess.check_output(
12 [sys.executable, "-m", "pip", "list", "--format", "json"], text=True
13 )
14 return [(pkg["name"], pkg["version"]) for pkg in json.loads(res)]
15
16
17 def get_release_date(package, version):
18 resp = requests.get(f"https://pypi.python.org/pypi/{package}/json", timeout=10)
19 if not resp.ok:
20 return ""
21
22 matched = [dist_files for ver, dist_files in resp.json()["releases"].items() if ver == version]
23 if (not matched) or (not matched[0]):
24 return ""
25
26 upload_time = matched[0][0]["upload_time"]
27 return upload_time.split("T")[0] # return year-month-day
28
29
30 def get_longest_string_length(array):
31 return len(max(array, key=len))
32
33
34 def safe_result(future, if_error=""):
35 try:
36 return future.result()
37 except Exception:
38 traceback.print_exc()
39 return if_error
40
41
42 def main():
43 distributions = get_distributions()
44 with ThreadPoolExecutor(max_workers=min(32, os.cpu_count() + 4)) as executor:
45 futures = [executor.submit(get_release_date, pkg, ver) for pkg, ver in distributions]
46 release_dates = [safe_result(f) for f in futures]
47
48 packages, versions = list(zip(*distributions))
49 package_legnth = get_longest_string_length(packages)
50 version_length = get_longest_string_length(versions)
51 release_date_length = len("Release Date")
52 print("Package".ljust(package_legnth), "Version".ljust(version_length), "Release Date")
53 print("-" * (package_legnth + version_length + release_date_length + 2))
54 for package, version, release_date in sorted(
55 zip(packages, versions, release_dates),
56 # Sort by release date in descending order
57 key=lambda x: x[2],
58 reverse=True,
59 ):
60 print(
61 package.ljust(package_legnth),
62 version.ljust(version_length),
63 release_date.ljust(release_date_length),
64 )
65
66
67 if __name__ == "__main__":
68 main()
69
[end of dev/show_package_release_dates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dev/show_package_release_dates.py b/dev/show_package_release_dates.py
--- a/dev/show_package_release_dates.py
+++ b/dev/show_package_release_dates.py
@@ -46,11 +46,11 @@
release_dates = [safe_result(f) for f in futures]
packages, versions = list(zip(*distributions))
- package_legnth = get_longest_string_length(packages)
+ package_length = get_longest_string_length(packages)
version_length = get_longest_string_length(versions)
release_date_length = len("Release Date")
- print("Package".ljust(package_legnth), "Version".ljust(version_length), "Release Date")
- print("-" * (package_legnth + version_length + release_date_length + 2))
+ print("Package".ljust(package_length), "Version".ljust(version_length), "Release Date")
+ print("-" * (package_length + version_length + release_date_length + 2))
for package, version, release_date in sorted(
zip(packages, versions, release_dates),
# Sort by release date in descending order
@@ -58,7 +58,7 @@
reverse=True,
):
print(
- package.ljust(package_legnth),
+ package.ljust(package_length),
version.ljust(version_length),
release_date.ljust(release_date_length),
)
| {"golden_diff": "diff --git a/dev/show_package_release_dates.py b/dev/show_package_release_dates.py\n--- a/dev/show_package_release_dates.py\n+++ b/dev/show_package_release_dates.py\n@@ -46,11 +46,11 @@\n release_dates = [safe_result(f) for f in futures]\n \n packages, versions = list(zip(*distributions))\n- package_legnth = get_longest_string_length(packages)\n+ package_length = get_longest_string_length(packages)\n version_length = get_longest_string_length(versions)\n release_date_length = len(\"Release Date\")\n- print(\"Package\".ljust(package_legnth), \"Version\".ljust(version_length), \"Release Date\")\n- print(\"-\" * (package_legnth + version_length + release_date_length + 2))\n+ print(\"Package\".ljust(package_length), \"Version\".ljust(version_length), \"Release Date\")\n+ print(\"-\" * (package_length + version_length + release_date_length + 2))\n for package, version, release_date in sorted(\n zip(packages, versions, release_dates),\n # Sort by release date in descending order\n@@ -58,7 +58,7 @@\n reverse=True,\n ):\n print(\n- package.ljust(package_legnth),\n+ package.ljust(package_length),\n version.ljust(version_length),\n release_date.ljust(release_date_length),\n )\n", "issue": "Typo fix\nhttps://github.com/mlflow/mlflow/blob/9724c83bd8f0100c465e68e30651a9727de42ce0/dev/show_package_release_dates.py#L49\r\n\r\n`package_legnth` -> `package_length`\n", "before_files": [{"content": "import os\nimport json\nimport sys\nimport subprocess\nimport requests\nfrom concurrent.futures import ThreadPoolExecutor\nimport traceback\n\n\ndef get_distributions():\n res = subprocess.check_output(\n [sys.executable, \"-m\", \"pip\", \"list\", \"--format\", \"json\"], text=True\n )\n return [(pkg[\"name\"], pkg[\"version\"]) for pkg in json.loads(res)]\n\n\ndef get_release_date(package, version):\n resp = requests.get(f\"https://pypi.python.org/pypi/{package}/json\", timeout=10)\n if not resp.ok:\n return \"\"\n\n matched = [dist_files for ver, dist_files in resp.json()[\"releases\"].items() if ver == version]\n if (not matched) or (not matched[0]):\n return \"\"\n\n upload_time = matched[0][0][\"upload_time\"]\n return upload_time.split(\"T\")[0] # return year-month-day\n\n\ndef get_longest_string_length(array):\n return len(max(array, key=len))\n\n\ndef safe_result(future, if_error=\"\"):\n try:\n return future.result()\n except Exception:\n traceback.print_exc()\n return if_error\n\n\ndef main():\n distributions = get_distributions()\n with ThreadPoolExecutor(max_workers=min(32, os.cpu_count() + 4)) as executor:\n futures = [executor.submit(get_release_date, pkg, ver) for pkg, ver in distributions]\n release_dates = [safe_result(f) for f in futures]\n\n packages, versions = list(zip(*distributions))\n package_legnth = get_longest_string_length(packages)\n version_length = get_longest_string_length(versions)\n release_date_length = len(\"Release Date\")\n print(\"Package\".ljust(package_legnth), \"Version\".ljust(version_length), \"Release Date\")\n print(\"-\" * (package_legnth + version_length + release_date_length + 2))\n for package, version, release_date in sorted(\n zip(packages, versions, release_dates),\n # Sort by release date in descending order\n key=lambda x: x[2],\n reverse=True,\n ):\n print(\n package.ljust(package_legnth),\n version.ljust(version_length),\n release_date.ljust(release_date_length),\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "dev/show_package_release_dates.py"}]} | 1,240 | 302 |
gh_patches_debug_27386 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-8360 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with Spiders built on YextSpider
I've noticed that a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py, matalan_gb.py, and three_gb.py.
A common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class reveals that they've also stopped working. The spider stats suggest 404 and 403 errors are preventing the data from being retrieved.
If this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed?
</issue>
<code>
[start of locations/spiders/independent_financial_us.py]
1 from locations.categories import Categories, apply_category
2 from locations.storefinders.yext import YextSpider
3
4
5 class IndependentFinancialUSSpider(YextSpider):
6 name = "independent_financial_us"
7 item_attributes = {"brand": "Independent Financial", "brand_wikidata": "Q6016398"}
8 api_key = "ee4600854cf5501c53831bf944472e57"
9 wanted_types = ["location", "atm"]
10
11 def parse_item(self, item, location):
12 if location["meta"]["entityType"] == "location":
13 apply_category(Categories.BANK, item)
14 item["ref"] = location.get("c_branchCode", location["meta"].get("id"))
15 item["name"] = " ".join(filter(None, [location.get("name"), location.get("geomodifier")]))
16 elif location["meta"]["entityType"] == "atm":
17 apply_category(Categories.ATM, item)
18 item["name"] = location.get("geomodifier")
19 item["website"] = location.get("c_pagesURL")
20 item.pop("email", None)
21 item["extras"].pop("contact:instagram", None)
22 item.pop("twitter", None)
23 item.pop("facebook", None)
24 yield item
25
[end of locations/spiders/independent_financial_us.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/independent_financial_us.py b/locations/spiders/independent_financial_us.py
--- a/locations/spiders/independent_financial_us.py
+++ b/locations/spiders/independent_financial_us.py
@@ -1,24 +1,18 @@
from locations.categories import Categories, apply_category
-from locations.storefinders.yext import YextSpider
+from locations.storefinders.yext_answers import YextAnswersSpider
-class IndependentFinancialUSSpider(YextSpider):
+class IndependentFinancialUSSpider(YextAnswersSpider):
name = "independent_financial_us"
item_attributes = {"brand": "Independent Financial", "brand_wikidata": "Q6016398"}
api_key = "ee4600854cf5501c53831bf944472e57"
- wanted_types = ["location", "atm"]
+ experience_key = "independent-financial-search"
- def parse_item(self, item, location):
- if location["meta"]["entityType"] == "location":
- apply_category(Categories.BANK, item)
- item["ref"] = location.get("c_branchCode", location["meta"].get("id"))
- item["name"] = " ".join(filter(None, [location.get("name"), location.get("geomodifier")]))
- elif location["meta"]["entityType"] == "atm":
+ def parse_item(self, location, item):
+ if location["data"]["type"] == "atm":
apply_category(Categories.ATM, item)
- item["name"] = location.get("geomodifier")
- item["website"] = location.get("c_pagesURL")
- item.pop("email", None)
- item["extras"].pop("contact:instagram", None)
- item.pop("twitter", None)
- item.pop("facebook", None)
+ elif location["data"]["type"] == "location":
+ apply_category(Categories.BANK, item)
+ else:
+ self.logger.error("Unknown location type: {}".format(location["data"]["type"]))
yield item
| {"golden_diff": "diff --git a/locations/spiders/independent_financial_us.py b/locations/spiders/independent_financial_us.py\n--- a/locations/spiders/independent_financial_us.py\n+++ b/locations/spiders/independent_financial_us.py\n@@ -1,24 +1,18 @@\n from locations.categories import Categories, apply_category\n-from locations.storefinders.yext import YextSpider\n+from locations.storefinders.yext_answers import YextAnswersSpider\n \n \n-class IndependentFinancialUSSpider(YextSpider):\n+class IndependentFinancialUSSpider(YextAnswersSpider):\n name = \"independent_financial_us\"\n item_attributes = {\"brand\": \"Independent Financial\", \"brand_wikidata\": \"Q6016398\"}\n api_key = \"ee4600854cf5501c53831bf944472e57\"\n- wanted_types = [\"location\", \"atm\"]\n+ experience_key = \"independent-financial-search\"\n \n- def parse_item(self, item, location):\n- if location[\"meta\"][\"entityType\"] == \"location\":\n- apply_category(Categories.BANK, item)\n- item[\"ref\"] = location.get(\"c_branchCode\", location[\"meta\"].get(\"id\"))\n- item[\"name\"] = \" \".join(filter(None, [location.get(\"name\"), location.get(\"geomodifier\")]))\n- elif location[\"meta\"][\"entityType\"] == \"atm\":\n+ def parse_item(self, location, item):\n+ if location[\"data\"][\"type\"] == \"atm\":\n apply_category(Categories.ATM, item)\n- item[\"name\"] = location.get(\"geomodifier\")\n- item[\"website\"] = location.get(\"c_pagesURL\")\n- item.pop(\"email\", None)\n- item[\"extras\"].pop(\"contact:instagram\", None)\n- item.pop(\"twitter\", None)\n- item.pop(\"facebook\", None)\n+ elif location[\"data\"][\"type\"] == \"location\":\n+ apply_category(Categories.BANK, item)\n+ else:\n+ self.logger.error(\"Unknown location type: {}\".format(location[\"data\"][\"type\"]))\n yield item\n", "issue": "Problems with Spiders built on YextSpider\nI've noticed a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py , matalan_gb.py , and three_gb.py .\r\n\r\nA common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class, reveals they've also stopped working. 
The spider stats suggest 404 and 403 errors are preventing the data being retrieved.\r\n\r\nIf this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed?\n", "before_files": [{"content": "from locations.categories import Categories, apply_category\nfrom locations.storefinders.yext import YextSpider\n\n\nclass IndependentFinancialUSSpider(YextSpider):\n name = \"independent_financial_us\"\n item_attributes = {\"brand\": \"Independent Financial\", \"brand_wikidata\": \"Q6016398\"}\n api_key = \"ee4600854cf5501c53831bf944472e57\"\n wanted_types = [\"location\", \"atm\"]\n\n def parse_item(self, item, location):\n if location[\"meta\"][\"entityType\"] == \"location\":\n apply_category(Categories.BANK, item)\n item[\"ref\"] = location.get(\"c_branchCode\", location[\"meta\"].get(\"id\"))\n item[\"name\"] = \" \".join(filter(None, [location.get(\"name\"), location.get(\"geomodifier\")]))\n elif location[\"meta\"][\"entityType\"] == \"atm\":\n apply_category(Categories.ATM, item)\n item[\"name\"] = location.get(\"geomodifier\")\n item[\"website\"] = location.get(\"c_pagesURL\")\n item.pop(\"email\", None)\n item[\"extras\"].pop(\"contact:instagram\", None)\n item.pop(\"twitter\", None)\n item.pop(\"facebook\", None)\n yield item\n", "path": "locations/spiders/independent_financial_us.py"}]} | 1,016 | 478 |
gh_patches_debug_950 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-2204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
torch.div() (1.6.0) does not have 'rounding_mode' parameter
According to the torch 1.6.0 documentation here: https://pytorch.org/docs/1.6.0/generated/torch.div.html?highlight=torch%20div#torch.div
there is no 'rounding_mode' parameter.
But in translator:
https://github.com/OpenNMT/OpenNMT-py/blob/0f411ce11a83b18c0223ac94ccc11a35403763df/onmt/translate/beam_search.py#L282
That's why I receive this error:
```
onmt_translate -model ./../output/test/nmt/f0/run/model_step_100.pt -src ./../output/test/nmt/f0/src-test.txt -output ./../output/test/nmt/f0/test.epoch100.pred.csv -gpu 0 --min_length 2 -verbose
[2022-09-15 20:32:19,980 INFO] Translating shard 0.
Traceback (most recent call last):
File "c:\programdata\anaconda3\envs\nuecg\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\programdata\anaconda3\envs\nuecg\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\envs\nuecg\Scripts\onmt_translate.exe\__main__.py", line 7, in <module>
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\bin\translate.py", line 54, in main
translate(opt)
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\bin\translate.py", line 38, in translate
align_debug=opt.align_debug
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 440, in translate
phrase_table=phrase_table)
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 487, in _translate
batch, data.src_vocabs, attn_debug
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 861, in translate_batch
batch, src_vocabs, decode_strategy
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 947, in _translate_batch_with_strategy
decode_strategy.advance(log_probs, attn)
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\beam_search.py", line 283, in advance
rounding_mode='trunc')
TypeError: div() got an unexpected keyword argument 'rounding_mode'
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from setuptools import setup, find_packages
3 from os import path
4
5 this_directory = path.abspath(path.dirname(__file__))
6 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
7 long_description = f.read()
8
9 setup(
10 name='OpenNMT-py',
11 description='A python implementation of OpenNMT',
12 long_description=long_description,
13 long_description_content_type='text/markdown',
14 version='2.3.0',
15 packages=find_packages(),
16 project_urls={
17 "Documentation": "http://opennmt.net/OpenNMT-py/",
18 "Forum": "http://forum.opennmt.net/",
19 "Gitter": "https://gitter.im/OpenNMT/OpenNMT-py",
20 "Source": "https://github.com/OpenNMT/OpenNMT-py/"
21 },
22 python_requires=">=3.5",
23 install_requires=[
24 "torch>=1.6.0",
25 "torchtext==0.5.0",
26 "configargparse",
27 "tensorboard>=2.3",
28 "flask",
29 "waitress",
30 "pyonmttok>=1.23,<2",
31 "pyyaml",
32 "sacrebleu"
33 ],
34 entry_points={
35 "console_scripts": [
36 "onmt_server=onmt.bin.server:main",
37 "onmt_train=onmt.bin.train:main",
38 "onmt_translate=onmt.bin.translate:main",
39 "onmt_translate_dynamic=onmt.bin.translate_dynamic:main",
40 "onmt_release_model=onmt.bin.release_model:main",
41 "onmt_average_models=onmt.bin.average_models:main",
42 "onmt_build_vocab=onmt.bin.build_vocab:main"
43 ],
44 }
45 )
46
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
},
python_requires=">=3.5",
install_requires=[
- "torch>=1.6.0",
+ "torch>=1.9.0",
"torchtext==0.5.0",
"configargparse",
"tensorboard>=2.3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n },\n python_requires=\">=3.5\",\n install_requires=[\n- \"torch>=1.6.0\",\n+ \"torch>=1.9.0\",\n \"torchtext==0.5.0\",\n \"configargparse\",\n \"tensorboard>=2.3\",\n", "issue": "torch.div() (1.6.0) does not have 'rounding_mode' parameter\nAccording to the torch 1.6.0 here: https://pytorch.org/docs/1.6.0/generated/torch.div.html?highlight=torch%20div#torch.div\r\nthere is no 'rounding_mode' parameter. \r\n\r\nBut in translator:\r\nhttps://github.com/OpenNMT/OpenNMT-py/blob/0f411ce11a83b18c0223ac94ccc11a35403763df/onmt/translate/beam_search.py#L282\r\n\r\nThat's why I receive this error:\r\n```\r\nonmt_translate -model ./../output/test/nmt/f0/run/model_step_100.pt -src ./../output/test/nmt/f0/src-test.txt -output ./../output/test/nmt/f0/test.epoch100.pred.csv -gpu 0 --min_length 2 -verbose \r\n[2022-09-15 20:32:19,980 INFO] Translating shard 0.\r\nTraceback (most recent call last):\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\nuecg\\Scripts\\onmt_translate.exe\\__main__.py\", line 7, in <module>\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\bin\\translate.py\", line 54, in main\r\n translate(opt)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\bin\\translate.py\", line 38, in translate\r\n align_debug=opt.align_debug\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 440, in translate\r\n phrase_table=phrase_table)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 487, in _translate\r\n batch, data.src_vocabs, attn_debug\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 861, in translate_batch\r\n batch, src_vocabs, decode_strategy\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 947, in _translate_batch_with_strategy\r\n decode_strategy.advance(log_probs, attn)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\beam_search.py\", line 283, in advance\r\n rounding_mode='trunc')\r\nTypeError: div() got an unexpected keyword argument 'rounding_mode'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='OpenNMT-py',\n description='A python implementation of OpenNMT',\n long_description=long_description,\n long_description_content_type='text/markdown',\n version='2.3.0',\n packages=find_packages(),\n project_urls={\n \"Documentation\": \"http://opennmt.net/OpenNMT-py/\",\n \"Forum\": \"http://forum.opennmt.net/\",\n \"Gitter\": \"https://gitter.im/OpenNMT/OpenNMT-py\",\n \"Source\": \"https://github.com/OpenNMT/OpenNMT-py/\"\n },\n python_requires=\">=3.5\",\n install_requires=[\n \"torch>=1.6.0\",\n \"torchtext==0.5.0\",\n \"configargparse\",\n \"tensorboard>=2.3\",\n \"flask\",\n \"waitress\",\n \"pyonmttok>=1.23,<2\",\n \"pyyaml\",\n \"sacrebleu\"\n ],\n 
entry_points={\n \"console_scripts\": [\n \"onmt_server=onmt.bin.server:main\",\n \"onmt_train=onmt.bin.train:main\",\n \"onmt_translate=onmt.bin.translate:main\",\n \"onmt_translate_dynamic=onmt.bin.translate_dynamic:main\",\n \"onmt_release_model=onmt.bin.release_model:main\",\n \"onmt_average_models=onmt.bin.average_models:main\",\n \"onmt_build_vocab=onmt.bin.build_vocab:main\"\n ],\n }\n)\n", "path": "setup.py"}]} | 1,735 | 96 |
gh_patches_debug_13922 | rasdani/github-patches | git_diff | huggingface__accelerate-445 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`psutil` required by utils/modeling.py but it isn't declared as a dependency
### System Info
```Shell
Accelerate `0.10.0.dev0` on Debian Bullseye running Python 3.10.5.
File "/opt/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py", line 276, in get_max_memory
import psutil
ModuleNotFoundError: No module named 'psutil'
```
I'm not sure if you have any minimum version you need to satisfy for `psutil` as a runtime dependency but I see that there are no constraints on it as a `test` dependency in setup.py.
If you don't have any requirements, I'm happy to just add it myself and open a patch PR.
Thanks!
```
### Information
- [ ] The official example scripts
- [ ] My own modified scripts
### Tasks
- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [ ] My own task or dataset (give details below)
### Reproduction
Use `device_map="auto"` when loading any model that supports it.
### Expected behavior
```Shell
I expect that `psutil` is declared as a runtime dependency of the `accelerate` package instead of having to install it myself.
```
</issue>
<code>
[start of setup.py]
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup
16 from setuptools import find_packages
17
18 extras = {}
19 extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3"]
20 extras["docs"] = []
21 extras["test"] = [
22 "psutil",
23 "pytest",
24 "pytest-xdist",
25 "pytest-subtests",
26 "datasets",
27 "evaluate",
28 "transformers",
29 "scipy",
30 "sklearn",
31 "parameterized",
32 "deepspeed",
33 ]
34
35 extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard"]
36 extras["dev"] = extras["quality"] + extras["test"]
37
38 extras["sagemaker"] = [
39 "sagemaker", # boto3 is a required package in sagemaker
40 ]
41
42 setup(
43 name="accelerate",
44 version="0.10.0.dev0",
45 description="Accelerate",
46 long_description=open("README.md", "r", encoding="utf-8").read(),
47 long_description_content_type="text/markdown",
48 keywords="deep learning",
49 license="Apache",
50 author="The HuggingFace team",
51 author_email="[email protected]",
52 url="https://github.com/huggingface/accelerate",
53 package_dir={"": "src"},
54 packages=find_packages("src"),
55 entry_points={
56 "console_scripts": [
57 "accelerate=accelerate.commands.accelerate_cli:main",
58 "accelerate-config=accelerate.commands.config:main",
59 "accelerate-launch=accelerate.commands.launch:main",
60 ]
61 },
62 python_requires=">=3.7.0",
63 install_requires=["numpy>=1.17", "packaging>=20.0", "pyyaml", "torch>=1.4.0"],
64 extras_require=extras,
65 classifiers=[
66 "Development Status :: 5 - Production/Stable",
67 "Intended Audience :: Developers",
68 "Intended Audience :: Education",
69 "Intended Audience :: Science/Research",
70 "License :: OSI Approved :: Apache Software License",
71 "Operating System :: OS Independent",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.7",
74 "Topic :: Scientific/Engineering :: Artificial Intelligence",
75 ],
76 )
77
78 # Release checklist
79 # 1. Change the version in __init__.py and setup.py.
80 # 2. Commit these changes with the message: "Release: VERSION"
81 # 3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
82 # Push the tag to git: git push --tags origin main
83 # 4. Run the following commands in the top-level directory:
84 # python setup.py bdist_wheel
85 # python setup.py sdist
86 # 5. Upload the package to the pypi test server first:
87 # twine upload dist/* -r pypitest
88 # twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
89 # 6. Check that you can install it in a virtualenv by running:
90 # pip install -i https://testpypi.python.org/pypi accelerate
91 # accelerate env
92 # accelerate test
93 # 7. Upload the final version to actual pypi:
94 # twine upload dist/* -r pypi
95 # 8. Add release notes to the tag in github once everything is looking hunky-dory.
96 # 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
97
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,6 @@
extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3"]
extras["docs"] = []
extras["test"] = [
- "psutil",
"pytest",
"pytest-xdist",
"pytest-subtests",
@@ -60,7 +59,7 @@
]
},
python_requires=">=3.7.0",
- install_requires=["numpy>=1.17", "packaging>=20.0", "pyyaml", "torch>=1.4.0"],
+ install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.4.0"],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,6 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n extras[\"docs\"] = []\n extras[\"test\"] = [\n- \"psutil\",\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n@@ -60,7 +59,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "`psutil` required by utils/modeling.py but it isn't declared as a dependency\n### System Info\n\n```Shell\nAccelerate `0.10.0.dev0` on Debian Bullseye running Python 3.10.5.\r\n\r\n\r\nFile \"/opt/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py\", line 276, in get_max_memory\r\n import psutil\r\nModuleNotFoundError: No module named 'psutil'\r\n```\r\n\r\nI'm not sure if you have any minimum version you need to satisfy for `psutil` as a runtime dependency but I see that there are no constraints on it as a `test` dependency in setup.py.\r\n\r\nIf you don't have any requirements, I'm happy to just add it myself and open a patch PR.\r\n\r\nThanks!\n```\n\n\n### Information\n\n- [ ] The official example scripts\n- [ ] My own modified scripts\n\n### Tasks\n\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\n- [ ] My own task or dataset (give details below)\n\n### Reproduction\n\nUse `device_map=\"auto\"` when loading any model that supports it.\n\n### Expected behavior\n\n```Shell\nI expect that `psutil` is declared as a runtime dependency of the `accelerate` package instead of having to install it myself.\n```\n\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nextras = {}\nextras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\nextras[\"docs\"] = []\nextras[\"test\"] = [\n \"psutil\",\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n \"datasets\",\n \"evaluate\",\n \"transformers\",\n \"scipy\",\n \"sklearn\",\n \"parameterized\",\n \"deepspeed\",\n]\n\nextras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\nextras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n\nextras[\"sagemaker\"] = [\n \"sagemaker\", # boto3 is a required package in sagemaker\n]\n\nsetup(\n name=\"accelerate\",\n version=\"0.10.0.dev0\",\n description=\"Accelerate\",\n long_description=open(\"README.md\", \"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n keywords=\"deep learning\",\n license=\"Apache\",\n author=\"The HuggingFace team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/huggingface/accelerate\",\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n entry_points={\n \"console_scripts\": [\n \"accelerate=accelerate.commands.accelerate_cli:main\",\n \"accelerate-config=accelerate.commands.config:main\",\n \"accelerate-launch=accelerate.commands.launch:main\",\n ]\n },\n python_requires=\">=3.7.0\",\n install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n\n# Release checklist\n# 1. Change the version in __init__.py and setup.py.\n# 2. Commit these changes with the message: \"Release: VERSION\"\n# 3. Add a tag in git to mark the release: \"git tag VERSION -m 'Adds tag VERSION for pypi' \"\n# Push the tag to git: git push --tags origin main\n# 4. Run the following commands in the top-level directory:\n# python setup.py bdist_wheel\n# python setup.py sdist\n# 5. Upload the package to the pypi test server first:\n# twine upload dist/* -r pypitest\n# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/\n# 6. Check that you can install it in a virtualenv by running:\n# pip install -i https://testpypi.python.org/pypi accelerate\n# accelerate env\n# accelerate test\n# 7. Upload the final version to actual pypi:\n# twine upload dist/* -r pypi\n# 8. Add release notes to the tag in github once everything is looking hunky-dory.\n# 9. Update the version in __init__.py, setup.py to the new version \"-dev\" and push to master\n", "path": "setup.py"}]} | 1,950 | 223 |
gh_patches_debug_9144 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add challenge filtering using featured parameter.
We need to add filtering in challenge model on the basis of `featured` parameter.
Add challenge filtering using featured parameter.
We need to add filtering in challenge model on the basis of `featured` parameter.
</issue>
<code>
[start of apps/challenges/admin.py]
1 from django.contrib import admin
2
3 from base.admin import ImportExportTimeStampedAdmin
4
5 from .models import (Challenge,
6 ChallengeConfiguration,
7 ChallengePhase,
8 ChallengePhaseSplit,
9 DatasetSplit,
10 Leaderboard,
11 LeaderboardData,
12 StarChallenge,)
13
14
15 @admin.register(Challenge)
16 class ChallengeAdmin(ImportExportTimeStampedAdmin):
17 list_display = ("title", "start_date", "end_date", "creator", "published", "enable_forum", "anonymous_leaderboard")
18 list_filter = ("creator", "published", "enable_forum", "anonymous_leaderboard")
19 search_fields = ("title", "creator")
20
21
22 @admin.register(DatasetSplit)
23 class DatasetSplitAdmin(ImportExportTimeStampedAdmin):
24 list_display = ("name", "codename")
25 list_filter = ("name", "codename")
26 search_fields = ("name", "codename")
27
28
29 @admin.register(ChallengePhase)
30 class ChallengePhaseAdmin(ImportExportTimeStampedAdmin):
31 list_display = ("name", "challenge", "start_date", "end_date", "test_annotation", "is_public", "leaderboard_public")
32 list_filter = ("leaderboard_public", "challenge")
33 search_fields = ("name",)
34
35
36 @admin.register(Leaderboard)
37 class LeaderboardAdmin(ImportExportTimeStampedAdmin):
38 list_display = ("id", "schema")
39 search_fields = ("id",)
40
41
42 @admin.register(ChallengePhaseSplit)
43 class ChallengePhaseSplitAdmin(ImportExportTimeStampedAdmin):
44 list_display = ("id", "challenge_phase", "dataset_split", "leaderboard", "visibility")
45 list_filter = ("challenge_phase", "dataset_split", "leaderboard", "visibility")
46 search_fields = ("challenge_phase", "dataset_split", "leaderboard")
47
48
49 @admin.register(LeaderboardData)
50 class LeaderboardDataAdmin(ImportExportTimeStampedAdmin):
51 list_display = ("challenge_phase_split", "submission", "leaderboard", "result")
52 list_filter = ("challenge_phase_split", "leaderboard",)
53 search_fields = ("challenge_phase_split", "submission", "leaderboard", "result")
54
55
56 @admin.register(ChallengeConfiguration)
57 class ChallengeConfigurationAdmin(ImportExportTimeStampedAdmin):
58 list_display = ('user', 'challenge', 'is_created', 'zip_configuration',)
59 list_filter = ('user', 'is_created',)
60 search_fields = ('user', 'challenge',)
61
62
63 @admin.register(StarChallenge)
64 class StarChallengeAdmin(ImportExportTimeStampedAdmin):
65 list_display = ('user', 'challenge', 'is_starred')
66 search_fields = ('user', 'challenge',)
67
[end of apps/challenges/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/challenges/admin.py b/apps/challenges/admin.py
--- a/apps/challenges/admin.py
+++ b/apps/challenges/admin.py
@@ -14,8 +14,9 @@
@admin.register(Challenge)
class ChallengeAdmin(ImportExportTimeStampedAdmin):
- list_display = ("title", "start_date", "end_date", "creator", "published", "enable_forum", "anonymous_leaderboard")
- list_filter = ("creator", "published", "enable_forum", "anonymous_leaderboard")
+ list_display = ("title", "start_date", "end_date", "creator", "published", "enable_forum", "anonymous_leaderboard",
+ "featured")
+ list_filter = ("creator", "published", "enable_forum", "anonymous_leaderboard", "featured")
search_fields = ("title", "creator")
| {"golden_diff": "diff --git a/apps/challenges/admin.py b/apps/challenges/admin.py\n--- a/apps/challenges/admin.py\n+++ b/apps/challenges/admin.py\n@@ -14,8 +14,9 @@\n \n @admin.register(Challenge)\n class ChallengeAdmin(ImportExportTimeStampedAdmin):\n- list_display = (\"title\", \"start_date\", \"end_date\", \"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n- list_filter = (\"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n+ list_display = (\"title\", \"start_date\", \"end_date\", \"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\",\n+ \"featured\")\n+ list_filter = (\"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\", \"featured\")\n search_fields = (\"title\", \"creator\")\n", "issue": "Add challenge filtering using featured parameter.\nWe need to add filtering in challenge model on the basis of `featured` parameter.\nAdd challenge filtering using featured parameter.\nWe need to add filtering in challenge model on the basis of `featured` parameter.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom base.admin import ImportExportTimeStampedAdmin\n\nfrom .models import (Challenge,\n ChallengeConfiguration,\n ChallengePhase,\n ChallengePhaseSplit,\n DatasetSplit,\n Leaderboard,\n LeaderboardData,\n StarChallenge,)\n\n\[email protected](Challenge)\nclass ChallengeAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"title\", \"start_date\", \"end_date\", \"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n list_filter = (\"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n search_fields = (\"title\", \"creator\")\n\n\[email protected](DatasetSplit)\nclass DatasetSplitAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"name\", \"codename\")\n list_filter = (\"name\", \"codename\")\n search_fields = (\"name\", \"codename\")\n\n\[email protected](ChallengePhase)\nclass ChallengePhaseAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"name\", \"challenge\", \"start_date\", \"end_date\", \"test_annotation\", \"is_public\", \"leaderboard_public\")\n list_filter = (\"leaderboard_public\", \"challenge\")\n search_fields = (\"name\",)\n\n\[email protected](Leaderboard)\nclass LeaderboardAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"id\", \"schema\")\n search_fields = (\"id\",)\n\n\[email protected](ChallengePhaseSplit)\nclass ChallengePhaseSplitAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"id\", \"challenge_phase\", \"dataset_split\", \"leaderboard\", \"visibility\")\n list_filter = (\"challenge_phase\", \"dataset_split\", \"leaderboard\", \"visibility\")\n search_fields = (\"challenge_phase\", \"dataset_split\", \"leaderboard\")\n\n\[email protected](LeaderboardData)\nclass LeaderboardDataAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"challenge_phase_split\", \"submission\", \"leaderboard\", \"result\")\n list_filter = (\"challenge_phase_split\", \"leaderboard\",)\n search_fields = (\"challenge_phase_split\", \"submission\", \"leaderboard\", \"result\")\n\n\[email protected](ChallengeConfiguration)\nclass ChallengeConfigurationAdmin(ImportExportTimeStampedAdmin):\n list_display = ('user', 'challenge', 'is_created', 'zip_configuration',)\n list_filter = ('user', 'is_created',)\n search_fields = ('user', 'challenge',)\n\n\[email protected](StarChallenge)\nclass StarChallengeAdmin(ImportExportTimeStampedAdmin):\n list_display = ('user', 'challenge', 'is_starred')\n search_fields = ('user', 
'challenge',)\n", "path": "apps/challenges/admin.py"}]} | 1,260 | 182 |
gh_patches_debug_5820 | rasdani/github-patches | git_diff | aws__aws-cli-761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
datapipeline query-object --query argument is shadowed
The top level `--query` option shadows the `--query` from datapipeline query-object. This can be addressed in the argrename customization model. We can also take this opportunity to remove the `cli_name` from the `.extra.json` files in botocore.
</issue>
<code>
[start of awscli/customizations/argrename.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 """
15
16 from awscli.customizations import utils
17
18
19 ARGUMENT_RENAMES = {
20 # Mapping of original arg to renamed arg.
21 # The key is <service>.<operation>.argname
22 # The first part of the key is used for event registration
23 # so if you wanted to rename something for an entire service you
24 # could say 'ec2.*.dry-run': 'renamed-arg-name', or if you wanted
25 # to rename across all services you could say '*.*.dry-run': 'new-name'.
26 'ec2.create-image.no-no-reboot': 'reboot',
27 'ec2.*.no-egress': 'ingress',
28 'ec2.*.no-disable-api-termination': 'enable-api-termination',
29 }
30
31
32 def register_arg_renames(cli):
33 for original, new_name in ARGUMENT_RENAMES.items():
34 event_portion, original_arg_name = original.rsplit('.', 1)
35 cli.register('building-argument-table.%s' % event_portion,
36 rename_arg(original_arg_name, new_name))
37
38
39 def rename_arg(original_arg_name, new_name):
40 def _rename_arg(argument_table, **kwargs):
41 if original_arg_name in argument_table:
42 utils.rename_argument(argument_table, original_arg_name, new_name)
43 return _rename_arg
44
[end of awscli/customizations/argrename.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/argrename.py b/awscli/customizations/argrename.py
--- a/awscli/customizations/argrename.py
+++ b/awscli/customizations/argrename.py
@@ -26,6 +26,11 @@
'ec2.create-image.no-no-reboot': 'reboot',
'ec2.*.no-egress': 'ingress',
'ec2.*.no-disable-api-termination': 'enable-api-termination',
+ 'opsworks.*.region': 'stack-region',
+ 'elastictranscoder.*.output': 'job-output',
+ 'swf.register-activity-type.version': 'activity-version',
+ 'swf.register-workflow-type.version': 'workflow-version',
+ 'datapipeline.*.query': 'objects-query',
}
| {"golden_diff": "diff --git a/awscli/customizations/argrename.py b/awscli/customizations/argrename.py\n--- a/awscli/customizations/argrename.py\n+++ b/awscli/customizations/argrename.py\n@@ -26,6 +26,11 @@\n 'ec2.create-image.no-no-reboot': 'reboot',\n 'ec2.*.no-egress': 'ingress',\n 'ec2.*.no-disable-api-termination': 'enable-api-termination',\n+ 'opsworks.*.region': 'stack-region',\n+ 'elastictranscoder.*.output': 'job-output',\n+ 'swf.register-activity-type.version': 'activity-version',\n+ 'swf.register-workflow-type.version': 'workflow-version',\n+ 'datapipeline.*.query': 'objects-query',\n }\n", "issue": "datapipeline query-object --query argument is shadowed\nThe top level `--query` option shadows the `--query` from datapipeline query-object. This can be addressed in the argrename customization model. We can also take this opportunity to remove the `cli_name` from the `.extra.json` files in botocore.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\n\"\"\"\n\nfrom awscli.customizations import utils\n\n\nARGUMENT_RENAMES = {\n # Mapping of original arg to renamed arg.\n # The key is <service>.<operation>.argname\n # The first part of the key is used for event registration\n # so if you wanted to rename something for an entire service you\n # could say 'ec2.*.dry-run': 'renamed-arg-name', or if you wanted\n # to rename across all services you could say '*.*.dry-run': 'new-name'.\n 'ec2.create-image.no-no-reboot': 'reboot',\n 'ec2.*.no-egress': 'ingress',\n 'ec2.*.no-disable-api-termination': 'enable-api-termination',\n}\n\n\ndef register_arg_renames(cli):\n for original, new_name in ARGUMENT_RENAMES.items():\n event_portion, original_arg_name = original.rsplit('.', 1)\n cli.register('building-argument-table.%s' % event_portion,\n rename_arg(original_arg_name, new_name))\n\n\ndef rename_arg(original_arg_name, new_name):\n def _rename_arg(argument_table, **kwargs):\n if original_arg_name in argument_table:\n utils.rename_argument(argument_table, original_arg_name, new_name)\n return _rename_arg\n", "path": "awscli/customizations/argrename.py"}]} | 1,109 | 177 |
gh_patches_debug_17055 | rasdani/github-patches | git_diff | svthalia__concrexit-1680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show non-current FoodEvents in API v2.
### Describe the bug
The `api/v2/food/events/` and `api/v2/food/events/<pk>/` endpoints currently do not return FoodEvents that are not current.
I think to change that we’d only need to replace some `FoodEvent.current_objects.all()`s with `FoodEvent.objects.all()`.
</issue>
<code>
[start of website/pizzas/api/v2/views.py]
1 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
2 from rest_framework.generics import (
3 ListAPIView,
4 RetrieveAPIView,
5 get_object_or_404,
6 CreateAPIView,
7 DestroyAPIView,
8 UpdateAPIView,
9 )
10
11 from rest_framework import filters as framework_filters, status
12 from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
13 from rest_framework.response import Response
14
15 from pizzas.api.v2 import filters
16 from pizzas.api.v2.serializers import (
17 ProductSerializer,
18 FoodOrderSerializer,
19 FoodOrderUpdateSerializer,
20 FoodOrderCreateSerializer,
21 )
22 from pizzas.api.v2.serializers.food_event import FoodEventSerializer
23 from pizzas.models import FoodEvent, Product, FoodOrder
24 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
25
26
27 class FoodEventListView(ListAPIView):
28 """Returns an overview of all food events."""
29
30 serializer_class = FoodEventSerializer
31 queryset = FoodEvent.current_objects.all()
32 filter_backends = (
33 framework_filters.OrderingFilter,
34 filters.FoodEventDateFilterBackend,
35 )
36 ordering_fields = ("start", "end")
37 permission_classes = [
38 IsAuthenticatedOrTokenHasScope,
39 DjangoModelPermissionsOrAnonReadOnly,
40 ]
41 required_scopes = ["food:read"]
42
43
44 class FoodEventDetailView(RetrieveAPIView):
45 """Returns one single food event."""
46
47 serializer_class = FoodEventSerializer
48 queryset = FoodEvent.current_objects.all()
49 permission_classes = [
50 IsAuthenticatedOrTokenHasScope,
51 DjangoModelPermissionsOrAnonReadOnly,
52 ]
53 required_scopes = ["food:read"]
54
55
56 class FoodEventProductsListView(ListAPIView):
57 """Returns an overview of all products."""
58
59 serializer_class = ProductSerializer
60 queryset = Product.available_products.all()
61 filter_backends = (framework_filters.SearchFilter,)
62 search_fields = ("name",)
63 permission_classes = [
64 IsAuthenticatedOrTokenHasScope,
65 DjangoModelPermissionsOrAnonReadOnly,
66 ]
67 required_scopes = ["food:read"]
68
69
70 class FoodEventOrderDetailView(
71 RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
72 ):
73 """Returns details of a food order."""
74
75 permission_classes = [
76 IsAuthenticatedOrTokenHasScopeForMethod,
77 DjangoModelPermissionsOrAnonReadOnly,
78 ]
79 required_scopes_per_method = {
80 "GET": ["food:read"],
81 "POST": ["food:order"],
82 "PUT": ["food:order"],
83 "PATCH": ["food:order"],
84 "DELETE": ["food:order"],
85 }
86
87 def get_serializer_class(self):
88 if self.request.method.lower() == "get":
89 return FoodOrderSerializer
90 if self.request.method.lower() == "post":
91 return FoodOrderCreateSerializer
92 return FoodOrderUpdateSerializer
93
94 def get_queryset(self):
95 return FoodOrder.objects.filter(food_event=self.food_event)
96
97 def get_object(self):
98 queryset = self.filter_queryset(self.get_queryset())
99 obj = get_object_or_404(queryset, member=self.request.member)
100
101 # May raise a permission denied
102 self.check_object_permissions(self.request, obj)
103
104 return obj
105
106 def dispatch(self, request, *args, **kwargs):
107 self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get("pk"))
108 return super().dispatch(request, *args, **kwargs)
109
110 def update(self, request, *args, **kwargs):
111 super().update(request, *args, **kwargs)
112 instance = self.get_object()
113 return Response(
114 FoodOrderSerializer(instance, context=self.get_serializer_context()).data
115 )
116
117 def create(self, request, *args, **kwargs):
118 serializer = self.get_serializer(data=request.data)
119 serializer.is_valid(raise_exception=True)
120 instance = serializer.save(food_event=self.food_event)
121 return Response(
122 FoodOrderSerializer(instance, context=self.get_serializer_context()).data,
123 status=status.HTTP_201_CREATED,
124 )
125
[end of website/pizzas/api/v2/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py
--- a/website/pizzas/api/v2/views.py
+++ b/website/pizzas/api/v2/views.py
@@ -28,7 +28,7 @@
"""Returns an overview of all food events."""
serializer_class = FoodEventSerializer
- queryset = FoodEvent.current_objects.all()
+ queryset = FoodEvent.objects.all()
filter_backends = (
framework_filters.OrderingFilter,
filters.FoodEventDateFilterBackend,
@@ -45,7 +45,7 @@
"""Returns one single food event."""
serializer_class = FoodEventSerializer
- queryset = FoodEvent.current_objects.all()
+ queryset = FoodEvent.objects.all()
permission_classes = [
IsAuthenticatedOrTokenHasScope,
DjangoModelPermissionsOrAnonReadOnly,
| {"golden_diff": "diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py\n--- a/website/pizzas/api/v2/views.py\n+++ b/website/pizzas/api/v2/views.py\n@@ -28,7 +28,7 @@\n \"\"\"Returns an overview of all food events.\"\"\"\n \n serializer_class = FoodEventSerializer\n- queryset = FoodEvent.current_objects.all()\n+ queryset = FoodEvent.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n@@ -45,7 +45,7 @@\n \"\"\"Returns one single food event.\"\"\"\n \n serializer_class = FoodEventSerializer\n- queryset = FoodEvent.current_objects.all()\n+ queryset = FoodEvent.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n", "issue": "Show non-current FoodEvents in API v2.\n### Describe the bug\r\nThe `api/v2/food/events/` and `api/v2/food/events/<pk>/` endpoints currently do not return FoodEvents that are not current. \r\n\r\nI think to change that we\u2019d only need to replace some `FoodEvent.current_objects.all()`s with `FoodEvent.objects.all()`.\r\n\n", "before_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n get_object_or_404,\n CreateAPIView,\n DestroyAPIView,\n UpdateAPIView,\n)\n\nfrom rest_framework import filters as framework_filters, status\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\nfrom rest_framework.response import Response\n\nfrom pizzas.api.v2 import filters\nfrom pizzas.api.v2.serializers import (\n ProductSerializer,\n FoodOrderSerializer,\n FoodOrderUpdateSerializer,\n FoodOrderCreateSerializer,\n)\nfrom pizzas.api.v2.serializers.food_event import FoodEventSerializer\nfrom pizzas.models import FoodEvent, Product, FoodOrder\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass FoodEventListView(ListAPIView):\n \"\"\"Returns an overview of all food events.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.current_objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n )\n ordering_fields = (\"start\", \"end\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventDetailView(RetrieveAPIView):\n \"\"\"Returns one single food event.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.current_objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n\n serializer_class = ProductSerializer\n queryset = Product.available_products.all()\n filter_backends = (framework_filters.SearchFilter,)\n search_fields = (\"name\",)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventOrderDetailView(\n RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView\n):\n \"\"\"Returns details of a food order.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes_per_method = {\n \"GET\": [\"food:read\"],\n \"POST\": [\"food:order\"],\n \"PUT\": [\"food:order\"],\n \"PATCH\": 
[\"food:order\"],\n \"DELETE\": [\"food:order\"],\n }\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"get\":\n return FoodOrderSerializer\n if self.request.method.lower() == \"post\":\n return FoodOrderCreateSerializer\n return FoodOrderUpdateSerializer\n\n def get_queryset(self):\n return FoodOrder.objects.filter(food_event=self.food_event)\n\n def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, member=self.request.member)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n return super().dispatch(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n instance = serializer.save(food_event=self.food_event)\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/pizzas/api/v2/views.py"}]} | 1,740 | 191 |
gh_patches_debug_30501 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Calendar used as a trigger for automations never fire.
So I created an automation that uses the generated calendar to notify me when I have to put the bins out using the new calendar triggers.
However, the automation never runs because the trigger never fires.
I debugged this a bit and found the following issue:
HA asks the integration [here](https://github.com/home-assistant/core/blob/dev/homeassistant/components/calendar/trigger.py#L98) for all applicable events. However, the returned list is not quite correct. The timestamps are datetimes and the integration checks only the date component [here](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/calendar.py#L53).
In my case, my local timezone is Europe/Berlin, which is currently UTC+2. HA gives UTC timestamps to the integration, so they are shifted by two hours "into the past" (not really, but you know what I mean). This means that the date check is wrong, as it misses the events for the day.
I changed the following and it worked in my testing but maybe you have a better idea on how to fix that:
```python
async def async_get_events(self, hass, start_datetime, end_datetime):
"""Return all events within specified time span."""
collections = []
for a in self._scraper.get_upcoming(include_today=True):
event = self._convert(a)
if event.start_datetime_local >= start_datetime and event.end_datetime_local <= end_datetime:
collections.append(event)
return collections
def _convert(self, collection):
"""Convert an collection into a Home Assistant calendar event."""
return CalendarEvent(
summary=collection.type,
start=collection.date,
end=collection.date,
)
```
Essentially, I convert to a HA calender event first and then let HA convert the start/end times of the event to local time to compare them against the given start/end times which are still in UTC. But both are now proper datetime objects with timezone information so comparing them works fine.
</issue>
<code>
[start of custom_components/waste_collection_schedule/calendar.py]
1 """Calendar platform support for Waste Collection Schedule."""
2
3 import logging
4 from datetime import timedelta
5
6 from homeassistant.components.calendar import CalendarEntity, CalendarEvent
7
8 from custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (
9 Scraper,
10 )
11
12 _LOGGER = logging.getLogger(__name__)
13
14
15 async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
16 """Set up calendar platform."""
17 # We only want this platform to be set up via discovery.
18 if discovery_info is None:
19 return
20
21 entities = []
22
23 api = discovery_info["api"]
24
25 for scraper in api.scrapers:
26 dedicated_calendar_types = scraper.get_dedicated_calendar_types()
27 global_calendar_types = scraper.get_global_calendar_types()
28
29 if dedicated_calendar_types is not None:
30 for type in dedicated_calendar_types:
31 unique_id = calc_unique_calendar_id(scraper, type)
32
33 entities.append(
34 WasteCollectionCalendar(
35 api,
36 scraper,
37 scraper.get_calendar_title_for_type(type),
38 [scraper.get_collection_type(type)],
39 unique_id,
40 )
41 )
42
43 if global_calendar_types is not None or dedicated_calendar_types is None:
44 unique_id = calc_unique_calendar_id(scraper)
45 entities.append(
46 WasteCollectionCalendar(
47 api,
48 scraper,
49 scraper.calendar_title,
50 [
51 scraper.get_collection_type(type)
52 for type in global_calendar_types
53 ]
54 if global_calendar_types is not None
55 else None,
56 unique_id,
57 )
58 )
59
60 async_add_entities(entities)
61
62
63 class WasteCollectionCalendar(CalendarEntity):
64 """Calendar entity class."""
65
66 def __init__(self, api, scraper, name, types, unique_id: str):
67 self._api = api
68 self._scraper = scraper
69 self._name = name
70 self._types = types
71 self._unique_id = unique_id
72 self._attr_unique_id = unique_id
73
74 @property
75 def name(self):
76 """Return entity name."""
77 return self._name
78
79 @property
80 def event(self):
81 """Return next collection event."""
82 collections = self._scraper.get_upcoming(
83 count=1, include_today=True, types=self._types
84 )
85
86 if len(collections) == 0:
87 return None
88 else:
89 return self._convert(collections[0])
90
91 async def async_get_events(self, hass, start_date, end_date):
92 """Return all events within specified time span."""
93 collections = []
94 for a in self._scraper.get_upcoming(include_today=True, types=self._types):
95 if a.date >= start_date.date() and a.date <= end_date.date():
96 collections.append(self._convert(a))
97 return collections
98
99 def _convert(self, collection):
100 """Convert an collection into a Home Assistant calendar event."""
101 return CalendarEvent(
102 summary=collection.type,
103 start=collection.date,
104 end=collection.date + timedelta(days=1),
105 )
106
107
108 def calc_unique_calendar_id(scraper: Scraper, type: str | None = None):
109 return scraper.unique_id + ("_" + type if type is not None else "") + "_calendar"
110
[end of custom_components/waste_collection_schedule/calendar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/calendar.py b/custom_components/waste_collection_schedule/calendar.py
--- a/custom_components/waste_collection_schedule/calendar.py
+++ b/custom_components/waste_collection_schedule/calendar.py
@@ -1,9 +1,11 @@
"""Calendar platform support for Waste Collection Schedule."""
import logging
-from datetime import timedelta
+from datetime import timedelta, timezone, datetime
from homeassistant.components.calendar import CalendarEntity, CalendarEvent
+from homeassistant.core import HomeAssistant
+from homeassistant.util.dt import DEFAULT_TIME_ZONE
from custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (
Scraper,
@@ -88,15 +90,23 @@
else:
return self._convert(collections[0])
- async def async_get_events(self, hass, start_date, end_date):
+ async def async_get_events(
+ self, hass: HomeAssistant, start_date: datetime, end_date: datetime
+ ):
"""Return all events within specified time span."""
- collections = []
- for a in self._scraper.get_upcoming(include_today=True, types=self._types):
- if a.date >= start_date.date() and a.date <= end_date.date():
- collections.append(self._convert(a))
- return collections
+ events = []
- def _convert(self, collection):
+ for collection in self._scraper.get_upcoming(
+ include_today=True, types=self._types
+ ):
+ event = self._convert(collection)
+
+ if start_date <= event.start_datetime_local <= end_date:
+ events.append(event)
+
+ return events
+
+ def _convert(self, collection) -> CalendarEvent:
"""Convert an collection into a Home Assistant calendar event."""
return CalendarEvent(
summary=collection.type,
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/calendar.py b/custom_components/waste_collection_schedule/calendar.py\n--- a/custom_components/waste_collection_schedule/calendar.py\n+++ b/custom_components/waste_collection_schedule/calendar.py\n@@ -1,9 +1,11 @@\n \"\"\"Calendar platform support for Waste Collection Schedule.\"\"\"\n \n import logging\n-from datetime import timedelta\n+from datetime import timedelta, timezone, datetime\n \n from homeassistant.components.calendar import CalendarEntity, CalendarEvent\n+from homeassistant.core import HomeAssistant\n+from homeassistant.util.dt import DEFAULT_TIME_ZONE\n \n from custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (\n Scraper,\n@@ -88,15 +90,23 @@\n else:\n return self._convert(collections[0])\n \n- async def async_get_events(self, hass, start_date, end_date):\n+ async def async_get_events(\n+ self, hass: HomeAssistant, start_date: datetime, end_date: datetime\n+ ):\n \"\"\"Return all events within specified time span.\"\"\"\n- collections = []\n- for a in self._scraper.get_upcoming(include_today=True, types=self._types):\n- if a.date >= start_date.date() and a.date <= end_date.date():\n- collections.append(self._convert(a))\n- return collections\n+ events = []\n \n- def _convert(self, collection):\n+ for collection in self._scraper.get_upcoming(\n+ include_today=True, types=self._types\n+ ):\n+ event = self._convert(collection)\n+\n+ if start_date <= event.start_datetime_local <= end_date:\n+ events.append(event)\n+\n+ return events\n+\n+ def _convert(self, collection) -> CalendarEvent:\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\n return CalendarEvent(\n summary=collection.type,\n", "issue": "Calendar used as a trigger for automations never fire.\nSo I created an automation that uses the generated calendar to notify me when I have to put the bins out using the new calendar triggers.\r\n\r\nHowever, the automation never runs because the trigger never fires.\r\n\r\nI debugged this a bit and found tha following issue:\r\n\r\nHA asks the integration [here](https://github.com/home-assistant/core/blob/dev/homeassistant/components/calendar/trigger.py#L98) for all applicable events. However, the returned list is not quite correct. The timestamps are datetimes and the integration checks only the date component [here](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/calendar.py#L53).\r\n\r\nIn my case, my local timezone is Europe/Berlin, which is currently UTC+2. HA gives UTC timestamps to the integration, so they are shifted by two hours \"into the past\" (not really, but you know what I mean). 
This means that the date check is wrong, as it misses the events for the day.\r\n\r\nI changed the following and it worked in my testing but maybe you have a better idea on how to fix that:\r\n\r\n```python\r\n async def async_get_events(self, hass, start_datetime, end_datetime):\r\n \"\"\"Return all events within specified time span.\"\"\"\r\n collections = []\r\n for a in self._scraper.get_upcoming(include_today=True):\r\n event = self._convert(a)\r\n if event.start_datetime_local >= start_datetime and event.end_datetime_local <= end_datetime:\r\n collections.append(event)\r\n return collections\r\n\r\n def _convert(self, collection):\r\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\r\n return CalendarEvent(\r\n summary=collection.type,\r\n start=collection.date,\r\n end=collection.date,\r\n )\r\n```\r\n\r\nEssentially, I convert to a HA calender event first and then let HA convert the start/end times of the event to local time to compare them against the given start/end times which are still in UTC. But both are now proper datetime objects with timezone information so comparing them works fine.\n", "before_files": [{"content": "\"\"\"Calendar platform support for Waste Collection Schedule.\"\"\"\n\nimport logging\nfrom datetime import timedelta\n\nfrom homeassistant.components.calendar import CalendarEntity, CalendarEvent\n\nfrom custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (\n Scraper,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up calendar platform.\"\"\"\n # We only want this platform to be set up via discovery.\n if discovery_info is None:\n return\n\n entities = []\n\n api = discovery_info[\"api\"]\n\n for scraper in api.scrapers:\n dedicated_calendar_types = scraper.get_dedicated_calendar_types()\n global_calendar_types = scraper.get_global_calendar_types()\n\n if dedicated_calendar_types is not None:\n for type in dedicated_calendar_types:\n unique_id = calc_unique_calendar_id(scraper, type)\n\n entities.append(\n WasteCollectionCalendar(\n api,\n scraper,\n scraper.get_calendar_title_for_type(type),\n [scraper.get_collection_type(type)],\n unique_id,\n )\n )\n\n if global_calendar_types is not None or dedicated_calendar_types is None:\n unique_id = calc_unique_calendar_id(scraper)\n entities.append(\n WasteCollectionCalendar(\n api,\n scraper,\n scraper.calendar_title,\n [\n scraper.get_collection_type(type)\n for type in global_calendar_types\n ]\n if global_calendar_types is not None\n else None,\n unique_id,\n )\n )\n\n async_add_entities(entities)\n\n\nclass WasteCollectionCalendar(CalendarEntity):\n \"\"\"Calendar entity class.\"\"\"\n\n def __init__(self, api, scraper, name, types, unique_id: str):\n self._api = api\n self._scraper = scraper\n self._name = name\n self._types = types\n self._unique_id = unique_id\n self._attr_unique_id = unique_id\n\n @property\n def name(self):\n \"\"\"Return entity name.\"\"\"\n return self._name\n\n @property\n def event(self):\n \"\"\"Return next collection event.\"\"\"\n collections = self._scraper.get_upcoming(\n count=1, include_today=True, types=self._types\n )\n\n if len(collections) == 0:\n return None\n else:\n return self._convert(collections[0])\n\n async def async_get_events(self, hass, start_date, end_date):\n \"\"\"Return all events within specified time span.\"\"\"\n collections = []\n for a in self._scraper.get_upcoming(include_today=True, 
types=self._types):\n if a.date >= start_date.date() and a.date <= end_date.date():\n collections.append(self._convert(a))\n return collections\n\n def _convert(self, collection):\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\n return CalendarEvent(\n summary=collection.type,\n start=collection.date,\n end=collection.date + timedelta(days=1),\n )\n\n\ndef calc_unique_calendar_id(scraper: Scraper, type: str | None = None):\n return scraper.unique_id + (\"_\" + type if type is not None else \"\") + \"_calendar\"\n", "path": "custom_components/waste_collection_schedule/calendar.py"}]} | 1,876 | 393 |
gh_patches_debug_36612 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2633 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider officedepot is broken
During the global build at 2021-08-18-14-42-26, spider **officedepot** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/officedepot.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson))
</issue>
<code>
[start of locations/spiders/officedepot.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7
8 class OfficedepotSpider(scrapy.Spider):
9 name = 'officedepot'
10 allowed_domains = ['www.officedepot.com']
11 start_urls = ['https://www.officedepot.com/storelocator/states/']
12
13 def parse_store(self, response):
14 o = OpeningHours()
15 for d in response.xpath('//time[@itemprop="openingHours"]/@datetime').extract():
16 day, times = d.split(' ', 1)
17 s, f = times.split('-')
18
19 # They seem to have a bug where they put down 24:00 when they mean noon
20 if s == '24:00': s = '12:00'
21
22 o.add_range(day, s, f)
23
24
25 store_number_results = response.xpath('//dt[@class="lsp_number"]/text()')
26 if store_number_results:
27 ref = store_number_results[-1].extract().strip()
28
29 yield GeojsonPointItem(
30 lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
31 lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
32 phone=response.xpath('//p[@itemprop="telephone"]/text()').extract_first(),
33 addr_full=response.xpath('//p[@itemprop="streetAddress"]/text()').extract_first(),
34 city=response.xpath('//p[@itemprop="addressLocality"]/text()').extract_first(),
35 state=response.xpath('//p[@itemprop="addressRegion"]/text()').extract_first(),
36 postcode=response.xpath('//p[@itemprop="postalCode"]/text()').extract_first(),
37 website=response.url,
38 ref=ref,
39 opening_hours=o.as_opening_hours(),
40 )
41
42 def parse(self, response):
43 for state in response.xpath('//div[@style="float: left; width: 200px;"]/a/@href').extract():
44 yield scrapy.Request(
45 response.urljoin(state),
46 callback=self.parse,
47 )
48
49 for store in response.xpath('//div[@style="float: left; width: 300px; padding-top: 10px;"]/a/@href').extract():
50 yield scrapy.Request(
51 response.urljoin(store),
52 callback=self.parse_store,
53 )
54
[end of locations/spiders/officedepot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/officedepot.py b/locations/spiders/officedepot.py
--- a/locations/spiders/officedepot.py
+++ b/locations/spiders/officedepot.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+import json
import scrapy
from locations.items import GeojsonPointItem
@@ -7,8 +8,41 @@
class OfficedepotSpider(scrapy.Spider):
name = 'officedepot'
- allowed_domains = ['www.officedepot.com']
- start_urls = ['https://www.officedepot.com/storelocator/states/']
+ allowed_domains = ["where2getit.com"]
+
+ def start_requests(self):
+ url = 'https://locations.where2getit.com/officedepot/rest/getlist?like=0.9145201524205426&lang=en_US'
+
+ headers = {
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Origin': 'https://hosted.where2getit.com',
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Accept': 'application/json, text/javascript, */*; q=0.01',
+ 'Referer': 'https://hosted.where2getit.com/officedepot/2015/index1.html',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ }
+
+ form_data = {
+ "request": {
+ "appkey": "592778B0-A13B-11EB-B3DB-84030D516365",
+ "formdata": {
+ "order": "city",
+ "objectname": "Locator::Store",
+ "softmatch": "1",
+ "where": {
+ }
+ }
+ }
+ }
+
+ yield scrapy.http.FormRequest(
+ url=url,
+ method='POST',
+ body=json.dumps(form_data),
+ headers=headers,
+ callback=self.parse,
+ )
def parse_store(self, response):
o = OpeningHours()
@@ -40,14 +74,20 @@
)
def parse(self, response):
- for state in response.xpath('//div[@style="float: left; width: 200px;"]/a/@href').extract():
- yield scrapy.Request(
- response.urljoin(state),
- callback=self.parse,
- )
-
- for store in response.xpath('//div[@style="float: left; width: 300px; padding-top: 10px;"]/a/@href').extract():
- yield scrapy.Request(
- response.urljoin(store),
- callback=self.parse_store,
- )
+ data = json.loads(response.body_as_unicode())
+
+ for store in data["response"]["collection"]:
+ properties = {
+ 'ref': store["clientkey"],
+ 'name': store.get("name"),
+ 'addr_full': store["address1"],
+ 'city': store["city"],
+ 'state': store["state"],
+ 'postcode': store["postalcode"],
+ 'country': store["country"],
+ 'lat': store["latitude"],
+ 'lon': store["longitude"],
+ 'phone': store["phone"],
+ }
+
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/officedepot.py b/locations/spiders/officedepot.py\n--- a/locations/spiders/officedepot.py\n+++ b/locations/spiders/officedepot.py\n@@ -1,4 +1,5 @@\n # -*- coding: utf-8 -*-\n+import json\n import scrapy\n \n from locations.items import GeojsonPointItem\n@@ -7,8 +8,41 @@\n \n class OfficedepotSpider(scrapy.Spider):\n name = 'officedepot'\n- allowed_domains = ['www.officedepot.com']\n- start_urls = ['https://www.officedepot.com/storelocator/states/']\n+ allowed_domains = [\"where2getit.com\"]\n+\n+ def start_requests(self):\n+ url = 'https://locations.where2getit.com/officedepot/rest/getlist?like=0.9145201524205426&lang=en_US'\n+\n+ headers = {\n+ 'Accept-Language': 'en-US,en;q=0.9',\n+ 'Origin': 'https://hosted.where2getit.com',\n+ 'Accept-Encoding': 'gzip, deflate, br',\n+ 'Accept': 'application/json, text/javascript, */*; q=0.01',\n+ 'Referer': 'https://hosted.where2getit.com/officedepot/2015/index1.html',\n+ 'Connection': 'keep-alive',\n+ 'Content-Type': 'application/json',\n+ }\n+\n+ form_data = {\n+ \"request\": {\n+ \"appkey\": \"592778B0-A13B-11EB-B3DB-84030D516365\",\n+ \"formdata\": {\n+ \"order\": \"city\",\n+ \"objectname\": \"Locator::Store\",\n+ \"softmatch\": \"1\",\n+ \"where\": {\n+ }\n+ }\n+ }\n+ }\n+\n+ yield scrapy.http.FormRequest(\n+ url=url,\n+ method='POST',\n+ body=json.dumps(form_data),\n+ headers=headers,\n+ callback=self.parse,\n+ )\n \n def parse_store(self, response):\n o = OpeningHours()\n@@ -40,14 +74,20 @@\n )\n \n def parse(self, response):\n- for state in response.xpath('//div[@style=\"float: left; width: 200px;\"]/a/@href').extract():\n- yield scrapy.Request(\n- response.urljoin(state),\n- callback=self.parse,\n- )\n-\n- for store in response.xpath('//div[@style=\"float: left; width: 300px; padding-top: 10px;\"]/a/@href').extract():\n- yield scrapy.Request(\n- response.urljoin(store),\n- callback=self.parse_store,\n- )\n+ data = json.loads(response.body_as_unicode())\n+\n+ for store in data[\"response\"][\"collection\"]:\n+ properties = {\n+ 'ref': store[\"clientkey\"],\n+ 'name': store.get(\"name\"),\n+ 'addr_full': store[\"address1\"],\n+ 'city': store[\"city\"],\n+ 'state': store[\"state\"],\n+ 'postcode': store[\"postalcode\"],\n+ 'country': store[\"country\"],\n+ 'lat': store[\"latitude\"],\n+ 'lon': store[\"longitude\"],\n+ 'phone': store[\"phone\"],\n+ }\n+\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider officedepot is broken\nDuring the global build at 2021-08-18-14-42-26, spider **officedepot** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/officedepot.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass OfficedepotSpider(scrapy.Spider):\n name = 'officedepot'\n allowed_domains = ['www.officedepot.com']\n start_urls = ['https://www.officedepot.com/storelocator/states/']\n\n def parse_store(self, response):\n o = OpeningHours()\n for d in response.xpath('//time[@itemprop=\"openingHours\"]/@datetime').extract():\n day, times = d.split(' ', 1)\n s, f = times.split('-')\n\n # They seem to have a bug where they put down 24:00 when they mean noon\n if s == 
'24:00': s = '12:00'\n\n o.add_range(day, s, f)\n\n\n store_number_results = response.xpath('//dt[@class=\"lsp_number\"]/text()')\n if store_number_results:\n ref = store_number_results[-1].extract().strip()\n\n yield GeojsonPointItem(\n lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n phone=response.xpath('//p[@itemprop=\"telephone\"]/text()').extract_first(),\n addr_full=response.xpath('//p[@itemprop=\"streetAddress\"]/text()').extract_first(),\n city=response.xpath('//p[@itemprop=\"addressLocality\"]/text()').extract_first(),\n state=response.xpath('//p[@itemprop=\"addressRegion\"]/text()').extract_first(),\n postcode=response.xpath('//p[@itemprop=\"postalCode\"]/text()').extract_first(),\n website=response.url,\n ref=ref,\n opening_hours=o.as_opening_hours(),\n )\n\n def parse(self, response):\n for state in response.xpath('//div[@style=\"float: left; width: 200px;\"]/a/@href').extract():\n yield scrapy.Request(\n response.urljoin(state),\n callback=self.parse,\n )\n\n for store in response.xpath('//div[@style=\"float: left; width: 300px; padding-top: 10px;\"]/a/@href').extract():\n yield scrapy.Request(\n response.urljoin(store),\n callback=self.parse_store,\n )\n", "path": "locations/spiders/officedepot.py"}]} | 1,338 | 778 |
gh_patches_debug_29567 | rasdani/github-patches | git_diff | GPflow__GPflow-1350 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tf2.2-rc1 gast requirement
Hi,
gpflow 2.0.0-rc1 has gast requirement 0.2.2.
TensorFlow has gast requirement 0.3.3 from 2.2-rc1, which is incompatible with gpflow requirement.
Best Regards,
Marco
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # pylint: skip-file
5
6 import os
7 import sys
8 from pathlib import Path
9
10 from pkg_resources import parse_version
11 from setuptools import find_packages, setup
12
13 is_py37 = sys.version_info.major == 3 and sys.version_info.minor == 7
14 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # copied from the docs
15
16 # Dependencies of GPflow
17 requirements = [
18 'numpy>=1.10.0',
19 'scipy>=0.18.0',
20 'multipledispatch>=0.4.9',
21 'tabulate',
22 'gast==0.2.2',
23 ]
24
25 if not is_py37:
26 requirements.append("dataclasses")
27
28 if not on_rtd:
29 requirements.append("tensorflow-probability>=0.9")
30
31 min_tf_version = '2.1.0'
32 tf_cpu = 'tensorflow'
33 tf_gpu = 'tensorflow-gpu'
34
35 # Only detect TF if not installed or outdated. If not, do not do not list as
36 # requirement to avoid installing over e.g. tensorflow-gpu
37 # To avoid this, rely on importing rather than the package name (like pip).
38
39 try:
40 # If tf not installed, import raises ImportError
41 import tensorflow as tf
42 if parse_version(tf.__version__) < parse_version(min_tf_version):
43 # TF pre-installed, but below the minimum required version
44 raise DeprecationWarning("TensorFlow version below minimum requirement")
45 except (ImportError, DeprecationWarning):
46 # Add TensorFlow to dependencies to trigger installation/update
47 if not on_rtd:
48 # Do not add TF if we are installing GPflow on readthedocs
49 requirements.append(tf_cpu)
50
51 with open(str(Path(".", "VERSION").absolute())) as version_file:
52 version = version_file.read().strip()
53
54 packages = find_packages('.', exclude=["tests"])
55
56 setup(name='gpflow',
57 version=version,
58 author="James Hensman, Alex Matthews",
59 author_email="[email protected]",
60 description="Gaussian process methods in TensorFlow",
61 license="Apache License 2.0",
62 keywords="machine-learning gaussian-processes kernels tensorflow",
63 url="http://github.com/GPflow/GPflow",
64 packages=packages,
65 include_package_data=True,
66 install_requires=requirements,
67 extras_require={'Tensorflow with GPU': [tf_gpu]},
68 python_requires=">=3.6",
69 classifiers=[
70 'License :: OSI Approved :: Apache Software License',
71 'Natural Language :: English',
72 'Operating System :: MacOS :: MacOS X',
73 'Operating System :: Microsoft :: Windows',
74 'Operating System :: POSIX :: Linux',
75 'Programming Language :: Python :: 3.6',
76 'Topic :: Scientific/Engineering :: Artificial Intelligence'
77 ])
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,8 +18,7 @@
'numpy>=1.10.0',
'scipy>=0.18.0',
'multipledispatch>=0.4.9',
- 'tabulate',
- 'gast==0.2.2',
+ 'tabulate'
]
if not is_py37:
@@ -32,6 +31,22 @@
tf_cpu = 'tensorflow'
tf_gpu = 'tensorflow-gpu'
+
+# for latest_version() [see https://github.com/GPflow/GPflow/issues/1348]:
+def latest_version(package_name):
+ import json
+ from urllib import request
+ import re
+
+ url = f"https://pypi.python.org/pypi/{package_name}/json"
+ data = json.load(request.urlopen(url))
+ # filter out rc and beta releases and, more generally, any releases that
+ # do not contain exclusively numbers and dots.
+ versions = [parse_version(v) for v in data["releases"].keys() if re.match("^[0-9.]+$", v)]
+ versions.sort()
+ return versions[-1] # return latest version
+
+
# Only detect TF if not installed or outdated. If not, do not do not list as
# requirement to avoid installing over e.g. tensorflow-gpu
# To avoid this, rely on importing rather than the package name (like pip).
@@ -47,6 +62,9 @@
if not on_rtd:
# Do not add TF if we are installing GPflow on readthedocs
requirements.append(tf_cpu)
+ gast_requirement = 'gast>=0.2.2,<0.3' if latest_version('tensorflow') < parse_version('2.2') else 'gast>=0.3.3'
+ requirements.append(gast_requirement)
+
with open(str(Path(".", "VERSION").absolute())) as version_file:
version = version_file.read().strip()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,8 +18,7 @@\n 'numpy>=1.10.0',\n 'scipy>=0.18.0',\n 'multipledispatch>=0.4.9',\n- 'tabulate',\n- 'gast==0.2.2',\n+ 'tabulate'\n ]\n \n if not is_py37:\n@@ -32,6 +31,22 @@\n tf_cpu = 'tensorflow'\n tf_gpu = 'tensorflow-gpu'\n \n+\n+# for latest_version() [see https://github.com/GPflow/GPflow/issues/1348]:\n+def latest_version(package_name):\n+ import json\n+ from urllib import request\n+ import re\n+\n+ url = f\"https://pypi.python.org/pypi/{package_name}/json\"\n+ data = json.load(request.urlopen(url))\n+ # filter out rc and beta releases and, more generally, any releases that\n+ # do not contain exclusively numbers and dots.\n+ versions = [parse_version(v) for v in data[\"releases\"].keys() if re.match(\"^[0-9.]+$\", v)] \n+ versions.sort()\n+ return versions[-1] # return latest version\n+\n+\n # Only detect TF if not installed or outdated. If not, do not do not list as\n # requirement to avoid installing over e.g. tensorflow-gpu\n # To avoid this, rely on importing rather than the package name (like pip).\n@@ -47,6 +62,9 @@\n if not on_rtd:\n # Do not add TF if we are installing GPflow on readthedocs\n requirements.append(tf_cpu)\n+ gast_requirement = 'gast>=0.2.2,<0.3' if latest_version('tensorflow') < parse_version('2.2') else 'gast>=0.3.3'\n+ requirements.append(gast_requirement)\n+ \n \n with open(str(Path(\".\", \"VERSION\").absolute())) as version_file:\n version = version_file.read().strip()\n", "issue": "tf2.2-rc1 gast requirement\nHi,\r\n\r\ngpflow 2.0.0-rc1 has gast requirement 0.2.2.\r\n\r\nTensorFlow has gast requirement 0.3.3 from 2.2-rc1, which is incompatible with gpflow requirement.\r\n\r\nBest Regards,\r\n\r\nMarco\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# pylint: skip-file\n\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom pkg_resources import parse_version\nfrom setuptools import find_packages, setup\n\nis_py37 = sys.version_info.major == 3 and sys.version_info.minor == 7\non_rtd = os.environ.get('READTHEDOCS', None) == 'True' # copied from the docs\n\n# Dependencies of GPflow\nrequirements = [\n 'numpy>=1.10.0',\n 'scipy>=0.18.0',\n 'multipledispatch>=0.4.9',\n 'tabulate',\n 'gast==0.2.2',\n]\n\nif not is_py37:\n requirements.append(\"dataclasses\")\n\nif not on_rtd:\n requirements.append(\"tensorflow-probability>=0.9\")\n\nmin_tf_version = '2.1.0'\ntf_cpu = 'tensorflow'\ntf_gpu = 'tensorflow-gpu'\n\n# Only detect TF if not installed or outdated. If not, do not do not list as\n# requirement to avoid installing over e.g. 
tensorflow-gpu\n# To avoid this, rely on importing rather than the package name (like pip).\n\ntry:\n # If tf not installed, import raises ImportError\n import tensorflow as tf\n if parse_version(tf.__version__) < parse_version(min_tf_version):\n # TF pre-installed, but below the minimum required version\n raise DeprecationWarning(\"TensorFlow version below minimum requirement\")\nexcept (ImportError, DeprecationWarning):\n # Add TensorFlow to dependencies to trigger installation/update\n if not on_rtd:\n # Do not add TF if we are installing GPflow on readthedocs\n requirements.append(tf_cpu)\n\nwith open(str(Path(\".\", \"VERSION\").absolute())) as version_file:\n version = version_file.read().strip()\n\npackages = find_packages('.', exclude=[\"tests\"])\n\nsetup(name='gpflow',\n version=version,\n author=\"James Hensman, Alex Matthews\",\n author_email=\"[email protected]\",\n description=\"Gaussian process methods in TensorFlow\",\n license=\"Apache License 2.0\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"http://github.com/GPflow/GPflow\",\n packages=packages,\n include_package_data=True,\n install_requires=requirements,\n extras_require={'Tensorflow with GPU': [tf_gpu]},\n python_requires=\">=3.6\",\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence'\n ])\n", "path": "setup.py"}]} | 1,359 | 463 |
gh_patches_debug_18286 | rasdani/github-patches | git_diff | spotify__luigi-908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception when a command-line parameter contains a non-ascii character
Hey,
We've just upgraded to Luigi 1.1.2, and now we get an error when launching Tasks that have utf-8 characters on the parameters. (using python 2.7.3)
Here's the stacktrace we get :
```
Traceback (most recent call last):
File "examples/top_artists.py", line 283, in <module>
luigi.run()
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py", line 434, in run
return interface.run(tasks, worker_scheduler_factory, override_defaults=override_defaults)
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py", line 165, in run
not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))):
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py", line 59, in acquire_for
my_pid, my_cmd, pid_file = get_info(pid_dir)
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py", line 44, in get_info
pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 97: ordinal not in range(128)
```
Here I've just added a `plop = luigi.Parameter()` to `AggregateArtists`, and called it with
`python examples/top_artists.py AggregateArtists --local-scheduler --date-interval 2012-06 --plop à`
The error seems to come from the [Python3 refactoring](https://github.com/spotify/luigi/pull/745). Removing `.encode('utf8')` on [this line](https://github.com/spotify/luigi/blob/master/luigi/lock.py#L44) seems to solve the problem, but I guess it was added for a reason ^^.
Any ideas of how we could solve this on our side, other than removing the encode? (I'll try to add a test for it later)
</issue>
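The root cause is a Python 2/3 difference: under Python 2, `my_cmd` is already a byte string, and calling `.encode('utf8')` on it makes the interpreter first decode it with the ASCII codec — which is exactly what raises the `UnicodeDecodeError` on bytes such as `0xc3`. A minimal version-safe sketch (illustrative only; it guards on `sys.version_info` rather than the `six` helper used in the actual fix):

```python
# -*- coding: utf-8 -*-
import hashlib
import sys


def cmd_hash(my_cmd):
    """md5 hex digest of a command line, working on both Python 2 and 3."""
    if sys.version_info[0] >= 3:
        data = my_cmd.encode("utf8")   # text -> bytes
    else:
        data = my_cmd                  # already bytes under Python 2
    return hashlib.md5(data).hexdigest()
```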
<code>
[start of luigi/lock.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Locking functionality when launching things from the command line.
19 Uses a pidfile.
20 This prevents multiple identical workflows to be launched simultaneously.
21 """
22 from __future__ import print_function
23
24 import hashlib
25 import os
26
27
28 def getpcmd(pid):
29 """
30 Returns command of process.
31
32 :param pid:
33 """
34 cmd = 'ps -p %s -o command=' % (pid,)
35 p = os.popen(cmd, 'r')
36 return p.readline().strip()
37
38
39 def get_info(pid_dir):
40 # Check the name and pid of this process
41 my_pid = os.getpid()
42 my_cmd = getpcmd(my_pid)
43
44 pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'
45
46 return my_pid, my_cmd, pid_file
47
48
49 def acquire_for(pid_dir, num_available=1):
50 """
51 Makes sure the process is only run once at the same time with the same name.
52
53 Notice that we since we check the process name, different parameters to the same
54 command can spawn multiple processes at the same time, i.e. running
55 "/usr/bin/my_process" does not prevent anyone from launching
56 "/usr/bin/my_process --foo bar".
57 """
58
59 my_pid, my_cmd, pid_file = get_info(pid_dir)
60
61 # Check if there is a pid file corresponding to this name
62 if not os.path.exists(pid_dir):
63 os.mkdir(pid_dir)
64 os.chmod(pid_dir, 0o777)
65
66 pids = set()
67 pid_cmds = {}
68 if os.path.exists(pid_file):
69 # There is such a file - read the pid and look up its process name
70 pids.update(filter(None, map(str.strip, open(pid_file))))
71 pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)
72 matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))
73
74 if len(matching_pids) >= num_available:
75 # We are already running under a different pid
76 print('Pid(s)', ', '.join(matching_pids), 'already running')
77 return False
78 else:
79 # The pid belongs to something else, we could
80 pass
81 pid_cmds[str(my_pid)] = my_cmd
82
83 # Write pids
84 pids.add(str(my_pid))
85 with open(pid_file, 'w') as f:
86 f.writelines('%s\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))
87
88 # Make the file writable by all
89 if os.name == 'nt':
90 pass
91 else:
92 s = os.stat(pid_file)
93 if os.getuid() == s.st_uid:
94 os.chmod(pid_file, s.st_mode | 0o777)
95
96 return True
97
[end of luigi/lock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/luigi/lock.py b/luigi/lock.py
--- a/luigi/lock.py
+++ b/luigi/lock.py
@@ -24,6 +24,8 @@
import hashlib
import os
+from luigi import six
+
def getpcmd(pid):
"""
@@ -32,16 +34,23 @@
:param pid:
"""
cmd = 'ps -p %s -o command=' % (pid,)
- p = os.popen(cmd, 'r')
- return p.readline().strip()
+ with os.popen(cmd, 'r') as p:
+ return p.readline().strip()
-def get_info(pid_dir):
+def get_info(pid_dir, my_pid=None):
# Check the name and pid of this process
- my_pid = os.getpid()
+ if my_pid is None:
+ my_pid = os.getpid()
+
my_cmd = getpcmd(my_pid)
- pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'
+ if six.PY3:
+ cmd_hash = my_cmd.encode('utf8')
+ else:
+ cmd_hash = my_cmd
+
+ pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'
return my_pid, my_cmd, pid_file
| {"golden_diff": "diff --git a/luigi/lock.py b/luigi/lock.py\n--- a/luigi/lock.py\n+++ b/luigi/lock.py\n@@ -24,6 +24,8 @@\n import hashlib\n import os\n \n+from luigi import six\n+\n \n def getpcmd(pid):\n \"\"\"\n@@ -32,16 +34,23 @@\n :param pid:\n \"\"\"\n cmd = 'ps -p %s -o command=' % (pid,)\n- p = os.popen(cmd, 'r')\n- return p.readline().strip()\n+ with os.popen(cmd, 'r') as p:\n+ return p.readline().strip()\n \n \n-def get_info(pid_dir):\n+def get_info(pid_dir, my_pid=None):\n # Check the name and pid of this process\n- my_pid = os.getpid()\n+ if my_pid is None:\n+ my_pid = os.getpid()\n+\n my_cmd = getpcmd(my_pid)\n \n- pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'\n+ if six.PY3:\n+ cmd_hash = my_cmd.encode('utf8')\n+ else:\n+ cmd_hash = my_cmd\n+\n+ pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'\n \n return my_pid, my_cmd, pid_file\n", "issue": "Exception when a command-line parametter contains a non-ascii character\nHey,\n\nWe've just upgraded to Luigi 1.1.2, and now we get an error when launching Tasks that have utf-8 characters on the parameters. (using python 2.7.3)\n\nHere's the stacktrace we get : \n\n```\nTraceback (most recent call last):\n File \"examples/top_artists.py\", line 283, in <module>\n luigi.run()\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py\", line 434, in run\n return interface.run(tasks, worker_scheduler_factory, override_defaults=override_defaults)\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py\", line 165, in run\n not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))):\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py\", line 59, in acquire_for\n my_pid, my_cmd, pid_file = get_info(pid_dir)\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py\", line 44, in get_info\n pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 97: ordinal not in range(128)\n```\n\nHere I've just added a `plop = luigi.Parameter()` to `AggregateArtists`, and called it with \n`python examples/top_artists.py AggregateArtists --local-scheduler --date-interval 2012-06 --plop \u00e0`\n\nThe error seems to come from the [Python3 refactoring](https://github.com/spotify/luigi/pull/745). Removing `.encode('utf8')` on [this line](https://github.com/spotify/luigi/blob/master/luigi/lock.py#L44) seems to solve the problem, but I guess it was added for a reason ^^.\n\nAny ideas of how we could solve this on our side, other than removing the encode? 
(I'll try to add a test for it later)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nLocking functionality when launching things from the command line.\nUses a pidfile.\nThis prevents multiple identical workflows to be launched simultaneously.\n\"\"\"\nfrom __future__ import print_function\n\nimport hashlib\nimport os\n\n\ndef getpcmd(pid):\n \"\"\"\n Returns command of process.\n\n :param pid:\n \"\"\"\n cmd = 'ps -p %s -o command=' % (pid,)\n p = os.popen(cmd, 'r')\n return p.readline().strip()\n\n\ndef get_info(pid_dir):\n # Check the name and pid of this process\n my_pid = os.getpid()\n my_cmd = getpcmd(my_pid)\n\n pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'\n\n return my_pid, my_cmd, pid_file\n\n\ndef acquire_for(pid_dir, num_available=1):\n \"\"\"\n Makes sure the process is only run once at the same time with the same name.\n\n Notice that we since we check the process name, different parameters to the same\n command can spawn multiple processes at the same time, i.e. running\n \"/usr/bin/my_process\" does not prevent anyone from launching\n \"/usr/bin/my_process --foo bar\".\n \"\"\"\n\n my_pid, my_cmd, pid_file = get_info(pid_dir)\n\n # Check if there is a pid file corresponding to this name\n if not os.path.exists(pid_dir):\n os.mkdir(pid_dir)\n os.chmod(pid_dir, 0o777)\n\n pids = set()\n pid_cmds = {}\n if os.path.exists(pid_file):\n # There is such a file - read the pid and look up its process name\n pids.update(filter(None, map(str.strip, open(pid_file))))\n pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)\n matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))\n\n if len(matching_pids) >= num_available:\n # We are already running under a different pid\n print('Pid(s)', ', '.join(matching_pids), 'already running')\n return False\n else:\n # The pid belongs to something else, we could\n pass\n pid_cmds[str(my_pid)] = my_cmd\n\n # Write pids\n pids.add(str(my_pid))\n with open(pid_file, 'w') as f:\n f.writelines('%s\\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))\n\n # Make the file writable by all\n if os.name == 'nt':\n pass\n else:\n s = os.stat(pid_file)\n if os.getuid() == s.st_uid:\n os.chmod(pid_file, s.st_mode | 0o777)\n\n return True\n", "path": "luigi/lock.py"}]} | 2,001 | 314 |
gh_patches_debug_22058 | rasdani/github-patches | git_diff | pex-tool__pex-258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update PyPI page
It would be nice if the `README.rst` were included in the `setup.py` `long_description` in addition to the `CHANGES.rst` so that users browsing PyPI could read the README without having to travel to GitHub.
Would also be nice if the trove classifiers in `setup.py` reflected which versions of Python were officially supported (e.g. `'Programming Language :: Python :: 3.5'`).
</issue>
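The request amounts to concatenating both files into `long_description` and widening the classifier list. A minimal sketch of the first part (illustrative only; file names taken from the issue):

```python
import os

here = os.path.dirname(__file__)

# Show the README first, then the changelog, on the PyPI page.
parts = []
for name in ("README.rst", "CHANGES.rst"):
    with open(os.path.join(here, name)) as fp:
        parts.append(fp.read())
long_description = "\n".join(parts)
```

The classifier change is purely declarative — one `'Programming Language :: Python :: X.Y'` entry per officially supported version.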
<code>
[start of setup.py]
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import os
5
6 from setuptools import setup
7
8
9 with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:
10 LONG_DESCRIPTION = fp.read()
11
12
13 # This seems to be a fairly standard version file pattern.
14 #
15 # Populates the following variables:
16 # __version__
17 # __setuptools_requirement
18 # __wheel_requirement
19 __version__ = ''
20 version_py_file = os.path.join(os.path.dirname(__file__), 'pex', 'version.py')
21 with open(version_py_file) as version_py:
22 exec(compile(version_py.read(), version_py_file, 'exec'))
23
24
25 setup(
26 name = 'pex',
27 version = __version__,
28 description = "The PEX packaging toolchain.",
29 long_description = LONG_DESCRIPTION,
30 url = 'https://github.com/pantsbuild/pex',
31 license = 'Apache License, Version 2.0',
32 zip_safe = True,
33 classifiers = [
34 'Intended Audience :: Developers',
35 'License :: OSI Approved :: Apache Software License',
36 'Operating System :: OS Independent',
37 'Programming Language :: Python',
38 ],
39 packages = [
40 'pex',
41 'pex.bin',
42 'pex.commands',
43 ],
44 install_requires = [
45 SETUPTOOLS_REQUIREMENT,
46 ],
47 tests_require = [
48 'mock',
49 'twitter.common.contextutil>=0.3.1,<0.4.0',
50 'twitter.common.lang>=0.3.1,<0.4.0',
51 'twitter.common.testing>=0.3.1,<0.4.0',
52 'twitter.common.dirutil>=0.3.1,<0.4.0',
53 'pytest',
54 ],
55 entry_points = {
56 'distutils.commands': [
57 'bdist_pex = pex.commands.bdist_pex:bdist_pex',
58 ],
59 'console_scripts': [
60 'pex = pex.bin.pex:main',
61 ],
62 },
63 )
64
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,9 +5,11 @@
from setuptools import setup
+with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fp:
+ LONG_DESCRIPTION = fp.read() + '\n'
with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:
- LONG_DESCRIPTION = fp.read()
+ LONG_DESCRIPTION += fp.read()
# This seems to be a fairly standard version file pattern.
@@ -33,8 +35,17 @@
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
- 'Operating System :: OS Independent',
+ 'Operating System :: Unix',
+ 'Operating System :: POSIX :: Linux',
+ 'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
],
packages = [
'pex',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,9 +5,11 @@\n \n from setuptools import setup\n \n+with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fp:\n+ LONG_DESCRIPTION = fp.read() + '\\n'\n \n with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:\n- LONG_DESCRIPTION = fp.read()\n+ LONG_DESCRIPTION += fp.read()\n \n \n # This seems to be a fairly standard version file pattern.\n@@ -33,8 +35,17 @@\n classifiers = [\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n- 'Operating System :: OS Independent',\n+ 'Operating System :: Unix',\n+ 'Operating System :: POSIX :: Linux',\n+ 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python',\n+ 'Programming Language :: Python :: 2',\n+ 'Programming Language :: Python :: 2.6',\n+ 'Programming Language :: Python :: 2.7',\n+ 'Programming Language :: Python :: 3',\n+ 'Programming Language :: Python :: 3.3',\n+ 'Programming Language :: Python :: 3.4',\n+ 'Programming Language :: Python :: 3.5',\n ],\n packages = [\n 'pex',\n", "issue": "Update PyPI page\nIt would be nice if the `README.rst` were included in the `setup.py` `long_description` in addition to the `CHANGES.rst` so that users browsing PyPI could read the README without having to travel to GitHub.\n\nWould also be nice if the trove classifiers in `setup.py` reflected which versions of Python were officially supported (e.g. `'Programming Language :: Python :: 3.5'`).\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom setuptools import setup\n\n\nwith open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:\n LONG_DESCRIPTION = fp.read()\n\n\n# This seems to be a fairly standard version file pattern.\n#\n# Populates the following variables:\n# __version__\n# __setuptools_requirement\n# __wheel_requirement\n__version__ = ''\nversion_py_file = os.path.join(os.path.dirname(__file__), 'pex', 'version.py')\nwith open(version_py_file) as version_py:\n exec(compile(version_py.read(), version_py_file, 'exec'))\n\n\nsetup(\n name = 'pex',\n version = __version__,\n description = \"The PEX packaging toolchain.\",\n long_description = LONG_DESCRIPTION,\n url = 'https://github.com/pantsbuild/pex',\n license = 'Apache License, Version 2.0',\n zip_safe = True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages = [\n 'pex',\n 'pex.bin',\n 'pex.commands',\n ],\n install_requires = [\n SETUPTOOLS_REQUIREMENT,\n ],\n tests_require = [\n 'mock',\n 'twitter.common.contextutil>=0.3.1,<0.4.0',\n 'twitter.common.lang>=0.3.1,<0.4.0',\n 'twitter.common.testing>=0.3.1,<0.4.0',\n 'twitter.common.dirutil>=0.3.1,<0.4.0',\n 'pytest',\n ],\n entry_points = {\n 'distutils.commands': [\n 'bdist_pex = pex.commands.bdist_pex:bdist_pex',\n ],\n 'console_scripts': [\n 'pex = pex.bin.pex:main',\n ],\n },\n)\n", "path": "setup.py"}]} | 1,200 | 299 |
gh_patches_debug_19082 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version to 0.17.0
See discussion in #1166.
</issue>
<code>
[start of mkdocs/contrib/legacy_search/__init__.py]
1 # coding: utf-8
2
3 from __future__ import absolute_import, unicode_literals
4
5 import os
6 import logging
7 from mkdocs import utils
8 from mkdocs.plugins import BasePlugin
9 from mkdocs.contrib.legacy_search.search_index import SearchIndex
10
11
12 log = logging.getLogger(__name__)
13
14
15 class SearchPlugin(BasePlugin):
16 """ Add a search feature to MkDocs. """
17
18 def on_config(self, config, **kwargs):
19 "Add plugin templates and scripts to config."
20 path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
21 config['theme'].dirs.append(path)
22 config['theme'].static_templates.add('search.html')
23 config['extra_javascript'].append('search/require.js')
24 config['extra_javascript'].append('search/search.js')
25 return config
26
27 def on_pre_build(self, config, **kwargs):
28 "Create search index instance for later use."
29 self.search_index = SearchIndex()
30
31 def on_page_context(self, context, **kwargs):
32 "Add page to search index."
33 self.search_index.add_entry_from_context(context['page'])
34
35 def on_post_build(self, config, **kwargs):
36 "Build search index."
37 search_index = self.search_index.generate_search_index()
38 json_output_path = os.path.join(config['site_dir'], 'search', 'search_index.json')
39 utils.write_file(search_index.encode('utf-8'), json_output_path)
40
[end of mkdocs/contrib/legacy_search/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/contrib/legacy_search/__init__.py b/mkdocs/contrib/legacy_search/__init__.py
--- a/mkdocs/contrib/legacy_search/__init__.py
+++ b/mkdocs/contrib/legacy_search/__init__.py
@@ -17,11 +17,13 @@
def on_config(self, config, **kwargs):
"Add plugin templates and scripts to config."
- path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
- config['theme'].dirs.append(path)
- config['theme'].static_templates.add('search.html')
- config['extra_javascript'].append('search/require.js')
- config['extra_javascript'].append('search/search.js')
+ if 'include_search_page' in config['theme'] and config['theme']['include_search_page']:
+ config['theme'].static_templates.add('search.html')
+ if not ('search_index_only' in config['theme'] and config['theme']['search_index_only']):
+ path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
+ config['theme'].dirs.append(path)
+ config['extra_javascript'].append('search/require.js')
+ config['extra_javascript'].append('search/search.js')
return config
def on_pre_build(self, config, **kwargs):
| {"golden_diff": "diff --git a/mkdocs/contrib/legacy_search/__init__.py b/mkdocs/contrib/legacy_search/__init__.py\n--- a/mkdocs/contrib/legacy_search/__init__.py\n+++ b/mkdocs/contrib/legacy_search/__init__.py\n@@ -17,11 +17,13 @@\n \n def on_config(self, config, **kwargs):\n \"Add plugin templates and scripts to config.\"\n- path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n- config['theme'].dirs.append(path)\n- config['theme'].static_templates.add('search.html')\n- config['extra_javascript'].append('search/require.js')\n- config['extra_javascript'].append('search/search.js')\n+ if 'include_search_page' in config['theme'] and config['theme']['include_search_page']:\n+ config['theme'].static_templates.add('search.html')\n+ if not ('search_index_only' in config['theme'] and config['theme']['search_index_only']):\n+ path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n+ config['theme'].dirs.append(path)\n+ config['extra_javascript'].append('search/require.js')\n+ config['extra_javascript'].append('search/search.js')\n return config\n \n def on_pre_build(self, config, **kwargs):\n", "issue": "Version to 0.17.0\nSee discussion in #1166.\n", "before_files": [{"content": "# coding: utf-8\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport logging\nfrom mkdocs import utils\nfrom mkdocs.plugins import BasePlugin\nfrom mkdocs.contrib.legacy_search.search_index import SearchIndex\n\n\nlog = logging.getLogger(__name__)\n\n\nclass SearchPlugin(BasePlugin):\n \"\"\" Add a search feature to MkDocs. \"\"\"\n\n def on_config(self, config, **kwargs):\n \"Add plugin templates and scripts to config.\"\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n config['theme'].dirs.append(path)\n config['theme'].static_templates.add('search.html')\n config['extra_javascript'].append('search/require.js')\n config['extra_javascript'].append('search/search.js')\n return config\n\n def on_pre_build(self, config, **kwargs):\n \"Create search index instance for later use.\"\n self.search_index = SearchIndex()\n\n def on_page_context(self, context, **kwargs):\n \"Add page to search index.\"\n self.search_index.add_entry_from_context(context['page'])\n\n def on_post_build(self, config, **kwargs):\n \"Build search index.\"\n search_index = self.search_index.generate_search_index()\n json_output_path = os.path.join(config['site_dir'], 'search', 'search_index.json')\n utils.write_file(search_index.encode('utf-8'), json_output_path)\n", "path": "mkdocs/contrib/legacy_search/__init__.py"}]} | 943 | 302 |
gh_patches_debug_79 | rasdani/github-patches | git_diff | flairNLP__flair-447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
__version__ attribute?
I'm always frustrated when flair doesn't have a __version__ attribute... :-)
Please add a __version__ attribute to the module.
Thank you!
DC
</issue>
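What is being asked for is the conventional one-line module constant. A tiny sketch of how it is exposed and consumed (the version string is taken from the accompanying diff):

```python
# in flair/__init__.py
__version__ = "0.4.1"

# downstream code can then introspect the installed release, e.g.:
#   import flair
#   print(flair.__version__)
```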
<code>
[start of flair/__init__.py]
1 import torch
2
3 from . import data
4 from . import models
5 from . import visual
6 from . import trainers
7
8 import logging.config
9
10
11 logging.config.dictConfig({
12 'version': 1,
13 'disable_existing_loggers': False,
14 'formatters': {
15 'standard': {
16 'format': '%(asctime)-15s %(message)s'
17 },
18 },
19 'handlers': {
20 'console': {
21 'level': 'INFO',
22 'class': 'logging.StreamHandler',
23 'formatter': 'standard',
24 'stream': 'ext://sys.stdout'
25 },
26 },
27 'loggers': {
28 'flair': {
29 'handlers': ['console'],
30 'level': 'INFO',
31 'propagate': False
32 }
33 },
34 'root': {
35 'handlers': ['console'],
36 'level': 'WARNING'
37 }
38 })
39
40 logger = logging.getLogger('flair')
41
42
43 device = None
44 if torch.cuda.is_available():
45 device = torch.device('cuda:0')
46 else:
47 device = torch.device('cpu')
48
[end of flair/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flair/__init__.py b/flair/__init__.py
--- a/flair/__init__.py
+++ b/flair/__init__.py
@@ -7,6 +7,7 @@
import logging.config
+__version__ = "0.4.1"
logging.config.dictConfig({
'version': 1,
| {"golden_diff": "diff --git a/flair/__init__.py b/flair/__init__.py\n--- a/flair/__init__.py\n+++ b/flair/__init__.py\n@@ -7,6 +7,7 @@\n \n import logging.config\n \n+__version__ = \"0.4.1\"\n \n logging.config.dictConfig({\n 'version': 1,\n", "issue": "__version__ attribute?\nI'm always frustrated when flair doesn't have a __version__attribute... :-)\r\n\r\nPlease, add a __version__attribute to the module.\r\n\r\nThank you!\r\nDC\r\n\n", "before_files": [{"content": "import torch\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\n\nimport logging.config\n\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'flair': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'WARNING'\n }\n})\n\nlogger = logging.getLogger('flair')\n\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n", "path": "flair/__init__.py"}]} | 892 | 77 |
gh_patches_debug_29935 | rasdani/github-patches | git_diff | google__jax-2481 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add multivariate normal pdf evaluation to jax.scipy
It would be great to have a multivariate Gaussian pdf/logpdf implementation, similar to the univariate version in [jax.scipy.stats.norm](https://jax.readthedocs.io/en/latest/_modules/jax/scipy/stats/norm.html#logpdf). I am currently working with this hacky function:
```
@jit
def multi_gauss_logpdf(x, mean, cov):
""" Calculate the probability density of a
sample from the multivariate normal. """
D = mean.shape[0]
(sign, logdet) = np.linalg.slogdet(cov)
p1 = D*np.log(2*np.pi) + logdet
p2 = (x-mean).T @ np.linalg.inv(cov) @ (x-mean)
return -1./2 * (p1 + p2)
batch_logpdf = vmap(multi_gauss_logpdf, in_axes=(0, None, None))
```
My `lax`/primitive knowledge is still fairly limited, but I will try to put together a PR. Any recommendations on how to speed things up?
</issue>
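On the speed question: the main win over the snippet above is to avoid `np.linalg.inv` and `slogdet` entirely and reuse a single Cholesky factor for both the log-determinant and the quadratic form — the same idea the accompanying diff uses inside JAX. A hedged user-level sketch (illustrative, not the library implementation):

```python
import jax.numpy as jnp
from jax import jit, vmap
from jax.scipy.linalg import solve_triangular


@jit
def mvn_logpdf(x, mean, cov):
    """log N(x | mean, cov) via one Cholesky factorisation."""
    dim = mean.shape[0]
    chol = jnp.linalg.cholesky(cov)                 # cov = chol @ chol.T
    y = solve_triangular(chol, x - mean, lower=True)
    log_det = 2.0 * jnp.sum(jnp.log(jnp.diag(chol)))
    quad = jnp.dot(y, y)                            # (x-m)^T cov^{-1} (x-m)
    return -0.5 * (dim * jnp.log(2.0 * jnp.pi) + log_det + quad)


batch_logpdf = vmap(mvn_logpdf, in_axes=(0, None, None))
```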
<code>
[start of jax/scipy/stats/multivariate_normal.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import numpy as np
17 import scipy.stats as osp_stats
18
19 from ... import lax
20 from ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps
21 from ...numpy.lax_numpy import dot, subtract, einsum
22 from ...numpy.linalg import det, inv
23
24
25 @_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)
26 def logpdf(x, mean, cov):
27 x, mean, cov = _promote_dtypes_inexact(x, mean, cov)
28 two = _constant_like(x, 2)
29 dim = _constant_like(x, mean.shape[0])
30 det_sig = det(cov).astype(cov.dtype)
31 log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),
32 det_sig))
33 x_shape = x.shape[:-1]
34 if x_shape:
35 x_2d = x.reshape((-1, mean.shape[0]))
36 quadratic = einsum("ij,jk,ik->i", subtract(x_2d, mean), inv(cov),
37 subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)
38 else:
39 quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)
40 return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)
41
42 @_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)
43 def pdf(x, mean, cov):
44 return lax.exp(logpdf(x, mean, cov))
45
[end of jax/scipy/stats/multivariate_normal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jax/scipy/stats/multivariate_normal.py b/jax/scipy/stats/multivariate_normal.py
--- a/jax/scipy/stats/multivariate_normal.py
+++ b/jax/scipy/stats/multivariate_normal.py
@@ -17,27 +17,29 @@
import scipy.stats as osp_stats
from ... import lax
+from ...lax_linalg import cholesky, triangular_solve
+from ... import numpy as jnp
from ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps
-from ...numpy.lax_numpy import dot, subtract, einsum
-from ...numpy.linalg import det, inv
@_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)
def logpdf(x, mean, cov):
x, mean, cov = _promote_dtypes_inexact(x, mean, cov)
- two = _constant_like(x, 2)
- dim = _constant_like(x, mean.shape[0])
- det_sig = det(cov).astype(cov.dtype)
- log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),
- det_sig))
- x_shape = x.shape[:-1]
- if x_shape:
- x_2d = x.reshape((-1, mean.shape[0]))
- quadratic = einsum("ij,jk,ik->i", subtract(x_2d, mean), inv(cov),
- subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)
+ if not mean.shape:
+ return -1/2 * (x - mean) ** 2 / cov - 1/2 * (np.log(2*np.pi) + jnp.log(cov))
else:
- quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)
- return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)
+ n = mean.shape[-1]
+ if not np.shape(cov):
+ y = x - mean
+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) / cov
+ - n/2 * (np.log(2*np.pi) + jnp.log(cov)))
+ else:
+ if cov.ndim < 2 or cov.shape[-2:] != (n, n):
+ raise ValueError("multivariate_normal.logpdf got incompatible shapes")
+ L = cholesky(cov)
+ y = triangular_solve(L, x - mean, lower=True, transpose_a=True)
+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) - n/2*np.log(2*np.pi)
+ - jnp.log(L.diagonal()).sum())
@_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)
def pdf(x, mean, cov):
| {"golden_diff": "diff --git a/jax/scipy/stats/multivariate_normal.py b/jax/scipy/stats/multivariate_normal.py\n--- a/jax/scipy/stats/multivariate_normal.py\n+++ b/jax/scipy/stats/multivariate_normal.py\n@@ -17,27 +17,29 @@\n import scipy.stats as osp_stats\n \n from ... import lax\n+from ...lax_linalg import cholesky, triangular_solve\n+from ... import numpy as jnp\n from ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps\n-from ...numpy.lax_numpy import dot, subtract, einsum\n-from ...numpy.linalg import det, inv\n \n \n @_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)\n def logpdf(x, mean, cov):\n x, mean, cov = _promote_dtypes_inexact(x, mean, cov)\n- two = _constant_like(x, 2)\n- dim = _constant_like(x, mean.shape[0])\n- det_sig = det(cov).astype(cov.dtype)\n- log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),\n- det_sig))\n- x_shape = x.shape[:-1]\n- if x_shape:\n- x_2d = x.reshape((-1, mean.shape[0]))\n- quadratic = einsum(\"ij,jk,ik->i\", subtract(x_2d, mean), inv(cov), \n- subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)\n+ if not mean.shape:\n+ return -1/2 * (x - mean) ** 2 / cov - 1/2 * (np.log(2*np.pi) + jnp.log(cov))\n else:\n- quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)\n- return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)\n+ n = mean.shape[-1]\n+ if not np.shape(cov):\n+ y = x - mean\n+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) / cov\n+ - n/2 * (np.log(2*np.pi) + jnp.log(cov)))\n+ else:\n+ if cov.ndim < 2 or cov.shape[-2:] != (n, n):\n+ raise ValueError(\"multivariate_normal.logpdf got incompatible shapes\")\n+ L = cholesky(cov)\n+ y = triangular_solve(L, x - mean, lower=True, transpose_a=True)\n+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) - n/2*np.log(2*np.pi)\n+ - jnp.log(L.diagonal()).sum())\n \n @_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)\n def pdf(x, mean, cov):\n", "issue": "Add multivariate normal pdf evalutation to jax.scipy\nIt would be great to have a Multivariate gaussian pdf/logpdf implementation, similar to the univariate version in [jax.scipy.stats.norm](https://jax.readthedocs.io/en/latest/_modules/jax/scipy/stats/norm.html#logpdf). I am currently working with this hacky function: \r\n\r\n```\r\n@jit\r\ndef multi_gauss_logpdf(x, mean, cov):\r\n \"\"\" Calculate the probability density of a\r\n sample from the multivariate normal. \"\"\"\r\n D = mean.shape[0]\r\n (sign, logdet) = np.linalg.slogdet(cov)\r\n p1 = D*np.log(2*np.pi) + logdet\r\n p2 = (x-mean).T @ np.linalg.inv(cov) @ (x-mean)\r\n return -1./2 * (p1 + p2)\r\n\r\nbatch_logpdf = vmap(multi_gauss_logpdf, in_axes=(0, None, None))\r\n```\r\n\r\nMy `lax`/primitive knowledge is still fairly limited but I will try to put together a pr. 
Any recommendations how to speed things up?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\nimport scipy.stats as osp_stats\n\nfrom ... import lax\nfrom ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps\nfrom ...numpy.lax_numpy import dot, subtract, einsum\nfrom ...numpy.linalg import det, inv\n\n\n@_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)\ndef logpdf(x, mean, cov):\n x, mean, cov = _promote_dtypes_inexact(x, mean, cov)\n two = _constant_like(x, 2)\n dim = _constant_like(x, mean.shape[0])\n det_sig = det(cov).astype(cov.dtype)\n log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),\n det_sig))\n x_shape = x.shape[:-1]\n if x_shape:\n x_2d = x.reshape((-1, mean.shape[0]))\n quadratic = einsum(\"ij,jk,ik->i\", subtract(x_2d, mean), inv(cov), \n subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)\n else:\n quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)\n return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)\n\n@_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)\ndef pdf(x, mean, cov):\n return lax.exp(logpdf(x, mean, cov))\n", "path": "jax/scipy/stats/multivariate_normal.py"}]} | 1,336 | 656 |
gh_patches_debug_17452 | rasdani/github-patches | git_diff | streamlink__streamlink-5908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.vkplay: vkplay.live has moved to another domain (live.vkplay.ru)
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
streamlink 6.7.2
### Description
https://vk.com/wall-212496568_91026
Yesterday, vkplay.live changed its domain. If you specify the old domain in the link to the stream, then everything still works, but on the site itself there are links to a new domain, to which the existing plugin does not respond.
I just tried to change the updated part of the domain in the plugin code (vkplay.live -> live.vkplay.ru), and everything seems to be working well. It's a bit difficult for me to create a pull request, but here's the corrected plugin on gist:
https://gist.github.com/oexlkinq/eef0a260dddad473c5febafd91b980d9
The old domain is also listed in the documentation (https://streamlink.github.io/plugins.html#vkplay).
### Debug log
```text
streamlink https://live.vkplay.ru/ruwarface 720p --loglevel=debug
[cli][debug] OS: Linux-6.8.1-arch1-1-x86_64-with-glibc2.39
[cli][debug] Python: 3.11.8
[cli][debug] OpenSSL: OpenSSL 3.2.1 30 Jan 2024
[cli][debug] Streamlink: 6.7.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2024.2.2
[cli][debug] exceptiongroup: 1.2.0
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 5.1.0
[cli][debug] pycountry: 23.12.11
[cli][debug] pycryptodome: 3.20.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.25.0
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.10.0
[cli][debug] urllib3: 1.26.18
[cli][debug] websocket-client: 1.7.0
[cli][debug] Arguments:
[cli][debug] url=https://live.vkplay.ru/ruwarface
[cli][debug] stream=['720p']
[cli][debug] --loglevel=debug
error: No plugin can handle URL: https://live.vkplay.ru/ruwarface
```
</issue>
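The change itself is essentially a one-line update to the URL matcher. A small self-contained check of a pattern that accepts both the old and the new domain (it mirrors the accompanying diff; channel names are assumed to be `\w+`, as in the existing plugin):

```python
import re

_url_re = re.compile(
    r"https?://(?:live\.vkplay\.ru|vkplay\.live)/(?P<channel_name>\w+)/?$"
)

for url in ("https://vkplay.live/ruwarface", "https://live.vkplay.ru/ruwarface"):
    match = _url_re.match(url)
    print(url, "->", match.group("channel_name") if match else "no match")
```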
<code>
[start of src/streamlink/plugins/vkplay.py]
1 """
2 $description Russian live-streaming platform for gaming and esports, owned by VKontakte.
3 $url vkplay.live
4 $type live
5 $metadata id
6 $metadata author
7 $metadata category
8 $metadata title
9 """
10
11 import logging
12 import re
13
14 from streamlink.plugin import Plugin, pluginmatcher
15 from streamlink.plugin.api import validate
16 from streamlink.stream.hls import HLSStream
17
18
19 log = logging.getLogger(__name__)
20
21
22 @pluginmatcher(re.compile(
23 r"https?://vkplay\.live/(?P<channel_name>\w+)/?$",
24 ))
25 class VKplay(Plugin):
26 API_URL = "https://api.vkplay.live/v1"
27
28 def _get_streams(self):
29 self.author = self.match.group("channel_name")
30 log.debug(f"Channel name: {self.author}")
31
32 data = self.session.http.get(
33 f"{self.API_URL}/blog/{self.author}/public_video_stream",
34 headers={"Referer": self.url},
35 acceptable_status=(200, 404),
36 schema=validate.Schema(
37 validate.parse_json(),
38 validate.any(
39 validate.all(
40 {"error": str, "error_description": str},
41 validate.get("error_description"),
42 ),
43 validate.all(
44 {
45 validate.optional("category"): validate.all(
46 {
47 "title": str,
48 },
49 validate.get("title"),
50 ),
51 "title": str,
52 "data": validate.any(
53 [
54 validate.all(
55 {
56 "vid": str,
57 "playerUrls": [
58 validate.all(
59 {
60 "type": str,
61 "url": validate.any("", validate.url()),
62 },
63 validate.union_get("type", "url"),
64 ),
65 ],
66 },
67 validate.union_get("vid", "playerUrls"),
68 ),
69 ],
70 [],
71 ),
72 },
73 validate.union_get(
74 "category",
75 "title",
76 ("data", 0),
77 ),
78 ),
79 ),
80 ),
81 )
82 if isinstance(data, str):
83 log.error(data)
84 return
85
86 self.category, self.title, streamdata = data
87 if not streamdata:
88 return
89
90 self.id, streams = streamdata
91
92 for streamtype, streamurl in streams:
93 if streamurl and streamtype == "live_hls":
94 return HLSStream.parse_variant_playlist(self.session, streamurl)
95
96
97 __plugin__ = VKplay
98
[end of src/streamlink/plugins/vkplay.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/vkplay.py b/src/streamlink/plugins/vkplay.py
--- a/src/streamlink/plugins/vkplay.py
+++ b/src/streamlink/plugins/vkplay.py
@@ -1,6 +1,6 @@
"""
$description Russian live-streaming platform for gaming and esports, owned by VKontakte.
-$url vkplay.live
+$url live.vkplay.ru
$type live
$metadata id
$metadata author
@@ -20,13 +20,13 @@
@pluginmatcher(re.compile(
- r"https?://vkplay\.live/(?P<channel_name>\w+)/?$",
+ r"https?://(?:live\.vkplay\.ru|vkplay\.live)/(?P<channel_name>\w+)/?$",
))
class VKplay(Plugin):
- API_URL = "https://api.vkplay.live/v1"
+ API_URL = "https://api.live.vkplay.ru/v1"
def _get_streams(self):
- self.author = self.match.group("channel_name")
+ self.author = self.match["channel_name"]
log.debug(f"Channel name: {self.author}")
data = self.session.http.get(
| {"golden_diff": "diff --git a/src/streamlink/plugins/vkplay.py b/src/streamlink/plugins/vkplay.py\n--- a/src/streamlink/plugins/vkplay.py\n+++ b/src/streamlink/plugins/vkplay.py\n@@ -1,6 +1,6 @@\n \"\"\"\n $description Russian live-streaming platform for gaming and esports, owned by VKontakte.\n-$url vkplay.live\n+$url live.vkplay.ru\n $type live\n $metadata id\n $metadata author\n@@ -20,13 +20,13 @@\n \n \n @pluginmatcher(re.compile(\n- r\"https?://vkplay\\.live/(?P<channel_name>\\w+)/?$\",\n+ r\"https?://(?:live\\.vkplay\\.ru|vkplay\\.live)/(?P<channel_name>\\w+)/?$\",\n ))\n class VKplay(Plugin):\n- API_URL = \"https://api.vkplay.live/v1\"\n+ API_URL = \"https://api.live.vkplay.ru/v1\"\n \n def _get_streams(self):\n- self.author = self.match.group(\"channel_name\")\n+ self.author = self.match[\"channel_name\"]\n log.debug(f\"Channel name: {self.author}\")\n \n data = self.session.http.get(\n", "issue": "plugins.vkplay: vkplay.live has moved to another domain (live.vkplay.ru)\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nstreamlink 6.7.2\n\n### Description\n\nhttps://vk.com/wall-212496568_91026\r\n\r\nyesterday, vkplay live changed its domain. if you specify the old domain in the link to the stream, then everything still works, but on the site itself there are links to a new domain, to which the existing plugin does not respond.\r\n\r\nI just tried to change the updated part of the domain in the plugin code (vkplay.live -> live.vkplay.ru ), and everything seems to be working well. 
It's a bit difficult for me to create a pull request, but here's the corrected plugin on gist:\r\nhttps://gist.github.com/oexlkinq/eef0a260dddad473c5febafd91b980d9\r\n\r\nthe old domain is also listed in the documentation (https://streamlink.github.io/plugins.html#vkplay)\n\n### Debug log\n\n```text\nstreamlink https://live.vkplay.ru/ruwarface 720p --loglevel=debug\r\n[cli][debug] OS: Linux-6.8.1-arch1-1-x86_64-with-glibc2.39\r\n[cli][debug] Python: 3.11.8\r\n[cli][debug] OpenSSL: OpenSSL 3.2.1 30 Jan 2024\r\n[cli][debug] Streamlink: 6.7.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2024.2.2\r\n[cli][debug] exceptiongroup: 1.2.0\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 5.1.0\r\n[cli][debug] pycountry: 23.12.11\r\n[cli][debug] pycryptodome: 3.20.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.25.0\r\n[cli][debug] trio-websocket: 0.11.1\r\n[cli][debug] typing-extensions: 4.10.0\r\n[cli][debug] urllib3: 1.26.18\r\n[cli][debug] websocket-client: 1.7.0\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://live.vkplay.ru/ruwarface\r\n[cli][debug] stream=['720p']\r\n[cli][debug] --loglevel=debug\r\nerror: No plugin can handle URL: https://live.vkplay.ru/ruwarface\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Russian live-streaming platform for gaming and esports, owned by VKontakte.\n$url vkplay.live\n$type live\n$metadata id\n$metadata author\n$metadata category\n$metadata title\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vkplay\\.live/(?P<channel_name>\\w+)/?$\",\n))\nclass VKplay(Plugin):\n API_URL = \"https://api.vkplay.live/v1\"\n\n def _get_streams(self):\n self.author = self.match.group(\"channel_name\")\n log.debug(f\"Channel name: {self.author}\")\n\n data = self.session.http.get(\n f\"{self.API_URL}/blog/{self.author}/public_video_stream\",\n headers={\"Referer\": self.url},\n acceptable_status=(200, 404),\n schema=validate.Schema(\n validate.parse_json(),\n validate.any(\n validate.all(\n {\"error\": str, \"error_description\": str},\n validate.get(\"error_description\"),\n ),\n validate.all(\n {\n validate.optional(\"category\"): validate.all(\n {\n \"title\": str,\n },\n validate.get(\"title\"),\n ),\n \"title\": str,\n \"data\": validate.any(\n [\n validate.all(\n {\n \"vid\": str,\n \"playerUrls\": [\n validate.all(\n {\n \"type\": str,\n \"url\": validate.any(\"\", validate.url()),\n },\n validate.union_get(\"type\", \"url\"),\n ),\n ],\n },\n validate.union_get(\"vid\", \"playerUrls\"),\n ),\n ],\n [],\n ),\n },\n validate.union_get(\n \"category\",\n \"title\",\n (\"data\", 0),\n ),\n ),\n ),\n ),\n )\n if isinstance(data, str):\n log.error(data)\n return\n\n self.category, self.title, streamdata = data\n if not streamdata:\n return\n\n self.id, streams = streamdata\n\n for streamtype, streamurl in streams:\n if streamurl and streamtype == \"live_hls\":\n return HLSStream.parse_variant_playlist(self.session, streamurl)\n\n\n__plugin__ = VKplay\n", "path": "src/streamlink/plugins/vkplay.py"}]} | 2,042 | 262 |
gh_patches_debug_958 | rasdani/github-patches | git_diff | nvaccess__nvda-10921 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VS Code: Reduce the number of times one has to use NVDA+Space to switch modes.
I just filed [this issue](https://github.com/microsoft/vscode/issues/93087) against VS Code where I suggest using targeted role="document" in those places that produce HTML output for consumption, to make NVDA switch in and out of browse mode in a smart, automated manner, reducing the number of times one has to use NVDA+Space to toggle modes. Examples I found while using the 1.44 VS Code Insider builds were:
* The Welcome page
* The details page for an extension
* The ReadMe file that may be displayed after an extension has been installed.
@leonardder suggested that, once this lands in stable, a modification might be needed for the VS Code app module. So filing this issue here.
</issue>
<code>
[start of source/appModules/code.py]
1 #appModules/code.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2019 NV Access Limited, Babbage B.V.
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6
7 import appModuleHandler
8
9 class AppModule(appModuleHandler.AppModule):
10 disableBrowseModeByDefault = True
11
[end of source/appModules/code.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/appModules/code.py b/source/appModules/code.py
deleted file mode 100644
--- a/source/appModules/code.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#appModules/code.py
-#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2019 NV Access Limited, Babbage B.V.
-#This file is covered by the GNU General Public License.
-#See the file COPYING for more details.
-
-import appModuleHandler
-
-class AppModule(appModuleHandler.AppModule):
- disableBrowseModeByDefault = True
| {"golden_diff": "diff --git a/source/appModules/code.py b/source/appModules/code.py\ndeleted file mode 100644\n--- a/source/appModules/code.py\n+++ /dev/null\n@@ -1,10 +0,0 @@\n-#appModules/code.py\n-#A part of NonVisual Desktop Access (NVDA)\n-#Copyright (C) 2019 NV Access Limited, Babbage B.V.\n-#This file is covered by the GNU General Public License.\n-#See the file COPYING for more details.\n-\n-import appModuleHandler\n-\n-class AppModule(appModuleHandler.AppModule):\n-\tdisableBrowseModeByDefault = True\n", "issue": "VS Code: Reduce the number of times one has to use NVDA+Space to switch modes.\nI just filed [this issue](https://github.com/microsoft/vscode/issues/93087) against VS Code where I suggest to use targeted role=\"document\" in those places that produce HTML output for consumption, to make NVDA switch in and out of browse mode in a smart, automated, manner, reducing the number of times one has to use NVDA+Space to toggle modes. Examples I found while using the 1.44 VS Code Insider builds were:\r\n\r\n* The Welcome page\r\n* The details page for an extension\r\n* The ReadMe file that may be displayed after an extension has been installed.\r\n\r\n@leonardder suggested that, once this lands in stable, a modification might be needed for the VS Code app module. So filing this issue here.\n", "before_files": [{"content": "#appModules/code.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2019 NV Access Limited, Babbage B.V.\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\n\nimport appModuleHandler\n\nclass AppModule(appModuleHandler.AppModule):\n\tdisableBrowseModeByDefault = True\n", "path": "source/appModules/code.py"}]} | 812 | 140 |
gh_patches_debug_27836 | rasdani/github-patches | git_diff | encode__starlette-151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wildcard domains and TrustedhostMiddleware
Support for wildcard domains
</issue>
<code>
[start of starlette/middleware/trustedhost.py]
1 from starlette.datastructures import Headers
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import typing
5
6
7 class TrustedHostMiddleware:
8 def __init__(
9 self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = ["*"]
10 ) -> None:
11 self.app = app
12 self.allowed_hosts = allowed_hosts
13 self.allow_any = "*" in allowed_hosts
14
15 def __call__(self, scope: Scope) -> ASGIInstance:
16 if scope["type"] in ("http", "websocket") and not self.allow_any:
17 headers = Headers(scope=scope)
18 host = headers.get("host")
19 if host not in self.allowed_hosts:
20 return PlainTextResponse("Invalid host header", status_code=400)
21
22 return self.app(scope)
23
[end of starlette/middleware/trustedhost.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/middleware/trustedhost.py b/starlette/middleware/trustedhost.py
--- a/starlette/middleware/trustedhost.py
+++ b/starlette/middleware/trustedhost.py
@@ -4,10 +4,17 @@
import typing
+ENFORCE_DOMAIN_WILDCARD = "Domain wildcard patterns must be like '*.example.com'."
+
+
class TrustedHostMiddleware:
def __init__(
self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = ["*"]
) -> None:
+ for pattern in allowed_hosts:
+ assert "*" not in pattern[1:], ENFORCE_DOMAIN_WILDCARD
+ if pattern.startswith("*") and pattern != "*":
+ assert pattern.startswith("*."), ENFORCE_DOMAIN_WILDCARD
self.app = app
self.allowed_hosts = allowed_hosts
self.allow_any = "*" in allowed_hosts
@@ -15,8 +22,15 @@
def __call__(self, scope: Scope) -> ASGIInstance:
if scope["type"] in ("http", "websocket") and not self.allow_any:
headers = Headers(scope=scope)
- host = headers.get("host")
- if host not in self.allowed_hosts:
+ host = headers.get("host", "").split(":")[0]
+ for pattern in self.allowed_hosts:
+ if (
+ host == pattern
+ or pattern.startswith("*")
+ and host.endswith(pattern[1:])
+ ):
+ break
+ else:
return PlainTextResponse("Invalid host header", status_code=400)
return self.app(scope)
| {"golden_diff": "diff --git a/starlette/middleware/trustedhost.py b/starlette/middleware/trustedhost.py\n--- a/starlette/middleware/trustedhost.py\n+++ b/starlette/middleware/trustedhost.py\n@@ -4,10 +4,17 @@\n import typing\n \n \n+ENFORCE_DOMAIN_WILDCARD = \"Domain wildcard patterns must be like '*.example.com'.\"\n+\n+\n class TrustedHostMiddleware:\n def __init__(\n self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = [\"*\"]\n ) -> None:\n+ for pattern in allowed_hosts:\n+ assert \"*\" not in pattern[1:], ENFORCE_DOMAIN_WILDCARD\n+ if pattern.startswith(\"*\") and pattern != \"*\":\n+ assert pattern.startswith(\"*.\"), ENFORCE_DOMAIN_WILDCARD\n self.app = app\n self.allowed_hosts = allowed_hosts\n self.allow_any = \"*\" in allowed_hosts\n@@ -15,8 +22,15 @@\n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\") and not self.allow_any:\n headers = Headers(scope=scope)\n- host = headers.get(\"host\")\n- if host not in self.allowed_hosts:\n+ host = headers.get(\"host\", \"\").split(\":\")[0]\n+ for pattern in self.allowed_hosts:\n+ if (\n+ host == pattern\n+ or pattern.startswith(\"*\")\n+ and host.endswith(pattern[1:])\n+ ):\n+ break\n+ else:\n return PlainTextResponse(\"Invalid host header\", status_code=400)\n \n return self.app(scope)\n", "issue": "Wildcard domains and TrustedhostMiddleware\nSupport for wildcard domains\n", "before_files": [{"content": "from starlette.datastructures import Headers\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport typing\n\n\nclass TrustedHostMiddleware:\n def __init__(\n self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = [\"*\"]\n ) -> None:\n self.app = app\n self.allowed_hosts = allowed_hosts\n self.allow_any = \"*\" in allowed_hosts\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\") and not self.allow_any:\n headers = Headers(scope=scope)\n host = headers.get(\"host\")\n if host not in self.allowed_hosts:\n return PlainTextResponse(\"Invalid host header\", status_code=400)\n\n return self.app(scope)\n", "path": "starlette/middleware/trustedhost.py"}]} | 772 | 356 |
gh_patches_debug_30801 | rasdani/github-patches | git_diff | numba__numba-1719 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LinkedList jitclass example is broken
```
Internal error:
TypeError: Invalid store of %"deferred.4329823704.value" to {i8*, {i32, {%"deferred.4329823704.data", i8}}*} in <numba.datamodel.models.OptionalModel object at 0x106713278>
File "linkedlist.py", line 53
```
</issue>
<code>
[start of numba/targets/optional.py]
1 from __future__ import print_function, absolute_import, division
2
3 from numba import types, cgutils
4
5 from .imputils import lower_cast
6
7
8 def make_optional(valtype):
9 """
10 Return the Structure representation of a optional value
11 """
12 return cgutils.create_struct_proxy(types.Optional(valtype))
13
14
15 def always_return_true_impl(context, builder, sig, args):
16 return cgutils.true_bit
17
18
19 def always_return_false_impl(context, builder, sig, args):
20 return cgutils.false_bit
21
22
23 @lower_cast(types.Any, types.Optional)
24 def any_to_optional(context, builder, fromty, toty, val):
25 if fromty == types.none:
26 return context.make_optional_none(builder, toty.type)
27 else:
28 val = context.cast(builder, val, fromty, toty.type)
29 return context.make_optional_value(builder, toty.type, val)
30
31 @lower_cast(types.Optional, types.Any)
32 def optional_to_any(context, builder, fromty, toty, val):
33 optty = context.make_optional(fromty)
34 optval = optty(context, builder, value=val)
35 validbit = cgutils.as_bool_bit(builder, optval.valid)
36 with builder.if_then(builder.not_(validbit), likely=False):
37 msg = "expected %s, got None" % (fromty.type,)
38 context.call_conv.return_user_exc(builder, TypeError, (msg,))
39
40 return optval.data
41
[end of numba/targets/optional.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numba/targets/optional.py b/numba/targets/optional.py
--- a/numba/targets/optional.py
+++ b/numba/targets/optional.py
@@ -20,6 +20,39 @@
return cgutils.false_bit
+@lower_cast(types.Optional, types.Optional)
+def optional_to_optional(context, builder, fromty, toty, val):
+ """
+ The handling of optional->optional cast must be special cased for
+ correct propagation of None value. Given type T and U. casting of
+ T? to U? (? denotes optional) should always succeed. If the from-value
+ is None, the None value the casted value (U?) should be None; otherwise,
+ the from-value is casted to U. This is different from casting T? to U,
+ which requires the from-value must not be None.
+ """
+ optty = context.make_optional(fromty)
+ optval = optty(context, builder, value=val)
+ validbit = cgutils.as_bool_bit(builder, optval.valid)
+ # Create uninitialized optional value
+ outoptty = context.make_optional(toty)
+ outoptval = outoptty(context, builder)
+
+ with builder.if_else(validbit) as (is_valid, is_not_valid):
+ with is_valid:
+ # Cast internal value
+ outoptval.valid = cgutils.true_bit
+ outoptval.data = context.cast(builder, optval.data,
+ fromty.type, toty.type)
+
+ with is_not_valid:
+ # Store None to result
+ outoptval.valid = cgutils.false_bit
+ outoptval.data = cgutils.get_null_value(
+ outoptval.data.type)
+
+ return outoptval._getvalue()
+
+
@lower_cast(types.Any, types.Optional)
def any_to_optional(context, builder, fromty, toty, val):
if fromty == types.none:
@@ -28,6 +61,7 @@
val = context.cast(builder, val, fromty, toty.type)
return context.make_optional_value(builder, toty.type, val)
+
@lower_cast(types.Optional, types.Any)
def optional_to_any(context, builder, fromty, toty, val):
optty = context.make_optional(fromty)
| {"golden_diff": "diff --git a/numba/targets/optional.py b/numba/targets/optional.py\n--- a/numba/targets/optional.py\n+++ b/numba/targets/optional.py\n@@ -20,6 +20,39 @@\n return cgutils.false_bit\n \n \n+@lower_cast(types.Optional, types.Optional)\n+def optional_to_optional(context, builder, fromty, toty, val):\n+ \"\"\"\n+ The handling of optional->optional cast must be special cased for\n+ correct propagation of None value. Given type T and U. casting of\n+ T? to U? (? denotes optional) should always succeed. If the from-value\n+ is None, the None value the casted value (U?) should be None; otherwise,\n+ the from-value is casted to U. This is different from casting T? to U,\n+ which requires the from-value must not be None.\n+ \"\"\"\n+ optty = context.make_optional(fromty)\n+ optval = optty(context, builder, value=val)\n+ validbit = cgutils.as_bool_bit(builder, optval.valid)\n+ # Create uninitialized optional value\n+ outoptty = context.make_optional(toty)\n+ outoptval = outoptty(context, builder)\n+\n+ with builder.if_else(validbit) as (is_valid, is_not_valid):\n+ with is_valid:\n+ # Cast internal value\n+ outoptval.valid = cgutils.true_bit\n+ outoptval.data = context.cast(builder, optval.data,\n+ fromty.type, toty.type)\n+\n+ with is_not_valid:\n+ # Store None to result\n+ outoptval.valid = cgutils.false_bit\n+ outoptval.data = cgutils.get_null_value(\n+ outoptval.data.type)\n+\n+ return outoptval._getvalue()\n+\n+\n @lower_cast(types.Any, types.Optional)\n def any_to_optional(context, builder, fromty, toty, val):\n if fromty == types.none:\n@@ -28,6 +61,7 @@\n val = context.cast(builder, val, fromty, toty.type)\n return context.make_optional_value(builder, toty.type, val)\n \n+\n @lower_cast(types.Optional, types.Any)\n def optional_to_any(context, builder, fromty, toty, val):\n optty = context.make_optional(fromty)\n", "issue": "LinkedList jitclass example is broken\n```\nInternal error:\nTypeError: Invalid store of %\"deferred.4329823704.value\" to {i8*, {i32, {%\"deferred.4329823704.data\", i8}}*} in <numba.datamodel.models.OptionalModel object at 0x106713278>\nFile \"linkedlist.py\", line 53\n```\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom numba import types, cgutils\n\nfrom .imputils import lower_cast\n\n\ndef make_optional(valtype):\n \"\"\"\n Return the Structure representation of a optional value\n \"\"\"\n return cgutils.create_struct_proxy(types.Optional(valtype))\n\n\ndef always_return_true_impl(context, builder, sig, args):\n return cgutils.true_bit\n\n\ndef always_return_false_impl(context, builder, sig, args):\n return cgutils.false_bit\n\n\n@lower_cast(types.Any, types.Optional)\ndef any_to_optional(context, builder, fromty, toty, val):\n if fromty == types.none:\n return context.make_optional_none(builder, toty.type)\n else:\n val = context.cast(builder, val, fromty, toty.type)\n return context.make_optional_value(builder, toty.type, val)\n\n@lower_cast(types.Optional, types.Any)\ndef optional_to_any(context, builder, fromty, toty, val):\n optty = context.make_optional(fromty)\n optval = optty(context, builder, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n with builder.if_then(builder.not_(validbit), likely=False):\n msg = \"expected %s, got None\" % (fromty.type,)\n context.call_conv.return_user_exc(builder, TypeError, (msg,))\n\n return optval.data\n", "path": "numba/targets/optional.py"}]} | 1,022 | 526 |
gh_patches_debug_14145 | rasdani/github-patches | git_diff | facebookresearch__nevergrad-11 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hello-world install question
## Steps to reproduce
Install nevergrad
```
pip3 install -e git+git@github.com:facebookresearch/nevergrad@master#egg=nevergrad
Obtaining nevergrad from git+git@github.com:facebookresearch/nevergrad@master#egg=nevergrad
  Cloning git@github.com:facebookresearch/nevergrad (to revision master) to ./src/nevergrad
Warning: Permanently added the RSA host key for IP address '192.30.255.112' to the list of known hosts.
[email protected]: Permission denied (publickey).
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists.
Command "git clone -q [email protected]:facebookresearch/nevergrad /Users/ME/Documents/workspace/temp/src/nevergrad" failed with error code 128 in None
```
ok. trying git clone and `python3 setup.py install` method. That seems to work.
run the sample program:
python3 mynevergrad.py
```
from nevergrad.optimization import optimizerlib
def square(x):
return (x - .5)**2
optimizer = optimizerlib.OnePlusOne(dimension=1, budget=100, num_workers=5)
recommendation = optimizer.optimize(square, executor=None, batch_mode=True)
```
## Observed Results
```
Traceback (most recent call last):
File "mynevergrad.py", line 6, in <module>
from nevergrad.optimization import optimizerlib
ModuleNotFoundError: No module named 'nevergrad.optimization'
```
## Expected Results
It should run the sample
## Relevant Code
```
import pkg_resources
for d in pkg_resources.working_set:
print(d)
```
DOES include `nevergrad 0.1.0`
This is very likely an install, python3, homebrew "installed in user directory", or paths issue, but given that `nevergrad 0.1.0` shows up in the list, it is odd...
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3 #
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 # from distutils.core import setup
8 from setuptools import setup
9
10
11 with open('requirements.txt') as f:
12 requirements = f.read().splitlines()
13
14
15 setup(name='nevergrad',
16 version='0.1.0',
17 description='Gradient-free optimization toolbox',
18 author='Facebook AI Research',
19 packages=['nevergrad'],
20 install_requires=requirements,)
21
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,14 +7,22 @@
# from distutils.core import setup
from setuptools import setup
-
with open('requirements.txt') as f:
requirements = f.read().splitlines()
-
-setup(name='nevergrad',
- version='0.1.0',
- description='Gradient-free optimization toolbox',
- author='Facebook AI Research',
- packages=['nevergrad'],
- install_requires=requirements,)
+setup(
+ name='nevergrad',
+ version='0.1.0',
+ description='Gradient-free optimization toolbox',
+ author='Facebook AI Research',
+ packages=[
+ 'nevergrad',
+ 'nevergrad.benchmark',
+ 'nevergrad.benchmark.additional',
+ 'nevergrad.common',
+ 'nevergrad.functions',
+ 'nevergrad.instrumentation',
+ 'nevergrad.optimization',
+ ],
+ install_requires=requirements,
+)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,14 +7,22 @@\n # from distutils.core import setup\n from setuptools import setup\n \n-\n with open('requirements.txt') as f:\n requirements = f.read().splitlines()\n \n-\n-setup(name='nevergrad',\n- version='0.1.0',\n- description='Gradient-free optimization toolbox',\n- author='Facebook AI Research',\n- packages=['nevergrad'],\n- install_requires=requirements,)\n+setup(\n+ name='nevergrad',\n+ version='0.1.0',\n+ description='Gradient-free optimization toolbox',\n+ author='Facebook AI Research',\n+ packages=[\n+ 'nevergrad',\n+ 'nevergrad.benchmark',\n+ 'nevergrad.benchmark.additional',\n+ 'nevergrad.common',\n+ 'nevergrad.functions',\n+ 'nevergrad.instrumentation',\n+ 'nevergrad.optimization',\n+ ],\n+ install_requires=requirements,\n+)\n", "issue": "Hello-world install question\n## Steps to reproduce\r\n\r\nInstall nevergrad\r\n\r\n```\r\npip3 install -e [email protected]:facebookresearch/nevergrad@master#egg=nevergrad\r\nObtaining nevergrad from [email protected]:facebookresearch/nevergrad@master#egg=nevergrad\r\n Cloning [email protected]:facebookresearch/nevergrad (to revision master) to ./src/nevergrad\r\nWarning: Permanently added the RSA host key for IP address '192.30.255.112' to the list of known hosts.\r\[email protected]: Permission denied (publickey).\r\nfatal: Could not read from remote repository.\r\n\r\nPlease make sure you have the correct access rights\r\nand the repository exists.\r\nCommand \"git clone -q [email protected]:facebookresearch/nevergrad /Users/ME/Documents/workspace/temp/src/nevergrad\" failed with error code 128 in None\r\n```\r\n\r\nok. trying git clone and `python3 setup.py install` method. That seems to work.\r\n\r\nrun the sample program:\r\n\r\npython3 mynevergrad.py\r\n\r\n```\r\nfrom nevergrad.optimization import optimizerlib\r\n\r\ndef square(x):\r\n return (x - .5)**2\r\n\r\noptimizer = optimizerlib.OnePlusOne(dimension=1, budget=100, num_workers=5)\r\nrecommendation = optimizer.optimize(square, executor=None, batch_mode=True)\r\n```\r\n\r\n\r\n## Observed Results\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"mynevergrad.py\", line 6, in <module>\r\n from nevergrad.optimization import optimizerlib\r\nModuleNotFoundError: No module named 'nevergrad.optimization'\r\n```\r\n\r\n\r\n## Expected Results\r\n\r\nIt should run the sample\r\n\r\n## Relevant Code\r\n\r\n```\r\nimport pkg_resources\r\nfor d in pkg_resources.working_set:\r\n\tprint(d)\r\n```\r\nDOES include `nevergrad 0.1.0`\r\n\r\n\r\nThis is very likely an install, python3, homebrew \"installed in user directory\", or paths issue, but given that `nevergrad 0.1.0` shows up in the list, it is odd...\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# from distutils.core import setup\nfrom setuptools import setup\n\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\n\nsetup(name='nevergrad',\n version='0.1.0',\n description='Gradient-free optimization toolbox',\n author='Facebook AI Research',\n packages=['nevergrad'],\n install_requires=requirements,)\n", "path": "setup.py"}]} | 1,144 | 220 |
gh_patches_debug_33030 | rasdani/github-patches | git_diff | pypa__cibuildwheel-1613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup.py setup() not detected in __name__ == '__main__' block
### Description
My setup.py setup() includes:
python_requires=">=3.8"
However cibuildwheel still tries and fails to compile under Python 3.6.
I understand there is [CIBW_BUILD / CIBW_SKIP](https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip) but that is then duplicating the python requires information.
I can add a \[project\] section to pyproject.toml but that leads to a lot of problems because it ends up fighting with setup() parameters and they **really** don't like it.
I believe cibuildwheel should establish the Python version support automatically whether it comes from setuptools or pyproject.toml, and not try to build on unsupported versions. My [pyproject.toml](https://github.com/rogerbinns/apsw/blob/master/pyproject.toml) is:
````
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
````
### Build log
https://github.com/rogerbinns/apsw/actions/runs/6175182758/job/16761477543
### CI config
https://github.com/rogerbinns/apsw/actions/runs/6175182758/workflow
</issue>
<code>
[start of cibuildwheel/projectfiles.py]
1 from __future__ import annotations
2
3 import ast
4 import configparser
5 import contextlib
6 from pathlib import Path
7
8 from ._compat import tomllib
9
10
11 class Analyzer(ast.NodeVisitor):
12 def __init__(self) -> None:
13 self.requires_python: str | None = None
14
15 def visit(self, node: ast.AST) -> None:
16 for inner_node in ast.walk(node):
17 for child in ast.iter_child_nodes(inner_node):
18 child.parent = inner_node # type: ignore[attr-defined]
19 super().visit(node)
20
21 def visit_keyword(self, node: ast.keyword) -> None:
22 self.generic_visit(node)
23 # Must not be nested in an if or other structure
24 # This will be Module -> Expr -> Call -> keyword
25 if (
26 node.arg == "python_requires"
27 and not hasattr(node.parent.parent.parent, "parent") # type: ignore[attr-defined]
28 and isinstance(node.value, ast.Constant)
29 ):
30 self.requires_python = node.value.value
31
32
33 def setup_py_python_requires(content: str) -> str | None:
34 try:
35 tree = ast.parse(content)
36 analyzer = Analyzer()
37 analyzer.visit(tree)
38 return analyzer.requires_python or None
39 except Exception: # pylint: disable=broad-except
40 return None
41
42
43 def get_requires_python_str(package_dir: Path) -> str | None:
44 """Return the python requires string from the most canonical source available, or None"""
45
46 # Read in from pyproject.toml:project.requires-python
47 with contextlib.suppress(FileNotFoundError):
48 with (package_dir / "pyproject.toml").open("rb") as f1:
49 info = tomllib.load(f1)
50 with contextlib.suppress(KeyError, IndexError, TypeError):
51 return str(info["project"]["requires-python"])
52
53 # Read in from setup.cfg:options.python_requires
54 config = configparser.ConfigParser()
55 with contextlib.suppress(FileNotFoundError):
56 config.read(package_dir / "setup.cfg")
57 with contextlib.suppress(KeyError, IndexError, TypeError):
58 return str(config["options"]["python_requires"])
59
60 setup_py = package_dir / "setup.py"
61 with contextlib.suppress(FileNotFoundError), setup_py.open(encoding="utf8") as f2:
62 return setup_py_python_requires(f2.read())
63
64 return None
65
[end of cibuildwheel/projectfiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py
--- a/cibuildwheel/projectfiles.py
+++ b/cibuildwheel/projectfiles.py
@@ -8,6 +8,43 @@
from ._compat import tomllib
+def get_parent(node: ast.AST | None, depth: int = 1) -> ast.AST | None:
+ for _ in range(depth):
+ node = getattr(node, "parent", None)
+ return node
+
+
+def is_main(parent: ast.AST | None) -> bool:
+ if parent is None:
+ return False
+
+ # This would be much nicer with 3.10's pattern matching!
+ if not isinstance(parent, ast.If):
+ return False
+ if not isinstance(parent.test, ast.Compare):
+ return False
+
+ try:
+ (op,) = parent.test.ops
+ (comp,) = parent.test.comparators
+ except ValueError:
+ return False
+
+ if not isinstance(op, ast.Eq):
+ return False
+
+ values = {comp, parent.test.left}
+
+ mains = {x for x in values if isinstance(x, ast.Constant) and x.value == "__main__"}
+ if len(mains) != 1:
+ return False
+ consts = {x for x in values if isinstance(x, ast.Name) and x.id == "__name__"}
+ if len(consts) != 1:
+ return False
+
+ return True
+
+
class Analyzer(ast.NodeVisitor):
def __init__(self) -> None:
self.requires_python: str | None = None
@@ -19,13 +56,22 @@
super().visit(node)
def visit_keyword(self, node: ast.keyword) -> None:
+ # Must not be nested except for if __name__ == "__main__"
+
self.generic_visit(node)
- # Must not be nested in an if or other structure
# This will be Module -> Expr -> Call -> keyword
+ parent = get_parent(node, 4)
+ unnested = parent is None
+
+ # This will be Module -> If -> Expr -> Call -> keyword
+ name_main_unnested = (
+ parent is not None and get_parent(parent) is None and is_main(get_parent(node, 3))
+ )
+
if (
node.arg == "python_requires"
- and not hasattr(node.parent.parent.parent, "parent") # type: ignore[attr-defined]
and isinstance(node.value, ast.Constant)
+ and (unnested or name_main_unnested)
):
self.requires_python = node.value.value
| {"golden_diff": "diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py\n--- a/cibuildwheel/projectfiles.py\n+++ b/cibuildwheel/projectfiles.py\n@@ -8,6 +8,43 @@\n from ._compat import tomllib\n \n \n+def get_parent(node: ast.AST | None, depth: int = 1) -> ast.AST | None:\n+ for _ in range(depth):\n+ node = getattr(node, \"parent\", None)\n+ return node\n+\n+\n+def is_main(parent: ast.AST | None) -> bool:\n+ if parent is None:\n+ return False\n+\n+ # This would be much nicer with 3.10's pattern matching!\n+ if not isinstance(parent, ast.If):\n+ return False\n+ if not isinstance(parent.test, ast.Compare):\n+ return False\n+\n+ try:\n+ (op,) = parent.test.ops\n+ (comp,) = parent.test.comparators\n+ except ValueError:\n+ return False\n+\n+ if not isinstance(op, ast.Eq):\n+ return False\n+\n+ values = {comp, parent.test.left}\n+\n+ mains = {x for x in values if isinstance(x, ast.Constant) and x.value == \"__main__\"}\n+ if len(mains) != 1:\n+ return False\n+ consts = {x for x in values if isinstance(x, ast.Name) and x.id == \"__name__\"}\n+ if len(consts) != 1:\n+ return False\n+\n+ return True\n+\n+\n class Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: str | None = None\n@@ -19,13 +56,22 @@\n super().visit(node)\n \n def visit_keyword(self, node: ast.keyword) -> None:\n+ # Must not be nested except for if __name__ == \"__main__\"\n+\n self.generic_visit(node)\n- # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n+ parent = get_parent(node, 4)\n+ unnested = parent is None\n+\n+ # This will be Module -> If -> Expr -> Call -> keyword\n+ name_main_unnested = (\n+ parent is not None and get_parent(parent) is None and is_main(get_parent(node, 3))\n+ )\n+\n if (\n node.arg == \"python_requires\"\n- and not hasattr(node.parent.parent.parent, \"parent\") # type: ignore[attr-defined]\n and isinstance(node.value, ast.Constant)\n+ and (unnested or name_main_unnested)\n ):\n self.requires_python = node.value.value\n", "issue": "setup.py setup() not detected in __name__ == '__main__' block\n### Description\n\nMy setup.py setup() includes:\r\n\r\n python_requires=\">=3.8\"\r\n\r\nHowever cibuildwheel still tries and fails to compile under Python 3.6.\r\n\r\nI understand there is [CIBW_BUILD / CIBW_SKIP](https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip) but that is then duplicating the python requires information.\r\n\r\nI can add a \\[project\\] section to pyproject.toml but that leads to a lot of problems because it ends up fighting with setup() parameters and they **really** don't like it.\r\n\r\nI believe cibuildwheel should establish the Python version support automatically whether it comes from setuptools or pyproject.toml, and not try to build on unsupported versions. 
My [pyproject.toml](https://github.com/rogerbinns/apsw/blob/master/pyproject.toml) is:\r\n\r\n````\r\n[build-system]\r\nrequires = [\"setuptools\"]\r\nbuild-backend = \"setuptools.build_meta\"\r\n````\r\n\n\n### Build log\n\nhttps://github.com/rogerbinns/apsw/actions/runs/6175182758/job/16761477543\n\n### CI config\n\nhttps://github.com/rogerbinns/apsw/actions/runs/6175182758/workflow\n", "before_files": [{"content": "from __future__ import annotations\n\nimport ast\nimport configparser\nimport contextlib\nfrom pathlib import Path\n\nfrom ._compat import tomllib\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: str | None = None\n\n def visit(self, node: ast.AST) -> None:\n for inner_node in ast.walk(node):\n for child in ast.iter_child_nodes(inner_node):\n child.parent = inner_node # type: ignore[attr-defined]\n super().visit(node)\n\n def visit_keyword(self, node: ast.keyword) -> None:\n self.generic_visit(node)\n # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n if (\n node.arg == \"python_requires\"\n and not hasattr(node.parent.parent.parent, \"parent\") # type: ignore[attr-defined]\n and isinstance(node.value, ast.Constant)\n ):\n self.requires_python = node.value.value\n\n\ndef setup_py_python_requires(content: str) -> str | None:\n try:\n tree = ast.parse(content)\n analyzer = Analyzer()\n analyzer.visit(tree)\n return analyzer.requires_python or None\n except Exception: # pylint: disable=broad-except\n return None\n\n\ndef get_requires_python_str(package_dir: Path) -> str | None:\n \"\"\"Return the python requires string from the most canonical source available, or None\"\"\"\n\n # Read in from pyproject.toml:project.requires-python\n with contextlib.suppress(FileNotFoundError):\n with (package_dir / \"pyproject.toml\").open(\"rb\") as f1:\n info = tomllib.load(f1)\n with contextlib.suppress(KeyError, IndexError, TypeError):\n return str(info[\"project\"][\"requires-python\"])\n\n # Read in from setup.cfg:options.python_requires\n config = configparser.ConfigParser()\n with contextlib.suppress(FileNotFoundError):\n config.read(package_dir / \"setup.cfg\")\n with contextlib.suppress(KeyError, IndexError, TypeError):\n return str(config[\"options\"][\"python_requires\"])\n\n setup_py = package_dir / \"setup.py\"\n with contextlib.suppress(FileNotFoundError), setup_py.open(encoding=\"utf8\") as f2:\n return setup_py_python_requires(f2.read())\n\n return None\n", "path": "cibuildwheel/projectfiles.py"}]} | 1,459 | 597 |
gh_patches_debug_13024 | rasdani/github-patches | git_diff | vega__altair-2570 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tooltip doesn't support "image" key
My understanding is that to render tooltips, Altair uses the `vega-tooltip` plugin. Per that project's [README](https://github.com/vega/vega-tooltip), one awesome feature that library has is the ability to render images as part of the tooltip with the image key. From the docs:
> Supports special keys title (becomes the title of the tooltip) and image (used as the url for an embedded image)
Using the tooltip without the `image` key is fine:
```
mydata = pd.DataFrame.from_records([{'a': 1, 'b': 1}, {'a': 2, 'b': 2}])
chart = alt.Chart(mydata).mark_circle().encode(
x='a',
y='b',
tooltip=alt.Tooltip(['a'], title='My Cool Tooltip')
).interactive()
display(chart)
```
However, when I add the `image` key, it stops working:
```
chart = alt.Chart(mydata).mark_circle().encode(
x='a',
y='b',
tooltip=alt.Tooltip(['a'], title='My Cool Tooltip', image='https://picsum.photos/200')
).interactive()
display(chart)
```
```
SchemaValidationError: Invalid specification
altair.vegalite.v4.schema.channels.Tooltip, validating 'additionalProperties'
Additional properties are not allowed ('image' was unexpected)
```
Maybe this feature is already supported but the schema is out of date? Thanks.
---
Please follow these steps to make it more efficient to solve your issue:
- [N/A] Since Altair is a Python wrapper around the Vega-Lite visualization grammar, [most bugs should be reported directly to Vega-Lite](https://github.com/vega/vega-lite/issues). You can click the Action Button of your Altair chart and "Open in Vega Editor" to create a reproducible Vega-Lite example and see if you get the same error in the Vega Editor.
- [x] Search for duplicate issues.
- [x] Use the latest version of Altair.
- [x] Describe how to reproduce the bug and include the full code and data to reproduce it, ideally using a sample data set from `vega_datasets`.
</issue>
<code>
[start of altair/examples/image_tooltip.py]
1 """
2 Image tooltip
3 -------------
4 This example shows how to render images in tooltips.
5 Either URLs or local file paths can be used to reference
6 the images.
7 """
8 # category: other charts
9
10 import altair as alt
11 import pandas as pd
12
13 source = pd.DataFrame.from_records(
14 [{'a': 1, 'b': 1, 'image': 'https://altair-viz.github.io/_static/altair-logo-light.png'},
15 {'a': 2, 'b': 2, 'image': 'https://avatars.githubusercontent.com/u/11796929?s=200&v=4'}]
16 )
17 alt.Chart(source).mark_circle(size=200).encode(
18 x='a',
19 y='b',
20 tooltip=['image'] # Must be a list for the image to render
21 )
22
[end of altair/examples/image_tooltip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/examples/image_tooltip.py b/altair/examples/image_tooltip.py
--- a/altair/examples/image_tooltip.py
+++ b/altair/examples/image_tooltip.py
@@ -3,7 +3,9 @@
-------------
This example shows how to render images in tooltips.
Either URLs or local file paths can be used to reference
-the images.
+the images. To render the image, you must use the special
+column name "image" in your data and pass it as a list to
+the tooltip encoding.
"""
# category: other charts
@@ -17,5 +19,5 @@
alt.Chart(source).mark_circle(size=200).encode(
x='a',
y='b',
- tooltip=['image'] # Must be a list for the image to render
+ tooltip=['image'] # Must be a list containing a field called "image"
)
| {"golden_diff": "diff --git a/altair/examples/image_tooltip.py b/altair/examples/image_tooltip.py\n--- a/altair/examples/image_tooltip.py\n+++ b/altair/examples/image_tooltip.py\n@@ -3,7 +3,9 @@\n -------------\n This example shows how to render images in tooltips.\n Either URLs or local file paths can be used to reference\n-the images.\n+the images. To render the image, you must use the special\n+column name \"image\" in your data and pass it as a list to\n+the tooltip encoding.\n \"\"\"\n # category: other charts\n \n@@ -17,5 +19,5 @@\n alt.Chart(source).mark_circle(size=200).encode(\n x='a',\n y='b',\n- tooltip=['image'] # Must be a list for the image to render\n+ tooltip=['image'] # Must be a list containing a field called \"image\"\n )\n", "issue": "Tooltip doesn't support \"image\" key\nMy understanding is that to render tooltips, Altair uses the `vega-tooltip` plugin. Per that project's [README](https://github.com/vega/vega-tooltip), one awesome feature that library has is the ability to render images as part of the tooltip with the image key. From the docs:\r\n\r\n> Supports special keys title (becomes the title of the tooltip) and image (used as the url for an embedded image)\r\n\r\nUsing the tooltip without the `image` key is fine:\r\n\r\n```\r\nmydata = pd.DataFrame.from_records([{'a': 1, 'b': 1}, {'a': 2, 'b': 2}])\r\nchart = alt.Chart(mydata).mark_circle().encode(\r\n x='a',\r\n y='b',\r\n tooltip=alt.Tooltip(['a'], title='My Cool Tooltip')\r\n).interactive()\r\n\r\ndisplay(chart)\r\n```\r\n\r\nHowever, when I add the `image` key, it stops working:\r\n\r\n```\r\nchart = alt.Chart(mydata).mark_circle().encode(\r\n x='a',\r\n y='b',\r\n tooltip=alt.Tooltip(['a'], title='My Cool Tooltip', image='https://picsum.photos/200')\r\n).interactive()\r\n\r\ndisplay(chart)\r\n```\r\n\r\n```\r\nSchemaValidationError: Invalid specification\r\n\r\n altair.vegalite.v4.schema.channels.Tooltip, validating 'additionalProperties'\r\n\r\n Additional properties are not allowed ('image' was unexpected)\r\n```\r\n\r\nMaybe this feature is already supported but the schema is out of date? Thanks.\r\n\r\n---\r\n\r\nPlease follow these steps to make it more efficient to solve your issue:\r\n\r\n- [N/A] Since Altair is a Python wrapper around the Vega-Lite visualization grammar, [most bugs should be reported directly to Vega-Lite](https://github.com/vega/vega-lite/issues). You can click the Action Button of your Altair chart and \"Open in Vega Editor\" to create a reproducible Vega-Lite example and see if you get the same error in the Vega Editor.\r\n- [x] Search for duplicate issues.\r\n- [x] Use the latest version of Altair.\r\n- [x] Describe how to reproduce the bug and include the full code and data to reproduce it, ideally using a sample data set from `vega_datasets`.\r\n\n", "before_files": [{"content": "\"\"\"\nImage tooltip\n-------------\nThis example shows how to render images in tooltips.\nEither URLs or local file paths can be used to reference\nthe images.\n\"\"\"\n# category: other charts\n\nimport altair as alt\nimport pandas as pd\n\nsource = pd.DataFrame.from_records(\n [{'a': 1, 'b': 1, 'image': 'https://altair-viz.github.io/_static/altair-logo-light.png'},\n {'a': 2, 'b': 2, 'image': 'https://avatars.githubusercontent.com/u/11796929?s=200&v=4'}]\n)\nalt.Chart(source).mark_circle(size=200).encode(\n x='a',\n y='b',\n tooltip=['image'] # Must be a list for the image to render\n)\n", "path": "altair/examples/image_tooltip.py"}]} | 1,230 | 196 |
gh_patches_debug_2489 | rasdani/github-patches | git_diff | rucio__rucio-2776 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Account and Scope new types
Motivation
----------
For multi-vo the internal representation of scope and account will need to be different from the external representation. The translations for these should be done in a consistent way and this can be prepared beforehand.
Modification
------------
Create a new type for each of scope and account.
Convert incoming account names and scopes to these types in the API directory so that multi-vo changes are more straightforward.
If normal strings are used in core, raise an error.
</issue>
<code>
[start of lib/rucio/vcsversion.py]
1
2 '''
3 This file is automatically generated; Do not edit it. :)
4 '''
5 VERSION_INFO = {
6 'final': True,
7 'version': '1.20.3',
8 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',
9 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',
10 'revno': 8410
11 }
12
[end of lib/rucio/vcsversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py
--- a/lib/rucio/vcsversion.py
+++ b/lib/rucio/vcsversion.py
@@ -4,8 +4,8 @@
'''
VERSION_INFO = {
'final': True,
- 'version': '1.20.3',
- 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',
- 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',
- 'revno': 8410
+ 'version': '1.20.4rc1',
+ 'branch_nick': 'patch-0-Release__1_20_4rc1_preparation',
+ 'revision_id': '525812b8f83f1069d38ab78aebedb732f21e77ec',
+ 'revno': 8418
}
| {"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.20.3',\n- 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',\n- 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',\n- 'revno': 8410\n+ 'version': '1.20.4rc1',\n+ 'branch_nick': 'patch-0-Release__1_20_4rc1_preparation',\n+ 'revision_id': '525812b8f83f1069d38ab78aebedb732f21e77ec',\n+ 'revno': 8418\n }\n", "issue": "Account and Scope new types\nMotivation\r\n----------\r\nFor multi-vo the internal representation of scope and account will need to be different from the external representation. The translations for these should be done in a consistent way and this can be prepared beforehand.\r\n\r\n\r\nModification\r\n------------\r\nCreate a new type for each of scope and account. \r\nConvert incoming account names and scopes to these types in the API directory so that multi-vo changes are more straight forward.\r\nIf normal strings are used in core, raise an error.\r\n\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.20.3',\n 'branch_nick': 'patch-0-Release__Rucio_1_20_3_preparation',\n 'revision_id': 'f05e019f7178590718bf3f1eee415cc46cb59159',\n 'revno': 8410\n}\n", "path": "lib/rucio/vcsversion.py"}]} | 777 | 255 |
gh_patches_debug_9901 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1062 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AZURE_12: Retention policy of 0 is not supported
**Describe the bug**
When setting the `retention_period.days` value on `azurerm_network_watcher_flow_log` to `0`, `CKV_AZURE_12` still fails. `0` is the value to signify indefinite/forever retention.
**To Reproduce**
Steps to reproduce the behavior:
1. Create an `azurerm_network_watcher_flow_log` resource
2. Set
```
retention_policy {
enabled = true
days = 0
}
```
3. Run `checkov`
4. Receive failure for `CKV_AZURE_12`
**Expected behavior**
`0` is an accepted value as documented at https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview#how-logging-works
**Desktop (please complete the following information):**
- OS: Linux
- Checkov Version: 2.0.26
**Additional context**
At https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py#L17 likely need to adjust the innermost `if` statement to something like:
```python
if retention_in_days is not None and (retention_days == 0 or retention_days >= 90):
```
Happy to open a pull request if this is an acceptable solution.
</issue>
<code>
[start of checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.common.util.type_forcers import force_int
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
4
5
6 class NetworkWatcherFlowLogPeriod(BaseResourceCheck):
7 def __init__(self):
8 name = "Ensure that Network Security Group Flow Log retention period is 'greater than 90 days'"
9 id = "CKV_AZURE_12"
10 supported_resources = ['azurerm_network_watcher_flow_log']
11 categories = [CheckCategories.LOGGING]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 if 'enabled' in conf and conf['enabled'][0]:
16 retention_block = conf['retention_policy'][0]
17 if retention_block['enabled'][0]:
18 retention_in_days = force_int(retention_block['days'][0])
19 if retention_in_days and retention_in_days >= 90:
20 return CheckResult.PASSED
21 return CheckResult.FAILED
22
23
24 check = NetworkWatcherFlowLogPeriod()
25
[end of checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py
--- a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py
+++ b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py
@@ -16,7 +16,7 @@
retention_block = conf['retention_policy'][0]
if retention_block['enabled'][0]:
retention_in_days = force_int(retention_block['days'][0])
- if retention_in_days and retention_in_days >= 90:
+ if retention_in_days is not None and (retention_in_days == 0 or retention_in_days >= 90):
return CheckResult.PASSED
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py\n--- a/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py\n+++ b/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py\n@@ -16,7 +16,7 @@\n retention_block = conf['retention_policy'][0]\n if retention_block['enabled'][0]:\n retention_in_days = force_int(retention_block['days'][0])\n- if retention_in_days and retention_in_days >= 90:\n+ if retention_in_days is not None and (retention_in_days == 0 or retention_in_days >= 90):\n return CheckResult.PASSED\n return CheckResult.FAILED\n", "issue": "CKV_AZURE_12: Retention policy of 0 is not supported\n**Describe the bug**\r\nWhen setting the `retention_period.days` value on `azurerm_network_watcher_flow_log` to `0`, `CKV_AZURE_12` still fails. `0` is the value to signify indefinite/forever retention.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create an `azurerm_network_watcher_flow_log` resource\r\n2. Set\r\n```\r\n retention_policy {\r\n enabled = true\r\n days = 0\r\n }\r\n```\r\n3. Run `checkov`\r\n4. Receive failure for `CKV_AZURE_12`\r\n\r\n**Expected behavior**\r\n`0` is an accepted value as documented at https://docs.microsoft.com/en-us/azure/network-watcher/network-watcher-nsg-flow-logging-overview#how-logging-works\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux\r\n - Checkov Version: 2.0.26\r\n\r\n\r\n**Additional context**\r\nAt https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py#L17 likely need to adjust the innermost `if` statement to something like:\r\n\r\n```python\r\nif retention_in_days is not None and (retention_days == 0 or retention_days >= 90):\r\n```\r\n\r\nHappy to open a pull request if this is an acceptable solution.\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_int\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass NetworkWatcherFlowLogPeriod(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that Network Security Group Flow Log retention period is 'greater than 90 days'\"\n id = \"CKV_AZURE_12\"\n supported_resources = ['azurerm_network_watcher_flow_log']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'enabled' in conf and conf['enabled'][0]:\n retention_block = conf['retention_policy'][0]\n if retention_block['enabled'][0]:\n retention_in_days = force_int(retention_block['days'][0])\n if retention_in_days and retention_in_days >= 90:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = NetworkWatcherFlowLogPeriod()\n", "path": "checkov/terraform/checks/resource/azure/NetworkWatcherFlowLogPeriod.py"}]} | 1,155 | 183 |
gh_patches_debug_30688 | rasdani/github-patches | git_diff | joke2k__faker-592 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reseed code broke in 0.8 unnecessarily
Commit ea4f189bbff1925d7a7e2d7cbc6e91e2e8a9a3f3 changed the name of the `random.Random()` object in `faker.generator` from `random` to `mod_random`, re-exposing the `random` module as the name `faker.generator.random`. This broke code like:
```py
from faker.generator import random
random.seed(1)
```
which is basically what my `pytest-randomly` plugin does. At first I thought this was #586 but then I saw that every run was broken. The failure is silent because `random` is still a name in `faker.generator`, it just now points at the global `random` module 😱
I suggest just doing some renaming in the module to fix this so old code continues to work.
</issue>
<code>
[start of faker/utils/distribution.py]
1 # coding=utf-8
2
3 import bisect
4 from faker.generator import mod_random
5
6 def random_sample(random=None):
7 if random is None:
8 random = mod_random
9 return random.uniform(0.0, 1.0)
10
11
12 def cumsum(it):
13 total = 0
14 for x in it:
15 total += x
16 yield total
17
18
19 def choice_distribution(a, p, random=None):
20 if random is None:
21 random = mod_random
22
23 assert len(a) == len(p)
24
25 if hasattr(random, 'choices'):
26 return random.choices(a, weights=p)[0]
27 else:
28 cdf = list(cumsum(p))
29 normal = cdf[-1]
30 cdf2 = [float(i) / float(normal) for i in cdf]
31 uniform_sample = random_sample(random=random)
32 idx = bisect.bisect_right(cdf2, uniform_sample)
33 return a[idx]
34
[end of faker/utils/distribution.py]
[start of faker/generator.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import re
6 import random
7
8
9 _re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
10 mod_random = random.Random()
11
12
13 class Generator(object):
14
15 __config = {}
16
17 def __init__(self, **config):
18 self.providers = []
19 self.__config = dict(
20 list(self.__config.items()) + list(config.items()))
21 self.__random = mod_random
22
23 def add_provider(self, provider):
24
25 if type(provider) is type:
26 provider = provider(self)
27
28 self.providers.insert(0, provider)
29
30 for method_name in dir(provider):
31 # skip 'private' method
32 if method_name.startswith('_'):
33 continue
34
35 faker_function = getattr(provider, method_name)
36
37 if hasattr(faker_function, '__call__') or \
38 isinstance(faker_function, (classmethod, staticmethod)):
39 # add all faker method to generator
40 self.set_formatter(method_name, faker_function)
41
42 def provider(self, name):
43 try:
44 lst = [p for p in self.get_providers()
45 if p.__provider__ == name.lower()]
46 return lst[0]
47 except IndexError:
48 return None
49
50 def get_providers(self):
51 """Returns added providers."""
52 return self.providers
53
54 @property
55 def random(self):
56 return self.__random
57
58 def seed_instance(self, seed=None):
59 """Calls random.seed"""
60 if self.__random == mod_random:
61 # create per-instance random obj when first time seed_instance() is called
62 self.__random = random.Random()
63 self.__random.seed(seed)
64
65 @classmethod
66 def seed(cls, seed=None):
67 mod_random.seed(seed)
68
69 def format(self, formatter, *args, **kwargs):
70 """
71 This is a secure way to make a fake from another Provider.
72 """
73 # TODO: data export?
74 return self.get_formatter(formatter)(*args, **kwargs)
75
76 def get_formatter(self, formatter):
77 try:
78 return getattr(self, formatter)
79 except AttributeError:
80 if 'locale' in self.__config:
81 msg = 'Unknown formatter "{0}" with locale "{1}"'.format(
82 formatter, self.__config['locale']
83 )
84 else:
85 raise AttributeError('Unknown formatter "{0}"'.format(
86 formatter
87 ))
88 raise AttributeError(msg)
89
90 def set_formatter(self, name, method):
91 """
92 This method adds a provider method to generator.
93 Override this method to add some decoration or logging stuff.
94 """
95 setattr(self, name, method)
96
97 def parse(self, text):
98 """
99 Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
100 with the result from the token method call.
101 """
102 return _re_token.sub(self.__format_token, text)
103
104 def __format_token(self, matches):
105 formatter = list(matches.groups())
106 formatter[1] = self.format(formatter[1])
107 return ''.join(formatter)
108
[end of faker/generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/generator.py b/faker/generator.py
--- a/faker/generator.py
+++ b/faker/generator.py
@@ -3,11 +3,12 @@
from __future__ import unicode_literals
import re
-import random
+import random as random_module
_re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
-mod_random = random.Random()
+random = random_module.Random()
+mod_random = random # compat with name released in 0.8
class Generator(object):
@@ -18,7 +19,7 @@
self.providers = []
self.__config = dict(
list(self.__config.items()) + list(config.items()))
- self.__random = mod_random
+ self.__random = random
def add_provider(self, provider):
@@ -57,14 +58,14 @@
def seed_instance(self, seed=None):
"""Calls random.seed"""
- if self.__random == mod_random:
+ if self.__random == random:
# create per-instance random obj when first time seed_instance() is called
- self.__random = random.Random()
+ self.__random = random_module.Random()
self.__random.seed(seed)
@classmethod
def seed(cls, seed=None):
- mod_random.seed(seed)
+ random.seed(seed)
def format(self, formatter, *args, **kwargs):
"""
diff --git a/faker/utils/distribution.py b/faker/utils/distribution.py
--- a/faker/utils/distribution.py
+++ b/faker/utils/distribution.py
@@ -1,7 +1,8 @@
# coding=utf-8
import bisect
-from faker.generator import mod_random
+from faker.generator import random as mod_random
+
def random_sample(random=None):
if random is None:
| {"golden_diff": "diff --git a/faker/generator.py b/faker/generator.py\n--- a/faker/generator.py\n+++ b/faker/generator.py\n@@ -3,11 +3,12 @@\n from __future__ import unicode_literals\n \n import re\n-import random\n+import random as random_module\n \n \n _re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\n-mod_random = random.Random()\n+random = random_module.Random()\n+mod_random = random # compat with name released in 0.8\n \n \n class Generator(object):\n@@ -18,7 +19,7 @@\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n- self.__random = mod_random\n+ self.__random = random\n \n def add_provider(self, provider):\n \n@@ -57,14 +58,14 @@\n \n def seed_instance(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n- if self.__random == mod_random:\n+ if self.__random == random:\n # create per-instance random obj when first time seed_instance() is called\n- self.__random = random.Random()\n+ self.__random = random_module.Random()\n self.__random.seed(seed)\n \n @classmethod\n def seed(cls, seed=None):\n- mod_random.seed(seed)\n+ random.seed(seed)\n \n def format(self, formatter, *args, **kwargs):\n \"\"\"\ndiff --git a/faker/utils/distribution.py b/faker/utils/distribution.py\n--- a/faker/utils/distribution.py\n+++ b/faker/utils/distribution.py\n@@ -1,7 +1,8 @@\n # coding=utf-8\n \n import bisect\n-from faker.generator import mod_random\n+from faker.generator import random as mod_random\n+\n \n def random_sample(random=None):\n if random is None:\n", "issue": "Reseed code broke in 0.8 unnecessarily\nCommit ea4f189bbff1925d7a7e2d7cbc6e91e2e8a9a3f3 changed the name of the `random.Random()` object in `faker.generator` from `random` to `mod_random`, re-exposing the `random` module as the name `faker.generator.random`. This broke code like:\r\n\r\n```py\r\nfrom faker.generator import random\r\nrandom.seed(1)\r\n``` \r\n\r\nwhich is basically what my `pytest-randomly` plugin does. At first I thought this was #586 but then I saw that every run was broken. 
The failure is silent because `random` is still a name in `faker.generator`, it just now points at the global `random` module \ud83d\ude31\r\n\r\nI suggest just doing some renaming in the module to fix this so old code continues to work.\n", "before_files": [{"content": "# coding=utf-8\n\nimport bisect\nfrom faker.generator import mod_random\n\ndef random_sample(random=None):\n if random is None:\n random = mod_random\n return random.uniform(0.0, 1.0)\n\n\ndef cumsum(it):\n total = 0\n for x in it:\n total += x\n yield total\n\n\ndef choice_distribution(a, p, random=None):\n if random is None:\n random = mod_random\n\n assert len(a) == len(p)\n\n if hasattr(random, 'choices'):\n return random.choices(a, weights=p)[0]\n else:\n cdf = list(cumsum(p))\n normal = cdf[-1]\n cdf2 = [float(i) / float(normal) for i in cdf]\n uniform_sample = random_sample(random=random)\n idx = bisect.bisect_right(cdf2, uniform_sample)\n return a[idx]\n", "path": "faker/utils/distribution.py"}, {"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\nimport random\n\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nmod_random = random.Random()\n\n\nclass Generator(object):\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n self.__random = mod_random\n\n def add_provider(self, provider):\n\n if type(provider) is type:\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if hasattr(faker_function, '__call__') or \\\n isinstance(faker_function, (classmethod, staticmethod)):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self):\n return self.__random\n\n def seed_instance(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n if self.__random == mod_random:\n # create per-instance random obj when first time seed_instance() is called\n self.__random = random.Random()\n self.__random.seed(seed)\n\n @classmethod\n def seed(cls, seed=None):\n mod_random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n if 'locale' in self.__config:\n msg = 'Unknown formatter \"{0}\" with locale \"{1}\"'.format(\n formatter, self.__config['locale']\n )\n else:\n raise AttributeError('Unknown formatter \"{0}\"'.format(\n formatter\n ))\n raise AttributeError(msg)\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = 
list(matches.groups())\n formatter[1] = self.format(formatter[1])\n return ''.join(formatter)\n", "path": "faker/generator.py"}]} | 1,873 | 408 |
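The fix recorded above is a rename plus a compatibility alias: the module-level `random.Random()` instance gets back its old public name `random`, and `mod_random` is kept bound to the same object so code written against 0.8 keeps working. A small sketch of that aliasing pattern in isolation, outside of faker:

```python
import random as random_module

# The module-level instance regains its old public name; the 0.8 name stays as an alias.
random = random_module.Random()
mod_random = random

# Code written against either name seeds the same underlying instance.
mod_random.seed(1)
assert random.random() == random_module.Random(1).random()
```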
gh_patches_debug_19281 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'` with PySide 6.4.0.1 application
<!--
Welcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller
- For questions and general support, use the discussions forum.
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
-->
<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->
## Description of the issue
When running a frozen PySide6 application using PySide 6.4.0.1, the `|` operator between Qt.KeyboardModifier and Qt.Key stops working, for example:
```
Traceback (most recent call last):
File "script.py", line 28, in <module>
window = MainWindow()
File "script.py", line 18, in __init__
button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))
TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'
[10109] Failed to execute script 'script' due to unhandled exception!
```
Note that the script used for this works just fine if invoked directly from a development environment, and it works both in a development and a frozen build if downgraded to 6.3.2.
Before filing this I made a repo to demonstrate the bug, but most of the info is included in this issue template already. https://github.com/twizmwazin/pyside-pyinstaller-unsupported-operand-bug
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```5.6.2```
* Version of Python: Python 3.10.8
* Platform: Ubuntu 20.04 on WSL 2
* How you installed Python: Python 3.10.8 from source
* Did you also try this on another platform? Does it work there?
First discovered after upgrading to PySide 6.4.0.1 in Azure DevOps CI using Ubuntu 20.04 and MS's build of Python 3.10. Also reproduced on Windows 11 using Python 3.10.8 64-bit installed using the python.org installer.
* try the latest development version, using the following command:
```shell
pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
```
* follow *all* the instructions in our "If Things Go Wrong" Guide
(https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and
### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)
* [x] start with clean installation
* [x] use the latest development version
* [x] Run your frozen program **from a command window (shell)** — instead of double-clicking on it
* [x] Package your program in **--onedir mode**
* [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file
* [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.
### A minimal example program which shows the error
```
#!/usr/bin/env python3
from PySide6.QtCore import Qt
from PySide6.QtGui import QAction, QKeySequence
from PySide6.QtWidgets import QApplication, QToolBar, QMainWindow
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setWindowTitle("My Awesome App")
toolbar = QToolBar("My main toolbar")
self.addToolBar(toolbar)
button_action = QAction("Your button", self)
button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))
button_action.triggered.connect(self.onMyToolBarButtonClick)
toolbar.addAction(button_action)
def onMyToolBarButtonClick(self, s):
print("click", s)
if __name__ == "__main__":
app = QApplication()
window = MainWindow()
window.show()
app.exec()
```
### Stacktrace / full error message
```
Traceback (most recent call last):
File "script.py", line 31, in <module>
window = MainWindow()
File "script.py", line 21, in __init__
button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))
TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'
```
Please also see <https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs>
for more about what would use to solve the issue.
</issue>
<code>
[start of PyInstaller/hooks/hook-PySide6.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2021-2022, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 from PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info
13
14 # Only proceed if PySide6 can be imported.
15 if pyside6_library_info.version is not None:
16 hiddenimports = ['shiboken6', 'inspect']
17
18 # Collect required Qt binaries.
19 binaries = get_qt_binaries(pyside6_library_info)
20
[end of PyInstaller/hooks/hook-PySide6.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-PySide6.py b/PyInstaller/hooks/hook-PySide6.py
--- a/PyInstaller/hooks/hook-PySide6.py
+++ b/PyInstaller/hooks/hook-PySide6.py
@@ -9,11 +9,17 @@
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
+from PyInstaller.utils.hooks import is_module_satisfies
from PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info
# Only proceed if PySide6 can be imported.
if pyside6_library_info.version is not None:
hiddenimports = ['shiboken6', 'inspect']
+ # Starting with PySide6 6.4.0, we need to collect PySide6.support.deprecated for | and & operators to work with
+ # Qt key and key modifiers enums. See #7249.
+ if is_module_satisfies("PySide6 >= 6.4.0"):
+ hiddenimports += ['PySide6.support.deprecated']
+
# Collect required Qt binaries.
binaries = get_qt_binaries(pyside6_library_info)
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PySide6.py b/PyInstaller/hooks/hook-PySide6.py\n--- a/PyInstaller/hooks/hook-PySide6.py\n+++ b/PyInstaller/hooks/hook-PySide6.py\n@@ -9,11 +9,17 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n+from PyInstaller.utils.hooks import is_module_satisfies\n from PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info\n \n # Only proceed if PySide6 can be imported.\n if pyside6_library_info.version is not None:\n hiddenimports = ['shiboken6', 'inspect']\n \n+ # Starting with PySide6 6.4.0, we need to collect PySide6.support.deprecated for | and & operators to work with\n+ # Qt key and key modifiers enums. See #7249.\n+ if is_module_satisfies(\"PySide6 >= 6.4.0\"):\n+ hiddenimports += ['PySide6.support.deprecated']\n+\n # Collect required Qt binaries.\n binaries = get_qt_binaries(pyside6_library_info)\n", "issue": "`TypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'` with PySide 6.4.0.1 application\n<!--\r\nWelcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller\r\n - For questions and general support, use the discussions forum.\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n-->\r\n\r\n<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->\r\n\r\n## Description of the issue\r\n\r\nWhen running a frozen PySide6 application using PySide 6.4.0.1, the `|` operator between Qt.KeyboardModifier and Qt.Key stops working, for example:\r\n```\r\nTraceback (most recent call last):\r\n File \"script.py\", line 28, in <module>\r\n window = MainWindow()\r\n File \"script.py\", line 18, in __init__\r\n button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))\r\nTypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'\r\n[10109] Failed to execute script 'script' due to unhandled exception!\r\n```\r\n\r\nNote that the script used for this works just fine if invoked directly from a development environment, and it works both in a development and a frozen build if downgraded to 6.3.2.\r\n\r\nBefore filing this I made a repo to demonstrate the bug, but most of the info is included in this issue template already. https://github.com/twizmwazin/pyside-pyinstaller-unsupported-operand-bug\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.6.2```\r\n* Version of Python: Python 3.10.8\r\n* Platform: Ubuntu 20.04 on WSL 2\r\n* How you installed Python: Python 3.10.8 from source\r\n* Did you also try this on another platform? Does it work there?\r\nFirst discovered after upgrading to PySide 6.4.0.1 in Azure DevOps CI using Ubuntu 20.04 and MS's build of Python 3.10. 
Also reproduced on Windows 11 using Python 3.10.8 64-bit installed using the python.org installer.\r\n\r\n\r\n* try the latest development version, using the following command:\r\n\r\n```shell\r\npip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\n```\r\n\r\n* follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n\r\n### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n\r\n * [x] start with clean installation\r\n * [x] use the latest development version\r\n * [x] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it\r\n * [x] Package your program in **--onedir mode**\r\n * [x] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file\r\n * [x] Repackage you application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\n#!/usr/bin/env python3\r\n\r\nfrom PySide6.QtCore import Qt\r\nfrom PySide6.QtGui import QAction, QKeySequence\r\nfrom PySide6.QtWidgets import QApplication, QToolBar, QMainWindow\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n\r\n self.setWindowTitle(\"My Awesome App\")\r\n\r\n toolbar = QToolBar(\"My main toolbar\")\r\n self.addToolBar(toolbar)\r\n\r\n button_action = QAction(\"Your button\", self)\r\n button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))\r\n button_action.triggered.connect(self.onMyToolBarButtonClick)\r\n toolbar.addAction(button_action)\r\n\r\n def onMyToolBarButtonClick(self, s):\r\n print(\"click\", s)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication()\r\n window = MainWindow()\r\n window.show()\r\n app.exec()\r\n```\r\n\r\n### Stacktrace / full error message\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"script.py\", line 31, in <module>\r\n window = MainWindow()\r\n File \"script.py\", line 21, in __init__\r\n button_action.setShortcut(QKeySequence(Qt.AltModifier | Qt.Key_D))\r\nTypeError: unsupported operand type(s) for |: 'KeyboardModifier' and 'Key'\r\n```\r\n\r\nPlease also see <https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs>\r\nfor more about what would use to solve the issue.\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2021-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks.qt import get_qt_binaries, pyside6_library_info\n\n# Only proceed if PySide6 can be imported.\nif pyside6_library_info.version is not None:\n hiddenimports = ['shiboken6', 'inspect']\n\n # Collect required Qt binaries.\n binaries = get_qt_binaries(pyside6_library_info)\n", "path": "PyInstaller/hooks/hook-PySide6.py"}]} | 1,882 | 270 |
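The hook change above gates one extra hidden import on the installed PySide6 version, using PyInstaller's `is_module_satisfies` helper. The same version gate sketched with only the standard library, where the version string is a stand-in rather than a value read from an installed PySide6:

```python
hiddenimports = ["shiboken6", "inspect"]


def version_tuple(version):
    # Compare only the first three numeric components ("6.4.0.1" -> (6, 4, 0)).
    return tuple(int(part) for part in version.split(".")[:3])


pyside6_version = "6.4.0.1"  # stand-in value, not detected from an installation
if version_tuple(pyside6_version) >= (6, 4, 0):
    # PySide6 >= 6.4.0 needs this module for | and & on Qt key/modifier enums.
    hiddenimports.append("PySide6.support.deprecated")

print(hiddenimports)
```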
gh_patches_debug_26508 | rasdani/github-patches | git_diff | gammapy__gammapy-1567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
What's the correct value for errordef in iminuit?
see https://github.com/gammapy/gammapy/blob/master/gammapy/utils/fitting/iminuit.py#L88
Reminder:
out fit statistic is -2 log (likelihood)
https://github.com/gammapy/gammapy/blob/master/gammapy/stats/fit_statistics.py#L58
</issue>
<code>
[start of gammapy/utils/fitting/iminuit.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 """iminuit fitting functions.
3 """
4 from __future__ import absolute_import, division, print_function, unicode_literals
5 import numpy as np
6
7 __all__ = [
8 'fit_iminuit',
9 ]
10
11
12 def fit_iminuit(parameters, function, opts_minuit=None):
13 """iminuit optimization
14
15 Parameters
16 ----------
17 parameters : `~gammapy.utils.modeling.ParameterList`
18 Parameters with starting values
19 function : callable
20 Likelihood function
21 opts_minuit : dict (optional)
22 Options passed to `iminuit.Minuit` constructor
23
24 Returns
25 -------
26 parameters : `~gammapy.utils.modeling.ParameterList`
27 Parameters with best-fit values
28 minuit : `~iminuit.Minuit`
29 Minuit object
30 """
31 from iminuit import Minuit
32
33 minuit_func = MinuitFunction(function, parameters)
34
35 if opts_minuit is None:
36 opts_minuit = {}
37 opts_minuit.update(make_minuit_par_kwargs(parameters))
38
39 minuit = Minuit(minuit_func.fcn,
40 forced_parameters=parameters.names,
41 **opts_minuit)
42
43 minuit.migrad()
44 parameters.covariance = _get_covar(minuit)
45
46 return parameters, minuit
47
48
49 class MinuitFunction(object):
50 """Wrapper for iminuit
51
52 Parameters
53 ----------
54 parameters : `~gammapy.utils.modeling.ParameterList`
55 Parameters with starting values
56 function : callable
57 Likelihood function
58 """
59
60 def __init__(self, function, parameters):
61 self.function = function
62 self.parameters = parameters
63
64 def fcn(self, *values):
65 for value, parameter in zip(values, self.parameters.parameters):
66 parameter.value = value
67 return self.function(self.parameters)
68
69
70 def make_minuit_par_kwargs(parameters):
71 """Create *Parameter Keyword Arguments* for the `Minuit` constructor.
72
73 See: http://iminuit.readthedocs.io/en/latest/api.html#iminuit.Minuit
74 """
75 kwargs = {}
76 for par in parameters.parameters:
77 kwargs[par.name] = par.value
78 if par.frozen:
79 kwargs['fix_{}'.format(par.name)] = True
80 min_ = None if np.isnan(par.min) else par.min
81 max_ = None if np.isnan(par.max) else par.max
82 kwargs['limit_{}'.format(par.name)] = (min_, max_)
83
84 if parameters.covariance is None:
85 kwargs['error_{}'.format(par.name)] = 1
86 else:
87 kwargs['error_{}'.format(par.name)] = parameters.error(par.name)
88
89 # TODO: Check if we need 0.5 or 1
90 kwargs['errordef'] = 1
91
92 return kwargs
93
94
95 def _get_covar(minuit):
96 """Get full covar matrix as Numpy array.
97
98 This was added as `minuit.np_covariance` in `iminuit` in v1.3,
99 but we still want to support v1.2
100 """
101 n = len(minuit.parameters)
102 m = np.zeros((n, n))
103 for i1, k1 in enumerate(minuit.parameters):
104 for i2, k2 in enumerate(minuit.parameters):
105 if set([k1, k2]).issubset(minuit.list_of_vary_param()):
106 m[i1, i2] = minuit.covariance[(k1, k2)]
107 return m
108
[end of gammapy/utils/fitting/iminuit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/utils/fitting/iminuit.py b/gammapy/utils/fitting/iminuit.py
--- a/gammapy/utils/fitting/iminuit.py
+++ b/gammapy/utils/fitting/iminuit.py
@@ -36,6 +36,10 @@
opts_minuit = {}
opts_minuit.update(make_minuit_par_kwargs(parameters))
+ # In Gammapy, we have the factor 2 in the likelihood function
+ # This means `errordef=1` in the Minuit interface is correct
+ opts_minuit.setdefault('errordef', 1)
+
minuit = Minuit(minuit_func.fcn,
forced_parameters=parameters.names,
**opts_minuit)
@@ -75,8 +79,7 @@
kwargs = {}
for par in parameters.parameters:
kwargs[par.name] = par.value
- if par.frozen:
- kwargs['fix_{}'.format(par.name)] = True
+
min_ = None if np.isnan(par.min) else par.min
max_ = None if np.isnan(par.max) else par.max
kwargs['limit_{}'.format(par.name)] = (min_, max_)
@@ -86,8 +89,8 @@
else:
kwargs['error_{}'.format(par.name)] = parameters.error(par.name)
- # TODO: Check if we need 0.5 or 1
- kwargs['errordef'] = 1
+ if par.frozen:
+ kwargs['fix_{}'.format(par.name)] = True
return kwargs
| {"golden_diff": "diff --git a/gammapy/utils/fitting/iminuit.py b/gammapy/utils/fitting/iminuit.py\n--- a/gammapy/utils/fitting/iminuit.py\n+++ b/gammapy/utils/fitting/iminuit.py\n@@ -36,6 +36,10 @@\n opts_minuit = {}\n opts_minuit.update(make_minuit_par_kwargs(parameters))\n \n+ # In Gammapy, we have the factor 2 in the likelihood function\n+ # This means `errordef=1` in the Minuit interface is correct\n+ opts_minuit.setdefault('errordef', 1)\n+\n minuit = Minuit(minuit_func.fcn,\n forced_parameters=parameters.names,\n **opts_minuit)\n@@ -75,8 +79,7 @@\n kwargs = {}\n for par in parameters.parameters:\n kwargs[par.name] = par.value\n- if par.frozen:\n- kwargs['fix_{}'.format(par.name)] = True\n+\n min_ = None if np.isnan(par.min) else par.min\n max_ = None if np.isnan(par.max) else par.max\n kwargs['limit_{}'.format(par.name)] = (min_, max_)\n@@ -86,8 +89,8 @@\n else:\n kwargs['error_{}'.format(par.name)] = parameters.error(par.name)\n \n- # TODO: Check if we need 0.5 or 1\n- kwargs['errordef'] = 1\n+ if par.frozen:\n+ kwargs['fix_{}'.format(par.name)] = True\n \n return kwargs\n", "issue": "What's the correct value for errordef in iminuit?\nsee https://github.com/gammapy/gammapy/blob/master/gammapy/utils/fitting/iminuit.py#L88\r\n\r\nReminder:\r\nout fit statistic is -2 log (likelihood)\r\nhttps://github.com/gammapy/gammapy/blob/master/gammapy/stats/fit_statistics.py#L58\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"iminuit fitting functions.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\n\n__all__ = [\n 'fit_iminuit',\n]\n\n\ndef fit_iminuit(parameters, function, opts_minuit=None):\n \"\"\"iminuit optimization\n\n Parameters\n ----------\n parameters : `~gammapy.utils.modeling.ParameterList`\n Parameters with starting values\n function : callable\n Likelihood function\n opts_minuit : dict (optional)\n Options passed to `iminuit.Minuit` constructor\n\n Returns\n -------\n parameters : `~gammapy.utils.modeling.ParameterList`\n Parameters with best-fit values\n minuit : `~iminuit.Minuit`\n Minuit object\n \"\"\"\n from iminuit import Minuit\n\n minuit_func = MinuitFunction(function, parameters)\n\n if opts_minuit is None:\n opts_minuit = {}\n opts_minuit.update(make_minuit_par_kwargs(parameters))\n\n minuit = Minuit(minuit_func.fcn,\n forced_parameters=parameters.names,\n **opts_minuit)\n\n minuit.migrad()\n parameters.covariance = _get_covar(minuit)\n\n return parameters, minuit\n\n\nclass MinuitFunction(object):\n \"\"\"Wrapper for iminuit\n\n Parameters\n ----------\n parameters : `~gammapy.utils.modeling.ParameterList`\n Parameters with starting values\n function : callable\n Likelihood function\n \"\"\"\n\n def __init__(self, function, parameters):\n self.function = function\n self.parameters = parameters\n\n def fcn(self, *values):\n for value, parameter in zip(values, self.parameters.parameters):\n parameter.value = value\n return self.function(self.parameters)\n\n\ndef make_minuit_par_kwargs(parameters):\n \"\"\"Create *Parameter Keyword Arguments* for the `Minuit` constructor.\n\n See: http://iminuit.readthedocs.io/en/latest/api.html#iminuit.Minuit\n \"\"\"\n kwargs = {}\n for par in parameters.parameters:\n kwargs[par.name] = par.value\n if par.frozen:\n kwargs['fix_{}'.format(par.name)] = True\n min_ = None if np.isnan(par.min) else par.min\n max_ = None if np.isnan(par.max) else par.max\n kwargs['limit_{}'.format(par.name)] = (min_, max_)\n\n if 
parameters.covariance is None:\n kwargs['error_{}'.format(par.name)] = 1\n else:\n kwargs['error_{}'.format(par.name)] = parameters.error(par.name)\n\n # TODO: Check if we need 0.5 or 1\n kwargs['errordef'] = 1\n\n return kwargs\n\n\ndef _get_covar(minuit):\n \"\"\"Get full covar matrix as Numpy array.\n\n This was added as `minuit.np_covariance` in `iminuit` in v1.3,\n but we still want to support v1.2\n \"\"\"\n n = len(minuit.parameters)\n m = np.zeros((n, n))\n for i1, k1 in enumerate(minuit.parameters):\n for i2, k2 in enumerate(minuit.parameters):\n if set([k1, k2]).issubset(minuit.list_of_vary_param()):\n m[i1, i2] = minuit.covariance[(k1, k2)]\n return m\n", "path": "gammapy/utils/fitting/iminuit.py"}]} | 1,612 | 356 |
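The answer encoded in the golden diff above is that the fit statistic already carries the factor 2 (it is -2 log likelihood), so `errordef=1` is the correct Minuit setting; a plain negative log-likelihood would use 0.5 instead. Using `setdefault` keeps a caller-supplied value intact, as this small sketch shows (the `print_level` key is only illustrative filler):

```python
opts_minuit = {"print_level": 0}       # illustrative caller-supplied options
opts_minuit.setdefault("errordef", 1)  # -2 log(likelihood) statistic -> errordef = 1
assert opts_minuit["errordef"] == 1

explicit = {"errordef": 0.5}           # a plain -log(likelihood) would use 0.5
explicit.setdefault("errordef", 1)     # setdefault never overrides an explicit choice
assert explicit["errordef"] == 0.5
```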
gh_patches_debug_2560 | rasdani/github-patches | git_diff | python-poetry__poetry-1673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`poetry shell` with fish does not echo in python REPL
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: MacOS Catalina 10.15.1
- **Poetry version**: 1.0.0b8
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: N/A
## Issue
As described by [L0stLink](https://github.com/sdispater/poetry/issues/1593#issuecomment-555132468) in a now-closed issue, `fish` still appears to have some issues when using `poetry shell`. Specifically, in the Python REPL "typed text not visible, but pressing enter shows that the input was registered and whatever was typed, executes".
It appears that the fix in #1621 only addressed the `bash` case.
</issue>
<code>
[start of poetry/utils/shell.py]
1 import os
2 import signal
3 import sys
4
5 import pexpect
6
7 from clikit.utils.terminal import Terminal
8 from shellingham import ShellDetectionFailure
9 from shellingham import detect_shell
10
11 from ._compat import WINDOWS
12 from .env import VirtualEnv
13
14
15 class Shell:
16 """
17 Represents the current shell.
18 """
19
20 _shell = None
21
22 def __init__(self, name, path): # type: (str, str) -> None
23 self._name = name
24 self._path = path
25
26 @property
27 def name(self): # type: () -> str
28 return self._name
29
30 @property
31 def path(self): # type: () -> str
32 return self._path
33
34 @classmethod
35 def get(cls): # type: () -> Shell
36 """
37 Retrieve the current shell.
38 """
39 if cls._shell is not None:
40 return cls._shell
41
42 try:
43 name, path = detect_shell(os.getpid())
44 except (RuntimeError, ShellDetectionFailure):
45 raise RuntimeError("Unable to detect the current shell.")
46
47 cls._shell = cls(name, path)
48
49 return cls._shell
50
51 def activate(self, env): # type: (VirtualEnv) -> None
52 if WINDOWS:
53 return env.execute(self.path)
54
55 terminal = Terminal()
56 with env.temp_environ():
57 c = pexpect.spawn(
58 self._path, ["-i"], dimensions=(terminal.height, terminal.width)
59 )
60
61 if not self._name == "bash":
62 c.setecho(False)
63
64 activate_script = self._get_activate_script()
65 bin_dir = "Scripts" if WINDOWS else "bin"
66 activate_path = env.path / bin_dir / activate_script
67 c.sendline("{} {}".format(self._get_source_command(), activate_path))
68
69 def resize(sig, data):
70 terminal = Terminal()
71 c.setwinsize(terminal.height, terminal.width)
72
73 signal.signal(signal.SIGWINCH, resize)
74
75 # Interact with the new shell.
76 c.interact(escape_character=None)
77 c.close()
78
79 sys.exit(c.exitstatus)
80
81 def _get_activate_script(self):
82 if "fish" == self._name:
83 suffix = ".fish"
84 elif "csh" == self._name:
85 suffix = ".csh"
86 else:
87 suffix = ""
88
89 return "activate" + suffix
90
91 def _get_source_command(self):
92 if "fish" == self._name:
93 return "source"
94 elif "csh" == self._name:
95 return "source"
96
97 return "."
98
99 def __repr__(self): # type: () -> str
100 return '{}("{}", "{}")'.format(self.__class__.__name__, self._name, self._path)
101
[end of poetry/utils/shell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py
--- a/poetry/utils/shell.py
+++ b/poetry/utils/shell.py
@@ -58,7 +58,7 @@
self._path, ["-i"], dimensions=(terminal.height, terminal.width)
)
- if not self._name == "bash":
+ if self._name == "zsh":
c.setecho(False)
activate_script = self._get_activate_script()
| {"golden_diff": "diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py\n--- a/poetry/utils/shell.py\n+++ b/poetry/utils/shell.py\n@@ -58,7 +58,7 @@\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n \n- if not self._name == \"bash\":\n+ if self._name == \"zsh\":\n c.setecho(False)\n \n activate_script = self._get_activate_script()\n", "issue": "`poetry shell` with fish does not echo in python REPL\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: MacOS Catalina 10.15.1\r\n- **Poetry version**: 1.0.0b8\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: N/A\r\n\r\n## Issue\r\nAs described by [L0stLink](https://github.com/sdispater/poetry/issues/1593#issuecomment-555132468) in a now-closed issue, `fish` still appears to have some issues when using `poetry shell`. Specifically, in the Python REPL \"typed text not visible, but pressing enter shows that the input was registered and whatever was typed, executes\".\r\n\r\nIt appears that the fix in #1621 only addressed the `bash` case.\n", "before_files": [{"content": "import os\nimport signal\nimport sys\n\nimport pexpect\n\nfrom clikit.utils.terminal import Terminal\nfrom shellingham import ShellDetectionFailure\nfrom shellingham import detect_shell\n\nfrom ._compat import WINDOWS\nfrom .env import VirtualEnv\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name, path): # type: (str, str) -> None\n self._name = name\n self._path = path\n\n @property\n def name(self): # type: () -> str\n return self._name\n\n @property\n def path(self): # type: () -> str\n return self._path\n\n @classmethod\n def get(cls): # type: () -> Shell\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def activate(self, env): # type: (VirtualEnv) -> None\n if WINDOWS:\n return env.execute(self.path)\n\n terminal = Terminal()\n with env.temp_environ():\n c = pexpect.spawn(\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n\n if not self._name == \"bash\":\n c.setecho(False)\n\n activate_script = self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n c.sendline(\"{} {}\".format(self._get_source_command(), activate_path))\n\n def resize(sig, data):\n terminal = Terminal()\n c.setwinsize(terminal.height, terminal.width)\n\n signal.signal(signal.SIGWINCH, resize)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n\n sys.exit(c.exitstatus)\n\n def _get_activate_script(self):\n if \"fish\" == self._name:\n suffix = \".fish\"\n elif \"csh\" == self._name:\n suffix = \".csh\"\n else:\n suffix = \"\"\n\n return \"activate\" + suffix\n\n def _get_source_command(self):\n if \"fish\" == self._name:\n return \"source\"\n elif \"csh\" == self._name:\n return \"source\"\n\n return \".\"\n\n def __repr__(self): # type: () -> str\n return 
'{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "path": "poetry/utils/shell.py"}]} | 1,621 | 112 |
gh_patches_debug_34178 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add check for mandatory functions in model files
Users are required to implement some functions(e.g, `loss()`) in the model file. We can add some checks for the model file to see if these required functions are implemented correctly. Otherwise, if the functions are not implemented (correctly), it won't be found until the job starts to run on k8s cluster.
</issue>
<code>
[start of elasticdl/python/common/model_helper.py]
1 import importlib.util
2 import os
3
4 from elasticdl.python.common.log_util import default_logger as logger
5 from elasticdl.python.worker.prediction_outputs_processor import (
6 BasePredictionOutputsProcessor,
7 )
8
9
10 def load_module(module_file):
11 spec = importlib.util.spec_from_file_location(module_file, module_file)
12 module = importlib.util.module_from_spec(spec)
13 spec.loader.exec_module(module)
14 return module
15
16
17 # TODO: Discuss whether we need to support default model
18 # function/class names such as `custom_model()`
19 # or `CustomModel()`
20 def load_model_from_module(model_def, model_module, model_params):
21 model_def_name = model_def.split(".")[-1]
22 if model_def_name in model_module:
23 custom_model_name = model_def_name
24 else:
25 raise ValueError(
26 "Cannot find the custom model function/class "
27 "in model definition files"
28 )
29 if model_params:
30 kvs = model_params.split(",")
31 model_params_dict = {}
32 for kv in kvs:
33 k, v = kv.split("=")
34 model_params_dict[k] = eval(v)
35 return model_module[custom_model_name](**model_params_dict)
36 else:
37 return model_module[custom_model_name]()
38
39
40 def get_module_file_path(model_zoo, spec_key):
41 """Get the path to module file from model zoo and the spec string.
42
43 For example, if `model_zoo = "model_zoo"` and
44 `spec_key = "test_module.custom_model"`, the function returns
45 "model_zoo/test_module.py".
46 """
47 return os.path.join(model_zoo, "/".join(spec_key.split(".")[:-1]) + ".py")
48
49
50 def _get_spec_value(spec_key, model_zoo, default_module):
51 """Get the value to the given spec key.
52
53 Notes:
54
55 * If the dot-splitted spec key (e.g. "test_module.custom_model"
56 is splitted into "test_module" and "custom_model") is of length 1
57 (e.g. `spec_key` is "custom_model"), return the value in the
58 specified `default_module`.
59 * If the spec key does not exist in the module, return `None`.
60 """
61 spec_key_items = spec_key.split(".")
62 spec_key_base = spec_key_items[-1]
63 if len(spec_key_items) == 1:
64 spec_key_module = default_module
65 else:
66 spec_key_module = load_module(
67 get_module_file_path(model_zoo, spec_key)
68 ).__dict__
69 return (
70 spec_key_module[spec_key_base]
71 if spec_key_base in spec_key_module
72 else None
73 )
74
75
76 def get_model_spec(
77 model_zoo,
78 model_def,
79 model_params,
80 dataset_fn,
81 loss,
82 optimizer,
83 eval_metrics_fn,
84 prediction_outputs_processor,
85 ):
86 """Get the model spec items in a tuple.
87
88 The model spec tuple contains the following items in order:
89
90 * The model object instantiated with parameters specified
91 in `model_params`,
92 * The `dataset_fn`,
93 * The `loss`,
94 * The `optimizer`,
95 * The `eval_metrics_fn`,
96 * The `prediction_outputs_processor`. Note that it will print
97 warning if it's not inherited from `BasePredictionOutputsProcessor`.
98 """
99 model_def_module_file = get_module_file_path(model_zoo, model_def)
100 default_module = load_module(model_def_module_file).__dict__
101 model = load_model_from_module(model_def, default_module, model_params)
102 prediction_outputs_processor = _get_spec_value(
103 prediction_outputs_processor, model_zoo, default_module
104 )
105 if prediction_outputs_processor and not isinstance(
106 prediction_outputs_processor, BasePredictionOutputsProcessor
107 ):
108 logger.warning(
109 "prediction_outputs_processor is not "
110 "inherited from BasePredictionOutputsProcessor. "
111 "Prediction outputs may not be processed correctly."
112 )
113 return (
114 model,
115 _get_spec_value(dataset_fn, model_zoo, default_module),
116 _get_spec_value(loss, model_zoo, default_module),
117 _get_spec_value(optimizer, model_zoo, default_module),
118 _get_spec_value(eval_metrics_fn, model_zoo, default_module),
119 prediction_outputs_processor,
120 )
121
122
123 def save_checkpoint_to_file(pb_model, file_name):
124 encoded_model = pb_model.SerializeToString()
125 with open(file_name, "wb") as f:
126 f.write(encoded_model)
127
128
129 def load_from_checkpoint_file(file_name):
130 from elasticdl.proto import elasticdl_pb2
131
132 pb_model = elasticdl_pb2.Model()
133 with open(file_name, "rb") as f:
134 pb_model.ParseFromString(f.read())
135 return pb_model
136
137
138 def find_layer(model, layer_class):
139 """
140 Find all layers in model that are instances of layer_class
141 """
142 layers = []
143 for layer in model.layers:
144 if isinstance(layer, layer_class):
145 layers.append(layer)
146 elif hasattr(layer, "layers"):
147 # search in nested layers
148 layers += find_layer(layer, layer_class)
149 return layers
150
[end of elasticdl/python/common/model_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/common/model_helper.py b/elasticdl/python/common/model_helper.py
--- a/elasticdl/python/common/model_helper.py
+++ b/elasticdl/python/common/model_helper.py
@@ -14,9 +14,6 @@
return module
-# TODO: Discuss whether we need to support default model
-# function/class names such as `custom_model()`
-# or `CustomModel()`
def load_model_from_module(model_def, model_module, model_params):
model_def_name = model_def.split(".")[-1]
if model_def_name in model_module:
@@ -47,7 +44,7 @@
return os.path.join(model_zoo, "/".join(spec_key.split(".")[:-1]) + ".py")
-def _get_spec_value(spec_key, model_zoo, default_module):
+def _get_spec_value(spec_key, model_zoo, default_module, required=False):
"""Get the value to the given spec key.
Notes:
@@ -66,11 +63,17 @@
spec_key_module = load_module(
get_module_file_path(model_zoo, spec_key)
).__dict__
- return (
+ spec_value = (
spec_key_module[spec_key_base]
if spec_key_base in spec_key_module
else None
)
+ if required and spec_value is None:
+ raise Exception(
+ "Missing required spec key %s in the module: %s"
+ % (spec_key_base, spec_key)
+ )
+ return spec_value
def get_model_spec(
@@ -112,10 +115,12 @@
)
return (
model,
- _get_spec_value(dataset_fn, model_zoo, default_module),
- _get_spec_value(loss, model_zoo, default_module),
- _get_spec_value(optimizer, model_zoo, default_module),
- _get_spec_value(eval_metrics_fn, model_zoo, default_module),
+ _get_spec_value(dataset_fn, model_zoo, default_module, required=True),
+ _get_spec_value(loss, model_zoo, default_module, required=True),
+ _get_spec_value(optimizer, model_zoo, default_module, required=True),
+ _get_spec_value(
+ eval_metrics_fn, model_zoo, default_module, required=True
+ ),
prediction_outputs_processor,
)
| {"golden_diff": "diff --git a/elasticdl/python/common/model_helper.py b/elasticdl/python/common/model_helper.py\n--- a/elasticdl/python/common/model_helper.py\n+++ b/elasticdl/python/common/model_helper.py\n@@ -14,9 +14,6 @@\n return module\n \n \n-# TODO: Discuss whether we need to support default model\n-# function/class names such as `custom_model()`\n-# or `CustomModel()`\n def load_model_from_module(model_def, model_module, model_params):\n model_def_name = model_def.split(\".\")[-1]\n if model_def_name in model_module:\n@@ -47,7 +44,7 @@\n return os.path.join(model_zoo, \"/\".join(spec_key.split(\".\")[:-1]) + \".py\")\n \n \n-def _get_spec_value(spec_key, model_zoo, default_module):\n+def _get_spec_value(spec_key, model_zoo, default_module, required=False):\n \"\"\"Get the value to the given spec key.\n \n Notes:\n@@ -66,11 +63,17 @@\n spec_key_module = load_module(\n get_module_file_path(model_zoo, spec_key)\n ).__dict__\n- return (\n+ spec_value = (\n spec_key_module[spec_key_base]\n if spec_key_base in spec_key_module\n else None\n )\n+ if required and spec_value is None:\n+ raise Exception(\n+ \"Missing required spec key %s in the module: %s\"\n+ % (spec_key_base, spec_key)\n+ )\n+ return spec_value\n \n \n def get_model_spec(\n@@ -112,10 +115,12 @@\n )\n return (\n model,\n- _get_spec_value(dataset_fn, model_zoo, default_module),\n- _get_spec_value(loss, model_zoo, default_module),\n- _get_spec_value(optimizer, model_zoo, default_module),\n- _get_spec_value(eval_metrics_fn, model_zoo, default_module),\n+ _get_spec_value(dataset_fn, model_zoo, default_module, required=True),\n+ _get_spec_value(loss, model_zoo, default_module, required=True),\n+ _get_spec_value(optimizer, model_zoo, default_module, required=True),\n+ _get_spec_value(\n+ eval_metrics_fn, model_zoo, default_module, required=True\n+ ),\n prediction_outputs_processor,\n )\n", "issue": "Add check for mandatory functions in model files\nUsers are required to implement some functions(e.g, `loss()`) in the model file. We can add some checks for the model file to see if these required functions are implemented correctly. 
Otherwise, if the functions are not implemented (correctly), it won't be found until the job starts to run on k8s cluster.\n", "before_files": [{"content": "import importlib.util\nimport os\n\nfrom elasticdl.python.common.log_util import default_logger as logger\nfrom elasticdl.python.worker.prediction_outputs_processor import (\n BasePredictionOutputsProcessor,\n)\n\n\ndef load_module(module_file):\n spec = importlib.util.spec_from_file_location(module_file, module_file)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\n# TODO: Discuss whether we need to support default model\n# function/class names such as `custom_model()`\n# or `CustomModel()`\ndef load_model_from_module(model_def, model_module, model_params):\n model_def_name = model_def.split(\".\")[-1]\n if model_def_name in model_module:\n custom_model_name = model_def_name\n else:\n raise ValueError(\n \"Cannot find the custom model function/class \"\n \"in model definition files\"\n )\n if model_params:\n kvs = model_params.split(\",\")\n model_params_dict = {}\n for kv in kvs:\n k, v = kv.split(\"=\")\n model_params_dict[k] = eval(v)\n return model_module[custom_model_name](**model_params_dict)\n else:\n return model_module[custom_model_name]()\n\n\ndef get_module_file_path(model_zoo, spec_key):\n \"\"\"Get the path to module file from model zoo and the spec string.\n\n For example, if `model_zoo = \"model_zoo\"` and\n `spec_key = \"test_module.custom_model\"`, the function returns\n \"model_zoo/test_module.py\".\n \"\"\"\n return os.path.join(model_zoo, \"/\".join(spec_key.split(\".\")[:-1]) + \".py\")\n\n\ndef _get_spec_value(spec_key, model_zoo, default_module):\n \"\"\"Get the value to the given spec key.\n\n Notes:\n\n * If the dot-splitted spec key (e.g. \"test_module.custom_model\"\n is splitted into \"test_module\" and \"custom_model\") is of length 1\n (e.g. `spec_key` is \"custom_model\"), return the value in the\n specified `default_module`.\n * If the spec key does not exist in the module, return `None`.\n \"\"\"\n spec_key_items = spec_key.split(\".\")\n spec_key_base = spec_key_items[-1]\n if len(spec_key_items) == 1:\n spec_key_module = default_module\n else:\n spec_key_module = load_module(\n get_module_file_path(model_zoo, spec_key)\n ).__dict__\n return (\n spec_key_module[spec_key_base]\n if spec_key_base in spec_key_module\n else None\n )\n\n\ndef get_model_spec(\n model_zoo,\n model_def,\n model_params,\n dataset_fn,\n loss,\n optimizer,\n eval_metrics_fn,\n prediction_outputs_processor,\n):\n \"\"\"Get the model spec items in a tuple.\n\n The model spec tuple contains the following items in order:\n\n * The model object instantiated with parameters specified\n in `model_params`,\n * The `dataset_fn`,\n * The `loss`,\n * The `optimizer`,\n * The `eval_metrics_fn`,\n * The `prediction_outputs_processor`. Note that it will print\n warning if it's not inherited from `BasePredictionOutputsProcessor`.\n \"\"\"\n model_def_module_file = get_module_file_path(model_zoo, model_def)\n default_module = load_module(model_def_module_file).__dict__\n model = load_model_from_module(model_def, default_module, model_params)\n prediction_outputs_processor = _get_spec_value(\n prediction_outputs_processor, model_zoo, default_module\n )\n if prediction_outputs_processor and not isinstance(\n prediction_outputs_processor, BasePredictionOutputsProcessor\n ):\n logger.warning(\n \"prediction_outputs_processor is not \"\n \"inherited from BasePredictionOutputsProcessor. 
\"\n \"Prediction outputs may not be processed correctly.\"\n )\n return (\n model,\n _get_spec_value(dataset_fn, model_zoo, default_module),\n _get_spec_value(loss, model_zoo, default_module),\n _get_spec_value(optimizer, model_zoo, default_module),\n _get_spec_value(eval_metrics_fn, model_zoo, default_module),\n prediction_outputs_processor,\n )\n\n\ndef save_checkpoint_to_file(pb_model, file_name):\n encoded_model = pb_model.SerializeToString()\n with open(file_name, \"wb\") as f:\n f.write(encoded_model)\n\n\ndef load_from_checkpoint_file(file_name):\n from elasticdl.proto import elasticdl_pb2\n\n pb_model = elasticdl_pb2.Model()\n with open(file_name, \"rb\") as f:\n pb_model.ParseFromString(f.read())\n return pb_model\n\n\ndef find_layer(model, layer_class):\n \"\"\"\n Find all layers in model that are instances of layer_class\n \"\"\"\n layers = []\n for layer in model.layers:\n if isinstance(layer, layer_class):\n layers.append(layer)\n elif hasattr(layer, \"layers\"):\n # search in nested layers\n layers += find_layer(layer, layer_class)\n return layers\n", "path": "elasticdl/python/common/model_helper.py"}]} | 2,042 | 521 |
gh_patches_debug_39525 | rasdani/github-patches | git_diff | lnbits__lnbits-836 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Poetry does not gracefully shutdown on CTRL+C
This is a bit critical, bad things can happen if LNbits is just killed without finishing off all routines.
Poetry with `poetry run lnbits`:
<img width="700" alt="image" src="https://user-images.githubusercontent.com/93376500/182562297-6850567b-6fb3-4003-ac9c-317d92392b04.png">
Running lnbits using `./venv/bin/uvicorn lnbits.__main__:app`:
<img width="671" alt="image" src="https://user-images.githubusercontent.com/93376500/182562341-b4d56bfa-bf4f-4981-84e6-06678923439f.png">
</issue>
<code>
[start of lnbits/server.py]
1 import click
2 import uvicorn
3
4
5 @click.command()
6 @click.option("--port", default="5000", help="Port to run LNBits on")
7 @click.option("--host", default="127.0.0.1", help="Host to run LNBits on")
8 def main(port, host):
9 """Launched with `poetry run lnbits` at root level"""
10 uvicorn.run("lnbits.__main__:app", port=port, host=host)
11
12
13 if __name__ == "__main__":
14 main()
15
16 # def main():
17 # """Launched with `poetry run start` at root level"""
18 # uvicorn.run("lnbits.__main__:app")
19
[end of lnbits/server.py]
[start of build.py]
1 import warnings
2 import subprocess
3 import glob
4 import os
5 from os import path
6 from typing import Any, List, NamedTuple, Optional
7 from pathlib import Path
8
9 LNBITS_PATH = path.dirname(path.realpath(__file__)) + "/lnbits"
10
11 def get_js_vendored(prefer_minified: bool = False) -> List[str]:
12 paths = get_vendored(".js", prefer_minified)
13
14 def sorter(key: str):
15 if "moment@" in key:
16 return 1
17 if "vue@" in key:
18 return 2
19 if "vue-router@" in key:
20 return 3
21 if "polyfills" in key:
22 return 4
23 return 9
24
25 return sorted(paths, key=sorter)
26
27
28 def get_css_vendored(prefer_minified: bool = False) -> List[str]:
29 paths = get_vendored(".css", prefer_minified)
30
31 def sorter(key: str):
32 if "quasar@" in key:
33 return 1
34 if "vue@" in key:
35 return 2
36 if "chart.js@" in key:
37 return 100
38 return 9
39
40 return sorted(paths, key=sorter)
41
42
43 def get_vendored(ext: str, prefer_minified: bool = False) -> List[str]:
44 paths: List[str] = []
45 for path in glob.glob(
46 os.path.join(LNBITS_PATH, "static/vendor/**"), recursive=True
47 ):
48 if path.endswith(".min" + ext):
49 # path is minified
50 unminified = path.replace(".min" + ext, ext)
51 if prefer_minified:
52 paths.append(path)
53 if unminified in paths:
54 paths.remove(unminified)
55 elif unminified not in paths:
56 paths.append(path)
57
58 elif path.endswith(ext):
59 # path is not minified
60 minified = path.replace(ext, ".min" + ext)
61 if not prefer_minified:
62 paths.append(path)
63 if minified in paths:
64 paths.remove(minified)
65 elif minified not in paths:
66 paths.append(path)
67
68 return sorted(paths)
69
70
71 def url_for_vendored(abspath: str) -> str:
72 return "/" + os.path.relpath(abspath, LNBITS_PATH)
73
74 def transpile_scss():
75 with warnings.catch_warnings():
76 warnings.simplefilter("ignore")
77 from scss.compiler import compile_string # type: ignore
78
79 with open(os.path.join(LNBITS_PATH, "static/scss/base.scss")) as scss:
80 with open(os.path.join(LNBITS_PATH, "static/css/base.css"), "w") as css:
81 css.write(compile_string(scss.read()))
82
83 def bundle_vendored():
84 for getfiles, outputpath in [
85 (get_js_vendored, os.path.join(LNBITS_PATH, "static/bundle.js")),
86 (get_css_vendored, os.path.join(LNBITS_PATH, "static/bundle.css")),
87 ]:
88 output = ""
89 for path in getfiles():
90 with open(path) as f:
91 output += "/* " + url_for_vendored(path) + " */\n" + f.read() + ";\n"
92 with open(outputpath, "w") as f:
93 f.write(output)
94
95
96 def build():
97 transpile_scss()
98 bundle_vendored()
99 # root = Path("lnbits/static/foo")
100 # root.mkdir(parents=True)
101 # root.joinpath("example.css").write_text("")
102
103 if __name__ == "__main__":
104 build()
105
106 #def build(setup_kwargs):
107 # """Build """
108 # transpile_scss()
109 # bundle_vendored()
110 # subprocess.run(["ls", "-la", "./lnbits/static"])
111
[end of build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -8,6 +8,7 @@
LNBITS_PATH = path.dirname(path.realpath(__file__)) + "/lnbits"
+
def get_js_vendored(prefer_minified: bool = False) -> List[str]:
paths = get_vendored(".js", prefer_minified)
@@ -71,6 +72,7 @@
def url_for_vendored(abspath: str) -> str:
return "/" + os.path.relpath(abspath, LNBITS_PATH)
+
def transpile_scss():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
@@ -80,6 +82,7 @@
with open(os.path.join(LNBITS_PATH, "static/css/base.css"), "w") as css:
css.write(compile_string(scss.read()))
+
def bundle_vendored():
for getfiles, outputpath in [
(get_js_vendored, os.path.join(LNBITS_PATH, "static/bundle.js")),
@@ -96,15 +99,7 @@
def build():
transpile_scss()
bundle_vendored()
-# root = Path("lnbits/static/foo")
-# root.mkdir(parents=True)
-# root.joinpath("example.css").write_text("")
-if __name__ == "__main__":
- build()
-#def build(setup_kwargs):
-# """Build """
-# transpile_scss()
-# bundle_vendored()
-# subprocess.run(["ls", "-la", "./lnbits/static"])
+if __name__ == "__main__":
+ build()
diff --git a/lnbits/server.py b/lnbits/server.py
--- a/lnbits/server.py
+++ b/lnbits/server.py
@@ -1,18 +1,45 @@
+import time
+
import click
import uvicorn
+from lnbits.settings import HOST, PORT
+
[email protected]()
[email protected]("--port", default="5000", help="Port to run LNBits on")
[email protected]("--host", default="127.0.0.1", help="Host to run LNBits on")
-def main(port, host):
[email protected](
+ context_settings=dict(
+ ignore_unknown_options=True,
+ allow_extra_args=True,
+ )
+)
[email protected]("--port", default=PORT, help="Port to listen on")
[email protected]("--host", default=HOST, help="Host to run LNBits on")
[email protected]("--ssl-keyfile", default=None, help="Path to SSL keyfile")
[email protected]("--ssl-certfile", default=None, help="Path to SSL certificate")
[email protected]_context
+def main(ctx, port: int, host: str, ssl_keyfile: str, ssl_certfile: str):
"""Launched with `poetry run lnbits` at root level"""
- uvicorn.run("lnbits.__main__:app", port=port, host=host)
+ # this beautiful beast parses all command line arguments and passes them to the uvicorn server
+ d = dict(
+ [
+ (
+ item[0].strip("--").replace("-", "_"),
+ int(item[1]) if item[1].isdigit() else item[1],
+ )
+ for item in zip(*[iter(ctx.args)] * 2)
+ ]
+ )
+ config = uvicorn.Config(
+ "lnbits.__main__:app",
+ port=port,
+ host=host,
+ ssl_keyfile=ssl_keyfile,
+ ssl_certfile=ssl_certfile,
+ **d
+ )
+ server = uvicorn.Server(config)
+ server.run()
if __name__ == "__main__":
main()
-
-# def main():
-# """Launched with `poetry run start` at root level"""
-# uvicorn.run("lnbits.__main__:app")
| {"golden_diff": "diff --git a/build.py b/build.py\n--- a/build.py\n+++ b/build.py\n@@ -8,6 +8,7 @@\n \n LNBITS_PATH = path.dirname(path.realpath(__file__)) + \"/lnbits\"\n \n+\n def get_js_vendored(prefer_minified: bool = False) -> List[str]:\n paths = get_vendored(\".js\", prefer_minified)\n \n@@ -71,6 +72,7 @@\n def url_for_vendored(abspath: str) -> str:\n return \"/\" + os.path.relpath(abspath, LNBITS_PATH)\n \n+\n def transpile_scss():\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n@@ -80,6 +82,7 @@\n with open(os.path.join(LNBITS_PATH, \"static/css/base.css\"), \"w\") as css:\n css.write(compile_string(scss.read()))\n \n+\n def bundle_vendored():\n for getfiles, outputpath in [\n (get_js_vendored, os.path.join(LNBITS_PATH, \"static/bundle.js\")),\n@@ -96,15 +99,7 @@\n def build():\n transpile_scss()\n bundle_vendored()\n-# root = Path(\"lnbits/static/foo\")\n-# root.mkdir(parents=True)\n-# root.joinpath(\"example.css\").write_text(\"\")\n \n-if __name__ == \"__main__\":\n- build()\n \n-#def build(setup_kwargs):\n-# \"\"\"Build \"\"\"\n-# transpile_scss()\n-# bundle_vendored()\n-# subprocess.run([\"ls\", \"-la\", \"./lnbits/static\"])\n+if __name__ == \"__main__\":\n+ build()\ndiff --git a/lnbits/server.py b/lnbits/server.py\n--- a/lnbits/server.py\n+++ b/lnbits/server.py\n@@ -1,18 +1,45 @@\n+import time\n+\n import click\n import uvicorn\n \n+from lnbits.settings import HOST, PORT\n+\n \[email protected]()\[email protected](\"--port\", default=\"5000\", help=\"Port to run LNBits on\")\[email protected](\"--host\", default=\"127.0.0.1\", help=\"Host to run LNBits on\")\n-def main(port, host):\[email protected](\n+ context_settings=dict(\n+ ignore_unknown_options=True,\n+ allow_extra_args=True,\n+ )\n+)\[email protected](\"--port\", default=PORT, help=\"Port to listen on\")\[email protected](\"--host\", default=HOST, help=\"Host to run LNBits on\")\[email protected](\"--ssl-keyfile\", default=None, help=\"Path to SSL keyfile\")\[email protected](\"--ssl-certfile\", default=None, help=\"Path to SSL certificate\")\[email protected]_context\n+def main(ctx, port: int, host: str, ssl_keyfile: str, ssl_certfile: str):\n \"\"\"Launched with `poetry run lnbits` at root level\"\"\"\n- uvicorn.run(\"lnbits.__main__:app\", port=port, host=host)\n+ # this beautiful beast parses all command line arguments and passes them to the uvicorn server\n+ d = dict(\n+ [\n+ (\n+ item[0].strip(\"--\").replace(\"-\", \"_\"),\n+ int(item[1]) if item[1].isdigit() else item[1],\n+ )\n+ for item in zip(*[iter(ctx.args)] * 2)\n+ ]\n+ )\n+ config = uvicorn.Config(\n+ \"lnbits.__main__:app\",\n+ port=port,\n+ host=host,\n+ ssl_keyfile=ssl_keyfile,\n+ ssl_certfile=ssl_certfile,\n+ **d\n+ )\n+ server = uvicorn.Server(config)\n+ server.run()\n \n \n if __name__ == \"__main__\":\n main()\n-\n-# def main():\n-# \"\"\"Launched with `poetry run start` at root level\"\"\"\n-# uvicorn.run(\"lnbits.__main__:app\")\n", "issue": "Poetry does not gracefully shutdown on CTRL+C\nThis is a bit critical, bad things can happen if LNbits is just killed without finishing off all routines.\r\n\r\nPoetry with `poetry run lnbits`:\r\n<img width=\"700\" alt=\"image\" src=\"https://user-images.githubusercontent.com/93376500/182562297-6850567b-6fb3-4003-ac9c-317d92392b04.png\">\r\n\r\nRunning lnbits using `./venv/bin/uvicorn lnbits.__main__:app`:\r\n<img width=\"671\" alt=\"image\" src=\"https://user-images.githubusercontent.com/93376500/182562341-b4d56bfa-bf4f-4981-84e6-06678923439f.png\">\r\n\n", "before_files": 
[{"content": "import click\nimport uvicorn\n\n\[email protected]()\[email protected](\"--port\", default=\"5000\", help=\"Port to run LNBits on\")\[email protected](\"--host\", default=\"127.0.0.1\", help=\"Host to run LNBits on\")\ndef main(port, host):\n \"\"\"Launched with `poetry run lnbits` at root level\"\"\"\n uvicorn.run(\"lnbits.__main__:app\", port=port, host=host)\n\n\nif __name__ == \"__main__\":\n main()\n\n# def main():\n# \"\"\"Launched with `poetry run start` at root level\"\"\"\n# uvicorn.run(\"lnbits.__main__:app\")\n", "path": "lnbits/server.py"}, {"content": "import warnings\nimport subprocess\nimport glob\nimport os\nfrom os import path\nfrom typing import Any, List, NamedTuple, Optional\nfrom pathlib import Path\n\nLNBITS_PATH = path.dirname(path.realpath(__file__)) + \"/lnbits\"\n\ndef get_js_vendored(prefer_minified: bool = False) -> List[str]:\n paths = get_vendored(\".js\", prefer_minified)\n\n def sorter(key: str):\n if \"moment@\" in key:\n return 1\n if \"vue@\" in key:\n return 2\n if \"vue-router@\" in key:\n return 3\n if \"polyfills\" in key:\n return 4\n return 9\n\n return sorted(paths, key=sorter)\n\n\ndef get_css_vendored(prefer_minified: bool = False) -> List[str]:\n paths = get_vendored(\".css\", prefer_minified)\n\n def sorter(key: str):\n if \"quasar@\" in key:\n return 1\n if \"vue@\" in key:\n return 2\n if \"chart.js@\" in key:\n return 100\n return 9\n\n return sorted(paths, key=sorter)\n\n\ndef get_vendored(ext: str, prefer_minified: bool = False) -> List[str]:\n paths: List[str] = []\n for path in glob.glob(\n os.path.join(LNBITS_PATH, \"static/vendor/**\"), recursive=True\n ):\n if path.endswith(\".min\" + ext):\n # path is minified\n unminified = path.replace(\".min\" + ext, ext)\n if prefer_minified:\n paths.append(path)\n if unminified in paths:\n paths.remove(unminified)\n elif unminified not in paths:\n paths.append(path)\n\n elif path.endswith(ext):\n # path is not minified\n minified = path.replace(ext, \".min\" + ext)\n if not prefer_minified:\n paths.append(path)\n if minified in paths:\n paths.remove(minified)\n elif minified not in paths:\n paths.append(path)\n\n return sorted(paths)\n\n\ndef url_for_vendored(abspath: str) -> str:\n return \"/\" + os.path.relpath(abspath, LNBITS_PATH)\n\ndef transpile_scss():\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from scss.compiler import compile_string # type: ignore\n\n with open(os.path.join(LNBITS_PATH, \"static/scss/base.scss\")) as scss:\n with open(os.path.join(LNBITS_PATH, \"static/css/base.css\"), \"w\") as css:\n css.write(compile_string(scss.read()))\n\ndef bundle_vendored():\n for getfiles, outputpath in [\n (get_js_vendored, os.path.join(LNBITS_PATH, \"static/bundle.js\")),\n (get_css_vendored, os.path.join(LNBITS_PATH, \"static/bundle.css\")),\n ]:\n output = \"\"\n for path in getfiles():\n with open(path) as f:\n output += \"/* \" + url_for_vendored(path) + \" */\\n\" + f.read() + \";\\n\"\n with open(outputpath, \"w\") as f:\n f.write(output)\n\n\ndef build():\n transpile_scss()\n bundle_vendored()\n# root = Path(\"lnbits/static/foo\")\n# root.mkdir(parents=True)\n# root.joinpath(\"example.css\").write_text(\"\")\n\nif __name__ == \"__main__\":\n build()\n\n#def build(setup_kwargs):\n# \"\"\"Build \"\"\"\n# transpile_scss()\n# bundle_vendored()\n# subprocess.run([\"ls\", \"-la\", \"./lnbits/static\"])\n", "path": "build.py"}]} | 1,983 | 882 |
gh_patches_debug_646 | rasdani/github-patches | git_diff | pex-tool__pex-2034 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.120
On the docket:
+ [x] Support REPL command history #2019
+ [x] Using --complete-platform with --resolve-local-platforms should build sdists when local platform provides a subset of complete-platforms #2026
+ [x] A loose layout, venv-with-symlink PEX creates brittle symlinks #2023
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.119"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.119"
+__version__ = "2.1.120"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.119\"\n+__version__ = \"2.1.120\"\n", "issue": "Release 2.1.120\nOn the docket:\r\n+ [x] Support REPL command history #2019 \r\n+ [x] Using --complete-platform with --resolve-local-platforms should build sdists when local platform provides a subset of complete-platforms #2026\r\n+ [x] A loose layout, venv-with-symlink PEX creates brittle symlinks #2023\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.119\"\n", "path": "pex/version.py"}]} | 674 | 99 |
gh_patches_debug_6399 | rasdani/github-patches | git_diff | facebookresearch__hydra-277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up --cfg
It will be cleaner for --cfg to always take one of job|hydra|all, and not have it default to job.
This will eliminate the problem that occurs when --cfg is not the last flag in the command line and some override is associated with --cfg.
</issue>
<code>
[start of hydra/_internal/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import inspect
3 import os
4 import sys
5 from .hydra import Hydra
6 import argparse
7
8
9 def run_hydra(args_parser, task_function, config_path, strict):
10 stack = inspect.stack()
11 frame = stack[2]
12
13 calling_file = None
14 calling__module = None
15 try:
16 calling_file = frame[0].f_locals["__file__"]
17 except KeyError:
18 pass
19 try:
20 module_envs = ["HYDRA_MAIN_MODULE", "FB_PAR_MAIN_MODULE", "FB_XAR_MAIN_MODULE"]
21 for module_env in module_envs:
22 if module_env in os.environ:
23 calling__module = os.environ[module_env]
24 break
25
26 if calling__module is None:
27 calling__module = frame[0].f_globals[frame[3]].__module__
28 except KeyError:
29 pass
30
31 hydra = Hydra(
32 calling_file=calling_file,
33 calling_module=calling__module,
34 config_path=config_path,
35 task_function=task_function,
36 strict=strict,
37 )
38
39 args = args_parser.parse_args()
40 if args.help:
41 hydra.app_help(args_parser=args_parser, args=args)
42 sys.exit(0)
43 if args.hydra_help:
44 hydra.hydra_help(args_parser=args_parser, args=args)
45 sys.exit(0)
46
47 has_show_cfg = args.cfg is not None
48 num_commands = args.run + has_show_cfg + args.multirun + args.shell_completion
49 if num_commands > 1:
50 raise ValueError(
51 "Only one of --run, --multirun, -cfg and --shell_completion can be specified"
52 )
53 if num_commands == 0:
54 args.run = True
55 if args.run:
56 hydra.run(overrides=args.overrides)
57 elif args.multirun:
58 hydra.multirun(overrides=args.overrides)
59 elif args.cfg:
60 hydra.show_cfg(overrides=args.overrides, cfg_type=args.cfg)
61 elif args.shell_completion:
62 hydra.shell_completion(overrides=args.overrides)
63 else:
64 print("Command not specified")
65 sys.exit(1)
66
67
68 def _get_exec_command():
69 if sys.argv[0].endswith(".py"):
70 return "python {}".format(sys.argv[0])
71 else:
72 # Running as an installed app (setuptools entry point)
73 executable = os.path.basename(sys.argv[0])
74 return executable
75
76
77 def get_args_parser():
78 from .. import __version__
79
80 parser = argparse.ArgumentParser(add_help=False, description="Hydra")
81 parser.add_argument("--help", "-h", action="store_true", help="Application's help")
82 parser.add_argument("--hydra-help", action="store_true", help="Hydra's help")
83 parser.add_argument(
84 "--version", action="version", version="Hydra {}".format(__version__)
85 )
86 parser.add_argument(
87 "overrides",
88 nargs="*",
89 help="Any key=value arguments to override config values (use dots for.nested=overrides)",
90 )
91
92 parser.add_argument(
93 "--cfg",
94 "-c",
95 const="job",
96 nargs="?",
97 choices=["job", "hydra", "all"],
98 help="Show config instead of running, optional value indicates which config to show (defaults to job)",
99 )
100
101 parser.add_argument("--run", "-r", action="store_true", help="Run a job")
102
103 parser.add_argument(
104 "--multirun",
105 "-m",
106 action="store_true",
107 help="Run multiple jobs with the configured launcher",
108 )
109
110 shell = "SHELL_NAME"
111 install_cmd = 'eval "$({} -sc install={})"'.format(_get_exec_command(), shell)
112 uninstall_cmd = 'eval "$({} -sc uninstall={})"'.format(_get_exec_command(), shell)
113 parser.add_argument(
114 "--shell_completion",
115 "-sc",
116 action="store_true",
117 help="""Install or Uninstall shell completion:
118 Install:
119 {}
120
121 Uninstall:
122 {}
123 """.format(
124 install_cmd, uninstall_cmd
125 ),
126 )
127 return parser
128
129
130 def get_args(args=None):
131 return get_args_parser().parse_args(args=args)
132
[end of hydra/_internal/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/_internal/utils.py b/hydra/_internal/utils.py
--- a/hydra/_internal/utils.py
+++ b/hydra/_internal/utils.py
@@ -92,10 +92,8 @@
parser.add_argument(
"--cfg",
"-c",
- const="job",
- nargs="?",
choices=["job", "hydra", "all"],
- help="Show config instead of running, optional value indicates which config to show (defaults to job)",
+ help="Show config instead of running [job|hydra|all]",
)
parser.add_argument("--run", "-r", action="store_true", help="Run a job")
| {"golden_diff": "diff --git a/hydra/_internal/utils.py b/hydra/_internal/utils.py\n--- a/hydra/_internal/utils.py\n+++ b/hydra/_internal/utils.py\n@@ -92,10 +92,8 @@\n parser.add_argument(\n \"--cfg\",\n \"-c\",\n- const=\"job\",\n- nargs=\"?\",\n choices=[\"job\", \"hydra\", \"all\"],\n- help=\"Show config instead of running, optional value indicates which config to show (defaults to job)\",\n+ help=\"Show config instead of running [job|hydra|all]\",\n )\n \n parser.add_argument(\"--run\", \"-r\", action=\"store_true\", help=\"Run a job\")\n", "issue": "Clean up --cfg\nIt will be cleaner for --cfg to always take one of job|hydra|all, and not have it default to job.\r\nthis will eliminate the problem that occures when --cfg is not the last flag in the command line and some override is associated with --cfg.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport inspect\nimport os\nimport sys\nfrom .hydra import Hydra\nimport argparse\n\n\ndef run_hydra(args_parser, task_function, config_path, strict):\n stack = inspect.stack()\n frame = stack[2]\n\n calling_file = None\n calling__module = None\n try:\n calling_file = frame[0].f_locals[\"__file__\"]\n except KeyError:\n pass\n try:\n module_envs = [\"HYDRA_MAIN_MODULE\", \"FB_PAR_MAIN_MODULE\", \"FB_XAR_MAIN_MODULE\"]\n for module_env in module_envs:\n if module_env in os.environ:\n calling__module = os.environ[module_env]\n break\n\n if calling__module is None:\n calling__module = frame[0].f_globals[frame[3]].__module__\n except KeyError:\n pass\n\n hydra = Hydra(\n calling_file=calling_file,\n calling_module=calling__module,\n config_path=config_path,\n task_function=task_function,\n strict=strict,\n )\n\n args = args_parser.parse_args()\n if args.help:\n hydra.app_help(args_parser=args_parser, args=args)\n sys.exit(0)\n if args.hydra_help:\n hydra.hydra_help(args_parser=args_parser, args=args)\n sys.exit(0)\n\n has_show_cfg = args.cfg is not None\n num_commands = args.run + has_show_cfg + args.multirun + args.shell_completion\n if num_commands > 1:\n raise ValueError(\n \"Only one of --run, --multirun, -cfg and --shell_completion can be specified\"\n )\n if num_commands == 0:\n args.run = True\n if args.run:\n hydra.run(overrides=args.overrides)\n elif args.multirun:\n hydra.multirun(overrides=args.overrides)\n elif args.cfg:\n hydra.show_cfg(overrides=args.overrides, cfg_type=args.cfg)\n elif args.shell_completion:\n hydra.shell_completion(overrides=args.overrides)\n else:\n print(\"Command not specified\")\n sys.exit(1)\n\n\ndef _get_exec_command():\n if sys.argv[0].endswith(\".py\"):\n return \"python {}\".format(sys.argv[0])\n else:\n # Running as an installed app (setuptools entry point)\n executable = os.path.basename(sys.argv[0])\n return executable\n\n\ndef get_args_parser():\n from .. 
import __version__\n\n parser = argparse.ArgumentParser(add_help=False, description=\"Hydra\")\n parser.add_argument(\"--help\", \"-h\", action=\"store_true\", help=\"Application's help\")\n parser.add_argument(\"--hydra-help\", action=\"store_true\", help=\"Hydra's help\")\n parser.add_argument(\n \"--version\", action=\"version\", version=\"Hydra {}\".format(__version__)\n )\n parser.add_argument(\n \"overrides\",\n nargs=\"*\",\n help=\"Any key=value arguments to override config values (use dots for.nested=overrides)\",\n )\n\n parser.add_argument(\n \"--cfg\",\n \"-c\",\n const=\"job\",\n nargs=\"?\",\n choices=[\"job\", \"hydra\", \"all\"],\n help=\"Show config instead of running, optional value indicates which config to show (defaults to job)\",\n )\n\n parser.add_argument(\"--run\", \"-r\", action=\"store_true\", help=\"Run a job\")\n\n parser.add_argument(\n \"--multirun\",\n \"-m\",\n action=\"store_true\",\n help=\"Run multiple jobs with the configured launcher\",\n )\n\n shell = \"SHELL_NAME\"\n install_cmd = 'eval \"$({} -sc install={})\"'.format(_get_exec_command(), shell)\n uninstall_cmd = 'eval \"$({} -sc uninstall={})\"'.format(_get_exec_command(), shell)\n parser.add_argument(\n \"--shell_completion\",\n \"-sc\",\n action=\"store_true\",\n help=\"\"\"Install or Uninstall shell completion:\n Install:\n {}\n\n Uninstall:\n {}\n\"\"\".format(\n install_cmd, uninstall_cmd\n ),\n )\n return parser\n\n\ndef get_args(args=None):\n return get_args_parser().parse_args(args=args)\n", "path": "hydra/_internal/utils.py"}]} | 1,804 | 153 |
gh_patches_debug_33836 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to see any CKV2 checks in the list
**Describe the bug**
I posted this on Slack first and it was confirmed to be a bug. I can't see any of the CKV2 checks when running `checkov -l`
**Expected behavior**
CKV2 / graph checks should be present and working.
**Screenshots**

**Desktop (please complete the following information):**
- OS: os X
- Checkov Version 2.0.107
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit"
32 ]
33 },
34 install_requires=[
35 "bc-python-hcl2>=0.3.18",
36 "cloudsplaining>=0.4.1",
37 "deep_merge",
38 "tabulate",
39 "colorama",
40 "termcolor",
41 "junit-xml",
42 "dpath>=1.5.0,<2",
43 "pyyaml>=5.4.1",
44 "boto3==1.17.27",
45 "GitPython",
46 "six==1.15.0",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker"
55 ],
56 license="Apache License 2.0",
57 name="checkov",
58 version=version,
59 python_requires=">=3.7",
60 description="Infrastructure as code static analysis",
61 author="bridgecrew",
62 author_email="[email protected]",
63 url="https://github.com/nimrodkor/checkov",
64 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
65 scripts=["bin/checkov", "bin/checkov.cmd"],
66 long_description=long_description,
67 long_description_content_type="text/markdown",
68 classifiers=[
69 'Environment :: Console',
70 'Intended Audience :: Developers',
71 'Intended Audience :: System Administrators',
72 'Programming Language :: Python :: 3.7',
73 'Programming Language :: Python :: 3.8',
74 'Programming Language :: Python :: 3.9',
75 'Topic :: Security',
76 'Topic :: Software Development :: Build Tools'
77 ]
78 )
79
[end of setup.py]
[start of checkov/terraform/checks_infra/registry.py]
1 import json
2 import logging
3 import os
4
5 import yaml
6
7 from checkov.common.graph.checks_infra.base_parser import BaseGraphCheckParser
8 from checkov.common.graph.checks_infra.registry import BaseRegistry
9 from checkov.terraform.checks_infra.resources_types import resources_types
10
11 CHECKS_POSSIBLE_ENDING = [".yaml", ".yml"]
12
13
14 class Registry(BaseRegistry):
15 def __init__(self, parser=BaseGraphCheckParser(), checks_dir=None):
16 super().__init__(parser)
17 self.checks = []
18 self.parser = parser
19 self.checks_dir = checks_dir if checks_dir else \
20 os.path.join(os.path.dirname(os.path.dirname(__file__)), "checks", "graph_checks")
21 self.logger = logging.getLogger(__name__)
22
23 def load_checks(self):
24 self._load_checks_from_dir(self.checks_dir)
25
26 def _load_checks_from_dir(self, directory: str):
27 dir = os.path.expanduser(directory)
28 self.logger.debug("Loading external checks from {}".format(dir))
29 for root, d_names, f_names in os.walk(dir):
30 for file in f_names:
31 file_ending = os.path.splitext(file)[1]
32 if file_ending in CHECKS_POSSIBLE_ENDING:
33 with open(f'{root}/{file}', "r") as f:
34 if dir != self.checks_dir:
35 # This is a custom check, log its loading
36 logging.info(f"loading {file}")
37 check_yaml = yaml.safe_load(f)
38 check_json = json.loads(json.dumps(check_yaml))
39 check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))
40 if not any([c for c in self.checks if check.id == c.id]):
41 self.checks.append(check)
42
43 def load_external_checks(self, dir: str):
44 self._load_checks_from_dir(dir)
45
46 @staticmethod
47 def _get_resource_types(check_json):
48 provider = check_json.get("scope", {}).get("provider", "").lower()
49 return resources_types.get(provider)
50
[end of checkov/terraform/checks_infra/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks_infra/registry.py b/checkov/terraform/checks_infra/registry.py
--- a/checkov/terraform/checks_infra/registry.py
+++ b/checkov/terraform/checks_infra/registry.py
@@ -25,15 +25,20 @@
def _load_checks_from_dir(self, directory: str):
dir = os.path.expanduser(directory)
- self.logger.debug("Loading external checks from {}".format(dir))
+
+ checks_dir_content = os.listdir(os.path.dirname(dir))
+ self.logger.info(f'Checks dir contents: {checks_dir_content}')
+
+ self.logger.info("Loading external checks from {}".format(dir))
for root, d_names, f_names in os.walk(dir):
+ self.logger.info(f'Searching through {d_names} and {f_names}')
for file in f_names:
file_ending = os.path.splitext(file)[1]
if file_ending in CHECKS_POSSIBLE_ENDING:
with open(f'{root}/{file}', "r") as f:
- if dir != self.checks_dir:
+ # if dir != self.checks_dir:
# This is a custom check, log its loading
- logging.info(f"loading {file}")
+ self.logger.info(f"loading {file}")
check_yaml = yaml.safe_load(f)
check_json = json.loads(json.dumps(check_yaml))
check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -62,6 +62,8 @@
author_email="[email protected]",
url="https://github.com/nimrodkor/checkov",
packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
+ include_package_data=True,
+ package_data = {'': ['*.yaml', '*.yml']},
scripts=["bin/checkov", "bin/checkov.cmd"],
long_description=long_description,
long_description_content_type="text/markdown",
| {"golden_diff": "diff --git a/checkov/terraform/checks_infra/registry.py b/checkov/terraform/checks_infra/registry.py\n--- a/checkov/terraform/checks_infra/registry.py\n+++ b/checkov/terraform/checks_infra/registry.py\n@@ -25,15 +25,20 @@\n \n def _load_checks_from_dir(self, directory: str):\n dir = os.path.expanduser(directory)\n- self.logger.debug(\"Loading external checks from {}\".format(dir))\n+\n+ checks_dir_content = os.listdir(os.path.dirname(dir))\n+ self.logger.info(f'Checks dir contents: {checks_dir_content}')\n+\n+ self.logger.info(\"Loading external checks from {}\".format(dir))\n for root, d_names, f_names in os.walk(dir):\n+ self.logger.info(f'Searching through {d_names} and {f_names}')\n for file in f_names:\n file_ending = os.path.splitext(file)[1]\n if file_ending in CHECKS_POSSIBLE_ENDING:\n with open(f'{root}/{file}', \"r\") as f:\n- if dir != self.checks_dir:\n+ # if dir != self.checks_dir:\n # This is a custom check, log its loading\n- logging.info(f\"loading {file}\")\n+ self.logger.info(f\"loading {file}\")\n check_yaml = yaml.safe_load(f)\n check_json = json.loads(json.dumps(check_yaml))\n check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -62,6 +62,8 @@\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n+ include_package_data=True,\n+ package_data = {'': ['*.yaml', '*.yml']},\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "Unable to see any CKV2 checks in the list\n**Describe the bug**\r\nI posted this on Slack first and was confirmed it's a bug. 
I can't see any of the CKV2 checks when running `checkov -l`\r\n\r\n**Expected behavior**\r\nCKV2 / graph checks should be present working.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: os X\r\n - Checkov Version 2.0.107\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\"\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.18\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.27\",\n \"GitPython\",\n \"six==1.15.0\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nimrodkor/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}, {"content": "import json\nimport logging\nimport os\n\nimport yaml\n\nfrom checkov.common.graph.checks_infra.base_parser import BaseGraphCheckParser\nfrom checkov.common.graph.checks_infra.registry import BaseRegistry\nfrom checkov.terraform.checks_infra.resources_types import resources_types\n\nCHECKS_POSSIBLE_ENDING = [\".yaml\", \".yml\"]\n\n\nclass Registry(BaseRegistry):\n def __init__(self, parser=BaseGraphCheckParser(), checks_dir=None):\n super().__init__(parser)\n self.checks = []\n self.parser = parser\n self.checks_dir = checks_dir if checks_dir else \\\n os.path.join(os.path.dirname(os.path.dirname(__file__)), \"checks\", \"graph_checks\")\n self.logger = logging.getLogger(__name__)\n\n def load_checks(self):\n self._load_checks_from_dir(self.checks_dir)\n\n def _load_checks_from_dir(self, directory: str):\n dir = os.path.expanduser(directory)\n self.logger.debug(\"Loading external checks from {}\".format(dir))\n for root, d_names, f_names in os.walk(dir):\n for file in f_names:\n file_ending = os.path.splitext(file)[1]\n if file_ending in CHECKS_POSSIBLE_ENDING:\n with open(f'{root}/{file}', \"r\") as f:\n if dir != 
self.checks_dir:\n # This is a custom check, log its loading\n logging.info(f\"loading {file}\")\n check_yaml = yaml.safe_load(f)\n check_json = json.loads(json.dumps(check_yaml))\n check = self.parser.parse_raw_check(check_json, resources_types=self._get_resource_types(check_json))\n if not any([c for c in self.checks if check.id == c.id]):\n self.checks.append(check)\n\n def load_external_checks(self, dir: str):\n self._load_checks_from_dir(dir)\n\n @staticmethod\n def _get_resource_types(check_json):\n provider = check_json.get(\"scope\", {}).get(\"provider\", \"\").lower()\n return resources_types.get(provider)\n", "path": "checkov/terraform/checks_infra/registry.py"}]} | 1,954 | 454 |
gh_patches_debug_801 | rasdani/github-patches | git_diff | google__flax-2407 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Outdated `rich` dependency version
The version of `rich` is currently limited to `rich~=11.1`, causing problems with `pip` dependency resolution when installing with other packages.
https://github.com/google/flax/blob/cda7a4c85bbce744e412ab82e298ddf76d4770d2/setup.py#L33
Should be a trivial fix since `flax.linen.summary` doesn't seem to need any changes, I'll open a PR.
</issue>
<code>
[start of setup.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """setup.py for Flax."""
16
17 import os
18 from setuptools import find_packages
19 from setuptools import setup
20
21 here = os.path.abspath(os.path.dirname(__file__))
22 try:
23 README = open(os.path.join(here, "README.md"), encoding="utf-8").read()
24 except OSError:
25 README = ""
26
27 install_requires = [
28 "numpy>=1.12",
29 "jax>=0.3.16",
30 "matplotlib", # only needed for tensorboard export
31 "msgpack",
32 "optax",
33 "rich~=11.1",
34 "typing_extensions>=4.1.1",
35 "PyYAML>=5.4.1",
36 ]
37
38 tests_require = [
39 "atari-py==0.2.5", # Last version does not have the ROMs we test on pre-packaged
40 "clu", # All examples.
41 "gym==0.18.3",
42 "jaxlib",
43 "jraph>=0.0.6dev0",
44 "ml-collections",
45 "opencv-python",
46 "pytest",
47 "pytest-cov",
48 "pytest-custom_exit_code",
49 "pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate
50 "pytype",
51 "sentencepiece", # WMT example.
52 "svn",
53 "tensorflow_text>=2.4.0", # WMT example.
54 "tensorflow_datasets",
55 "tensorflow",
56 "torch",
57 ]
58
59 __version__ = None
60
61 with open("flax/version.py") as f:
62 exec(f.read(), globals())
63
64 setup(
65 name="flax",
66 version=__version__,
67 description="Flax: A neural network library for JAX designed for flexibility",
68 long_description="\n\n".join([README]),
69 long_description_content_type="text/markdown",
70 classifiers=[
71 "Development Status :: 3 - Alpha",
72 "Intended Audience :: Developers",
73 "Intended Audience :: Science/Research",
74 "License :: OSI Approved :: Apache Software License",
75 "Programming Language :: Python :: 3.7",
76 "Topic :: Scientific/Engineering :: Artificial Intelligence",
77 ],
78 keywords="",
79 author="Flax team",
80 author_email="[email protected]",
81 url="https://github.com/google/flax",
82 packages=find_packages(),
83 package_data={"flax": ["py.typed"]},
84 zip_safe=False,
85 install_requires=install_requires,
86 extras_require={
87 "testing": tests_require,
88 },
89 )
90
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
"matplotlib", # only needed for tensorboard export
"msgpack",
"optax",
- "rich~=11.1",
+ "rich>=11.1",
"typing_extensions>=4.1.1",
"PyYAML>=5.4.1",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n- \"rich~=11.1\",\n+ \"rich>=11.1\",\n \"typing_extensions>=4.1.1\",\n \"PyYAML>=5.4.1\",\n ]\n", "issue": "Outdated `rich` dependency version\nThe version of `rich` is currently limited to `rich~=11.1`, causing problems with `pip` dependency resolution when installing with other packages.\r\n\r\nhttps://github.com/google/flax/blob/cda7a4c85bbce744e412ab82e298ddf76d4770d2/setup.py#L33\r\n\r\nShould be a trivial fix since `flax.linen.summary` doesn't seem to need any changes, I'll open a PR.\r\n\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding=\"utf-8\").read()\nexcept OSError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.3.16\",\n \"matplotlib\", # only needed for tensorboard export\n \"msgpack\",\n \"optax\",\n \"rich~=11.1\",\n \"typing_extensions>=4.1.1\",\n \"PyYAML>=5.4.1\",\n]\n\ntests_require = [\n \"atari-py==0.2.5\", # Last version does not have the ROMs we test on pre-packaged\n \"clu\", # All examples.\n \"gym==0.18.3\",\n \"jaxlib\",\n \"jraph>=0.0.6dev0\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-custom_exit_code\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"pytype\",\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow_text>=2.4.0\", # WMT example.\n \"tensorflow_datasets\",\n \"tensorflow\",\n \"torch\",\n]\n\n__version__ = None\n\nwith open(\"flax/version.py\") as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n package_data={\"flax\": [\"py.typed\"]},\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]} | 1,503 | 100 |
gh_patches_debug_7456 | rasdani/github-patches | git_diff | encode__httpx-421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HTTPError should be importable frop the top-level httpx package
From #365:
> `HTTPError` is not available at the top level like the other exceptions and like it was in requests. This is a somewhat common exception to catch raise_for_status, so having to add another import statement for it is a bit odd if intentional.
Put differently, `HTTPError` is missing from `httpx/__init__.py`. Adding it would allow us to do:
```python
import httpx
try:
r = httpx.get("https://example.org")
r.raise_for_status()
except httpx.HTTPError:
...
```
</issue>
<code>
[start of httpx/__init__.py]
1 from .__version__ import __description__, __title__, __version__
2 from .api import delete, get, head, options, patch, post, put, request
3 from .client import AsyncClient, Client
4 from .concurrency.asyncio import AsyncioBackend
5 from .concurrency.base import (
6 BaseBackgroundManager,
7 BasePoolSemaphore,
8 BaseTCPStream,
9 ConcurrencyBackend,
10 )
11 from .config import (
12 USER_AGENT,
13 CertTypes,
14 HTTPVersionConfig,
15 HTTPVersionTypes,
16 PoolLimits,
17 SSLConfig,
18 TimeoutConfig,
19 TimeoutTypes,
20 VerifyTypes,
21 )
22 from .dispatch.base import AsyncDispatcher, Dispatcher
23 from .dispatch.connection import HTTPConnection
24 from .dispatch.connection_pool import ConnectionPool
25 from .dispatch.proxy_http import HTTPProxy, HTTPProxyMode
26 from .exceptions import (
27 ConnectTimeout,
28 CookieConflict,
29 DecodingError,
30 InvalidURL,
31 NotRedirectResponse,
32 PoolTimeout,
33 ProtocolError,
34 ProxyError,
35 ReadTimeout,
36 RedirectBodyUnavailable,
37 RedirectLoop,
38 ResponseClosed,
39 ResponseNotRead,
40 StreamConsumed,
41 Timeout,
42 TooManyRedirects,
43 WriteTimeout,
44 )
45 from .middleware.digest_auth import DigestAuth
46 from .models import (
47 URL,
48 AsyncRequest,
49 AsyncRequestData,
50 AsyncResponse,
51 AsyncResponseContent,
52 AuthTypes,
53 Cookies,
54 CookieTypes,
55 Headers,
56 HeaderTypes,
57 Origin,
58 QueryParams,
59 QueryParamTypes,
60 Request,
61 RequestData,
62 RequestFiles,
63 Response,
64 ResponseContent,
65 URLTypes,
66 )
67 from .status_codes import StatusCode, codes
68
69 __all__ = [
70 "__description__",
71 "__title__",
72 "__version__",
73 "delete",
74 "get",
75 "head",
76 "options",
77 "patch",
78 "post",
79 "patch",
80 "put",
81 "request",
82 "AsyncClient",
83 "Client",
84 "AsyncioBackend",
85 "USER_AGENT",
86 "CertTypes",
87 "PoolLimits",
88 "SSLConfig",
89 "TimeoutConfig",
90 "VerifyTypes",
91 "HTTPConnection",
92 "BasePoolSemaphore",
93 "BaseBackgroundManager",
94 "ConnectionPool",
95 "HTTPProxy",
96 "HTTPProxyMode",
97 "ConnectTimeout",
98 "CookieConflict",
99 "DecodingError",
100 "InvalidURL",
101 "NotRedirectResponse",
102 "PoolTimeout",
103 "ProtocolError",
104 "ReadTimeout",
105 "RedirectBodyUnavailable",
106 "RedirectLoop",
107 "ResponseClosed",
108 "ResponseNotRead",
109 "StreamConsumed",
110 "ProxyError",
111 "Timeout",
112 "TooManyRedirects",
113 "WriteTimeout",
114 "AsyncDispatcher",
115 "BaseTCPStream",
116 "ConcurrencyBackend",
117 "Dispatcher",
118 "URL",
119 "URLTypes",
120 "StatusCode",
121 "codes",
122 "TimeoutTypes",
123 "HTTPVersionTypes",
124 "HTTPVersionConfig",
125 "AsyncRequest",
126 "AsyncRequestData",
127 "AsyncResponse",
128 "AsyncResponseContent",
129 "AuthTypes",
130 "Cookies",
131 "CookieTypes",
132 "Headers",
133 "HeaderTypes",
134 "Origin",
135 "QueryParams",
136 "QueryParamTypes",
137 "Request",
138 "RequestData",
139 "Response",
140 "ResponseContent",
141 "RequestFiles",
142 "DigestAuth",
143 ]
144
[end of httpx/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/__init__.py b/httpx/__init__.py
--- a/httpx/__init__.py
+++ b/httpx/__init__.py
@@ -27,6 +27,7 @@
ConnectTimeout,
CookieConflict,
DecodingError,
+ HTTPError,
InvalidURL,
NotRedirectResponse,
PoolTimeout,
@@ -97,6 +98,7 @@
"ConnectTimeout",
"CookieConflict",
"DecodingError",
+ "HTTPError",
"InvalidURL",
"NotRedirectResponse",
"PoolTimeout",
| {"golden_diff": "diff --git a/httpx/__init__.py b/httpx/__init__.py\n--- a/httpx/__init__.py\n+++ b/httpx/__init__.py\n@@ -27,6 +27,7 @@\n ConnectTimeout,\n CookieConflict,\n DecodingError,\n+ HTTPError,\n InvalidURL,\n NotRedirectResponse,\n PoolTimeout,\n@@ -97,6 +98,7 @@\n \"ConnectTimeout\",\n \"CookieConflict\",\n \"DecodingError\",\n+ \"HTTPError\",\n \"InvalidURL\",\n \"NotRedirectResponse\",\n \"PoolTimeout\",\n", "issue": "HTTPError should be importable frop the top-level httpx package\nFrom #365:\r\n\r\n> `HTTPError` is not available at the top level like the other exceptions and like it was in requests. This is a somewhat common exception to catch raise_for_status, so having to add another import statement for it is a bit odd if intentional.\r\n\r\nPut differently, `HTTPError` is missing from `httpx/__init__.py`. Adding it would allow us to do:\r\n\r\n```python\r\nimport httpx\r\n\r\ntry:\r\n r = httpx.get(\"https://example.org\")\r\n r.raise_for_status()\r\nexcept httpx.HTTPError:\r\n ...\r\n```\n", "before_files": [{"content": "from .__version__ import __description__, __title__, __version__\nfrom .api import delete, get, head, options, patch, post, put, request\nfrom .client import AsyncClient, Client\nfrom .concurrency.asyncio import AsyncioBackend\nfrom .concurrency.base import (\n BaseBackgroundManager,\n BasePoolSemaphore,\n BaseTCPStream,\n ConcurrencyBackend,\n)\nfrom .config import (\n USER_AGENT,\n CertTypes,\n HTTPVersionConfig,\n HTTPVersionTypes,\n PoolLimits,\n SSLConfig,\n TimeoutConfig,\n TimeoutTypes,\n VerifyTypes,\n)\nfrom .dispatch.base import AsyncDispatcher, Dispatcher\nfrom .dispatch.connection import HTTPConnection\nfrom .dispatch.connection_pool import ConnectionPool\nfrom .dispatch.proxy_http import HTTPProxy, HTTPProxyMode\nfrom .exceptions import (\n ConnectTimeout,\n CookieConflict,\n DecodingError,\n InvalidURL,\n NotRedirectResponse,\n PoolTimeout,\n ProtocolError,\n ProxyError,\n ReadTimeout,\n RedirectBodyUnavailable,\n RedirectLoop,\n ResponseClosed,\n ResponseNotRead,\n StreamConsumed,\n Timeout,\n TooManyRedirects,\n WriteTimeout,\n)\nfrom .middleware.digest_auth import DigestAuth\nfrom .models import (\n URL,\n AsyncRequest,\n AsyncRequestData,\n AsyncResponse,\n AsyncResponseContent,\n AuthTypes,\n Cookies,\n CookieTypes,\n Headers,\n HeaderTypes,\n Origin,\n QueryParams,\n QueryParamTypes,\n Request,\n RequestData,\n RequestFiles,\n Response,\n ResponseContent,\n URLTypes,\n)\nfrom .status_codes import StatusCode, codes\n\n__all__ = [\n \"__description__\",\n \"__title__\",\n \"__version__\",\n \"delete\",\n \"get\",\n \"head\",\n \"options\",\n \"patch\",\n \"post\",\n \"patch\",\n \"put\",\n \"request\",\n \"AsyncClient\",\n \"Client\",\n \"AsyncioBackend\",\n \"USER_AGENT\",\n \"CertTypes\",\n \"PoolLimits\",\n \"SSLConfig\",\n \"TimeoutConfig\",\n \"VerifyTypes\",\n \"HTTPConnection\",\n \"BasePoolSemaphore\",\n \"BaseBackgroundManager\",\n \"ConnectionPool\",\n \"HTTPProxy\",\n \"HTTPProxyMode\",\n \"ConnectTimeout\",\n \"CookieConflict\",\n \"DecodingError\",\n \"InvalidURL\",\n \"NotRedirectResponse\",\n \"PoolTimeout\",\n \"ProtocolError\",\n \"ReadTimeout\",\n \"RedirectBodyUnavailable\",\n \"RedirectLoop\",\n \"ResponseClosed\",\n \"ResponseNotRead\",\n \"StreamConsumed\",\n \"ProxyError\",\n \"Timeout\",\n \"TooManyRedirects\",\n \"WriteTimeout\",\n \"AsyncDispatcher\",\n \"BaseTCPStream\",\n \"ConcurrencyBackend\",\n \"Dispatcher\",\n \"URL\",\n \"URLTypes\",\n \"StatusCode\",\n \"codes\",\n \"TimeoutTypes\",\n 
\"HTTPVersionTypes\",\n \"HTTPVersionConfig\",\n \"AsyncRequest\",\n \"AsyncRequestData\",\n \"AsyncResponse\",\n \"AsyncResponseContent\",\n \"AuthTypes\",\n \"Cookies\",\n \"CookieTypes\",\n \"Headers\",\n \"HeaderTypes\",\n \"Origin\",\n \"QueryParams\",\n \"QueryParamTypes\",\n \"Request\",\n \"RequestData\",\n \"Response\",\n \"ResponseContent\",\n \"RequestFiles\",\n \"DigestAuth\",\n]\n", "path": "httpx/__init__.py"}]} | 1,709 | 130 |
gh_patches_debug_10189 | rasdani/github-patches | git_diff | beetbox__beets-1980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
hook: Crash when using non-ASCII paths in command template
Using the config
```
hook:
hooks:
- event: album_imported
command: beet convert -ay path:{album.path}
```
and this command
```
beet import -a -L path:/path/to/lib/Ария
```
I get an UnicodeDecodeError from `hook.py` line 50.
</issue>
<code>
[start of beetsplug/hook.py]
1 # This file is part of beets.
2 # Copyright 2015, Adrian Sampson.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Allows custom commands to be run when an event is emitted by beets"""
16 from __future__ import division, absolute_import, print_function
17
18 import shlex
19 import subprocess
20
21 from beets.plugins import BeetsPlugin
22 from beets.ui import _arg_encoding
23
24
25 class HookPlugin(BeetsPlugin):
26 """Allows custom commands to be run when an event is emitted by beets"""
27 def __init__(self):
28 super(HookPlugin, self).__init__()
29
30 self.config.add({
31 'hooks': []
32 })
33
34 hooks = self.config['hooks'].get(list)
35
36 for hook_index in range(len(hooks)):
37 hook = self.config['hooks'][hook_index]
38
39 hook_event = hook['event'].get()
40 hook_command = hook['command'].get()
41
42 self.create_and_register_hook(hook_event, hook_command)
43
44 def create_and_register_hook(self, event, command):
45 def hook_function(**kwargs):
46 if command is None or len(command) == 0:
47 self._log.error('invalid command "{0}"', command)
48 return
49
50 formatted_command = command.format(event=event, **kwargs)
51 encoded_command = formatted_command.decode(_arg_encoding())
52 command_pieces = shlex.split(encoded_command)
53
54 self._log.debug('Running command "{0}" for event {1}',
55 encoded_command, event)
56
57 try:
58 subprocess.Popen(command_pieces).wait()
59 except OSError as exc:
60 self._log.error('hook for {0} failed: {1}', event, exc)
61
62 self.register_listener(event, hook_function)
63
[end of beetsplug/hook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/hook.py b/beetsplug/hook.py
--- a/beetsplug/hook.py
+++ b/beetsplug/hook.py
@@ -47,7 +47,9 @@
self._log.error('invalid command "{0}"', command)
return
- formatted_command = command.format(event=event, **kwargs)
+ unicode_command = command.decode('utf-8')
+ formatted_command = unicode_command.format(event=event,
+ **kwargs)
encoded_command = formatted_command.decode(_arg_encoding())
command_pieces = shlex.split(encoded_command)
| {"golden_diff": "diff --git a/beetsplug/hook.py b/beetsplug/hook.py\n--- a/beetsplug/hook.py\n+++ b/beetsplug/hook.py\n@@ -47,7 +47,9 @@\n self._log.error('invalid command \"{0}\"', command)\n return\n \n- formatted_command = command.format(event=event, **kwargs)\n+ unicode_command = command.decode('utf-8')\n+ formatted_command = unicode_command.format(event=event,\n+ **kwargs)\n encoded_command = formatted_command.decode(_arg_encoding())\n command_pieces = shlex.split(encoded_command)\n", "issue": "hook: Crash when using non-ASCII paths in command template\nUsing the config\n\n```\nhook:\n hooks:\n - event: album_imported\n command: beet convert -ay path:{album.path}\n```\n\nand this command\n\n```\nbeet import -a -L path:/path/to/lib/\u0410\u0440\u0438\u044f\n```\n\nI get an UnicodeDecodeError from `hook.py` line 50.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows custom commands to be run when an event is emitted by beets\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport shlex\nimport subprocess\n\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import _arg_encoding\n\n\nclass HookPlugin(BeetsPlugin):\n \"\"\"Allows custom commands to be run when an event is emitted by beets\"\"\"\n def __init__(self):\n super(HookPlugin, self).__init__()\n\n self.config.add({\n 'hooks': []\n })\n\n hooks = self.config['hooks'].get(list)\n\n for hook_index in range(len(hooks)):\n hook = self.config['hooks'][hook_index]\n\n hook_event = hook['event'].get()\n hook_command = hook['command'].get()\n\n self.create_and_register_hook(hook_event, hook_command)\n\n def create_and_register_hook(self, event, command):\n def hook_function(**kwargs):\n if command is None or len(command) == 0:\n self._log.error('invalid command \"{0}\"', command)\n return\n\n formatted_command = command.format(event=event, **kwargs)\n encoded_command = formatted_command.decode(_arg_encoding())\n command_pieces = shlex.split(encoded_command)\n\n self._log.debug('Running command \"{0}\" for event {1}',\n encoded_command, event)\n\n try:\n subprocess.Popen(command_pieces).wait()\n except OSError as exc:\n self._log.error('hook for {0} failed: {1}', event, exc)\n\n self.register_listener(event, hook_function)\n", "path": "beetsplug/hook.py"}]} | 1,221 | 128 |
gh_patches_debug_37135 | rasdani/github-patches | git_diff | sopel-irc__sopel-843 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strip leading . on .tld queries
`.tld .py` should be the same as `.tld py`
</issue>
<code>
[start of willie/modules/tld.py]
1 # coding=utf8
2 """
3 tld.py - Willie TLD Module
4 Copyright 2009-10, Michael Yanovich, yanovich.net
5 Licensed under the Eiffel Forum License 2.
6
7 http://willie.dftba.net
8 """
9 from __future__ import unicode_literals
10
11 from willie import web
12 from willie.module import commands, example
13 import re
14 import sys
15 if sys.version_info.major >= 3:
16 unicode = str
17
18 uri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'
19 r_tag = re.compile(r'<(?!!)[^>]+>')
20
21
22 @commands('tld')
23 @example('.tld ru')
24 def gettld(bot, trigger):
25 """Show information about the given Top Level Domain."""
26 page = web.get(uri)
27 search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
28 search = search.format(trigger.group(2))
29 re_country = re.compile(search)
30 matches = re_country.findall(page)
31 if not matches:
32 search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
33 search = search.format(trigger.group(2))
34 re_country = re.compile(search)
35 matches = re_country.findall(page)
36 if matches:
37 matches = list(matches[0])
38 i = 0
39 while i < len(matches):
40 matches[i] = r_tag.sub("", matches[i])
41 i += 1
42 desc = matches[2]
43 if len(desc) > 400:
44 desc = desc[:400] + "..."
45 reply = "%s -- %s. IDN: %s, DNSSEC: %s" % (matches[1], desc,
46 matches[3], matches[4])
47 bot.reply(reply)
48 else:
49 search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
50 search = search.format(unicode(trigger.group(2)))
51 re_country = re.compile(search)
52 matches = re_country.findall(page)
53 if matches:
54 matches = matches[0]
55 dict_val = dict()
56 dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"] = matches
57 for key in dict_val:
58 if dict_val[key] == " ":
59 dict_val[key] = "N/A"
60 dict_val[key] = r_tag.sub('', dict_val[key])
61 if len(dict_val["notes"]) > 400:
62 dict_val["notes"] = dict_val["notes"][:400] + "..."
63 reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
64 else:
65 reply = "No matches found for TLD: {0}".format(unicode(trigger.group(2)))
66 bot.reply(reply)
67
[end of willie/modules/tld.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/willie/modules/tld.py b/willie/modules/tld.py
--- a/willie/modules/tld.py
+++ b/willie/modules/tld.py
@@ -24,13 +24,16 @@
def gettld(bot, trigger):
"""Show information about the given Top Level Domain."""
page = web.get(uri)
+ tld = trigger.group(2)
+ if tld[0] == '.':
+ tld = tld[1:]
search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(trigger.group(2))
+ search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if not matches:
search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(trigger.group(2))
+ search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
@@ -47,7 +50,7 @@
bot.reply(reply)
else:
search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(unicode(trigger.group(2)))
+ search = search.format(unicode(tld))
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
@@ -62,5 +65,5 @@
dict_val["notes"] = dict_val["notes"][:400] + "..."
reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
else:
- reply = "No matches found for TLD: {0}".format(unicode(trigger.group(2)))
+ reply = "No matches found for TLD: {0}".format(unicode(tld))
bot.reply(reply)
| {"golden_diff": "diff --git a/willie/modules/tld.py b/willie/modules/tld.py\n--- a/willie/modules/tld.py\n+++ b/willie/modules/tld.py\n@@ -24,13 +24,16 @@\n def gettld(bot, trigger):\n \"\"\"Show information about the given Top Level Domain.\"\"\"\n page = web.get(uri)\n+ tld = trigger.group(2)\n+ if tld[0] == '.':\n+ tld = tld[1:]\n search = r'(?i)<td><a href=\"\\S+\" title=\"\\S+\">\\.{0}</a></td>\\n(<td><a href=\".*</a></td>\\n)?<td>([A-Za-z0-9].*?)</td>\\n<td>(.*)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(trigger.group(2))\n+ search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if not matches:\n search = r'(?i)<td><a href=\"\\S+\" title=\"(\\S+)\">\\.{0}</a></td>\\n<td><a href=\".*\">(.*)</a></td>\\n<td>([A-Za-z0-9].*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(trigger.group(2))\n+ search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n@@ -47,7 +50,7 @@\n bot.reply(reply)\n else:\n search = r'<td><a href=\"\\S+\" title=\"\\S+\">.{0}</a></td>\\n<td><span class=\"flagicon\"><img.*?\\\">(.*?)</a></td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(unicode(trigger.group(2)))\n+ search = search.format(unicode(tld))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n@@ -62,5 +65,5 @@\n dict_val[\"notes\"] = dict_val[\"notes\"][:400] + \"...\"\n reply = \"%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s\" % (dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"])\n else:\n- reply = \"No matches found for TLD: {0}\".format(unicode(trigger.group(2)))\n+ reply = \"No matches found for TLD: {0}\".format(unicode(tld))\n bot.reply(reply)\n", "issue": "Strip leading . on .tld queries\n`.tld .py` should be the same as `.tld py`\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\ntld.py - Willie TLD Module\nCopyright 2009-10, Michael Yanovich, yanovich.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom willie import web\nfrom willie.module import commands, example\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n\nuri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'\nr_tag = re.compile(r'<(?!!)[^>]+>')\n\n\n@commands('tld')\n@example('.tld ru')\ndef gettld(bot, trigger):\n \"\"\"Show information about the given Top Level Domain.\"\"\"\n page = web.get(uri)\n search = r'(?i)<td><a href=\"\\S+\" title=\"\\S+\">\\.{0}</a></td>\\n(<td><a href=\".*</a></td>\\n)?<td>([A-Za-z0-9].*?)</td>\\n<td>(.*)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(trigger.group(2))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if not matches:\n search = r'(?i)<td><a href=\"\\S+\" title=\"(\\S+)\">\\.{0}</a></td>\\n<td><a href=\".*\">(.*)</a></td>\\n<td>([A-Za-z0-9].*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(trigger.group(2))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = list(matches[0])\n i = 0\n while i < len(matches):\n matches[i] = r_tag.sub(\"\", matches[i])\n i += 1\n desc = matches[2]\n if len(desc) > 400:\n desc = desc[:400] + \"...\"\n reply = \"%s -- %s. 
IDN: %s, DNSSEC: %s\" % (matches[1], desc,\n matches[3], matches[4])\n bot.reply(reply)\n else:\n search = r'<td><a href=\"\\S+\" title=\"\\S+\">.{0}</a></td>\\n<td><span class=\"flagicon\"><img.*?\\\">(.*?)</a></td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(unicode(trigger.group(2)))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = matches[0]\n dict_val = dict()\n dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"] = matches\n for key in dict_val:\n if dict_val[key] == \" \":\n dict_val[key] = \"N/A\"\n dict_val[key] = r_tag.sub('', dict_val[key])\n if len(dict_val[\"notes\"]) > 400:\n dict_val[\"notes\"] = dict_val[\"notes\"][:400] + \"...\"\n reply = \"%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s\" % (dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"])\n else:\n reply = \"No matches found for TLD: {0}\".format(unicode(trigger.group(2)))\n bot.reply(reply)\n", "path": "willie/modules/tld.py"}]} | 1,555 | 669 |
gh_patches_debug_19871 | rasdani/github-patches | git_diff | ManimCommunity__manim-1516 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove references to t_min and t_max in ParametricFunction
## Description of bug / unexpected behavior
With PR #1161 the old way of setting the parameter's range (e.g. by setting `t_min` and `t_max`) was replaced by a single parameter `t_range`. However, the docs still show usage of `t_min` and `t_max`.
</issue>
<code>
[start of manim/mobject/functions.py]
1 """Mobjects representing function graphs."""
2
3 __all__ = ["ParametricFunction", "FunctionGraph"]
4
5
6 import numpy as np
7
8 from .. import config
9 from ..constants import *
10 from ..mobject.types.vectorized_mobject import VMobject
11 from ..utils.color import YELLOW
12
13
14 class ParametricFunction(VMobject):
15 """A parametric curve.
16
17 Examples
18 --------
19
20 .. manim:: PlotParametricFunction
21 :save_last_frame:
22
23 class PlotParametricFunction(Scene):
24 def func(self, t):
25 return np.array((np.sin(2 * t), np.sin(3 * t), 0))
26
27 def construct(self):
28 func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)
29 self.add(func.scale(3))
30
31 .. manim:: ThreeDParametricSpring
32 :save_last_frame:
33
34 class ThreeDParametricSpring(ThreeDScene):
35 def construct(self):
36 curve1 = ParametricFunction(
37 lambda u: np.array([
38 1.2 * np.cos(u),
39 1.2 * np.sin(u),
40 u * 0.05
41 ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,
42 ).set_shade_in_3d(True)
43 axes = ThreeDAxes()
44 self.add(axes, curve1)
45 self.set_camera_orientation(phi=80 * DEGREES, theta=-60 * DEGREES)
46 self.wait()
47 """
48
49 def __init__(
50 self,
51 function=None,
52 t_range=None,
53 dt=1e-8,
54 discontinuities=None,
55 use_smoothing=True,
56 **kwargs
57 ):
58 self.function = function
59 t_range = np.array([0, 1, 0.01]) if t_range is None else t_range
60 if len(t_range) == 2:
61 t_range = [*t_range, 0.01]
62
63 self.dt = dt
64 self.discontinuities = [] if discontinuities is None else discontinuities
65 self.use_smoothing = use_smoothing
66 self.t_min, self.t_max, self.t_step = t_range
67
68 VMobject.__init__(self, **kwargs)
69
70 def get_function(self):
71 return self.function
72
73 def get_point_from_function(self, t):
74 return self.function(t)
75
76 def generate_points(self):
77
78 discontinuities = filter(
79 lambda t: self.t_min <= t <= self.t_max, self.discontinuities
80 )
81 discontinuities = np.array(list(discontinuities))
82 boundary_times = [
83 self.t_min,
84 self.t_max,
85 *(discontinuities - self.dt),
86 *(discontinuities + self.dt),
87 ]
88 boundary_times.sort()
89 for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):
90 t_range = [*np.arange(t1, t2, self.t_step), t2]
91 points = np.array([self.function(t) for t in t_range])
92 self.start_new_path(points[0])
93 self.add_points_as_corners(points[1:])
94 if self.use_smoothing:
95 # TODO: not in line with upstream, approx_smooth does not exist
96 self.make_smooth()
97 return self
98
99
100 class FunctionGraph(ParametricFunction):
101 def __init__(self, function, x_range=None, color=YELLOW, **kwargs):
102
103 if x_range is None:
104 x_range = np.array([-config["frame_x_radius"], config["frame_x_radius"]])
105
106 self.x_range = x_range
107 self.parametric_function = lambda t: np.array([t, function(t), 0])
108 self.function = function
109 super().__init__(self.parametric_function, self.x_range, color=color, **kwargs)
110
111 def get_function(self):
112 return self.function
113
114 def get_point_from_function(self, x):
115 return self.parametric_function(x)
116
[end of manim/mobject/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/mobject/functions.py b/manim/mobject/functions.py
--- a/manim/mobject/functions.py
+++ b/manim/mobject/functions.py
@@ -25,7 +25,7 @@
return np.array((np.sin(2 * t), np.sin(3 * t), 0))
def construct(self):
- func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)
+ func = ParametricFunction(self.func, t_range = np.array([0, TAU]), fill_opacity=0).set_color(RED)
self.add(func.scale(3))
.. manim:: ThreeDParametricSpring
@@ -38,7 +38,7 @@
1.2 * np.cos(u),
1.2 * np.sin(u),
u * 0.05
- ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,
+ ]), color=RED, t_range = np.array([-3*TAU, 5*TAU, 0.01])
).set_shade_in_3d(True)
axes = ThreeDAxes()
self.add(axes, curve1)
| {"golden_diff": "diff --git a/manim/mobject/functions.py b/manim/mobject/functions.py\n--- a/manim/mobject/functions.py\n+++ b/manim/mobject/functions.py\n@@ -25,7 +25,7 @@\n return np.array((np.sin(2 * t), np.sin(3 * t), 0))\n \n def construct(self):\n- func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)\n+ func = ParametricFunction(self.func, t_range = np.array([0, TAU]), fill_opacity=0).set_color(RED)\n self.add(func.scale(3))\n \n .. manim:: ThreeDParametricSpring\n@@ -38,7 +38,7 @@\n 1.2 * np.cos(u),\n 1.2 * np.sin(u),\n u * 0.05\n- ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,\n+ ]), color=RED, t_range = np.array([-3*TAU, 5*TAU, 0.01])\n ).set_shade_in_3d(True)\n axes = ThreeDAxes()\n self.add(axes, curve1)\n", "issue": "Remove references to t_min and t_max in ParametricFunction\n## Description of bug / unexpected behavior\r\n\r\nWith PR #1161 the old way of setting the parameter's range (e.g. by setting `t_min` and `t_max`) was replaced by a single parameter `t_range`. However, the docs still show usage of `t_min` and `t_max`.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Mobjects representing function graphs.\"\"\"\n\n__all__ = [\"ParametricFunction\", \"FunctionGraph\"]\n\n\nimport numpy as np\n\nfrom .. import config\nfrom ..constants import *\nfrom ..mobject.types.vectorized_mobject import VMobject\nfrom ..utils.color import YELLOW\n\n\nclass ParametricFunction(VMobject):\n \"\"\"A parametric curve.\n\n Examples\n --------\n\n .. manim:: PlotParametricFunction\n :save_last_frame:\n\n class PlotParametricFunction(Scene):\n def func(self, t):\n return np.array((np.sin(2 * t), np.sin(3 * t), 0))\n\n def construct(self):\n func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)\n self.add(func.scale(3))\n\n .. 
manim:: ThreeDParametricSpring\n :save_last_frame:\n\n class ThreeDParametricSpring(ThreeDScene):\n def construct(self):\n curve1 = ParametricFunction(\n lambda u: np.array([\n 1.2 * np.cos(u),\n 1.2 * np.sin(u),\n u * 0.05\n ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,\n ).set_shade_in_3d(True)\n axes = ThreeDAxes()\n self.add(axes, curve1)\n self.set_camera_orientation(phi=80 * DEGREES, theta=-60 * DEGREES)\n self.wait()\n \"\"\"\n\n def __init__(\n self,\n function=None,\n t_range=None,\n dt=1e-8,\n discontinuities=None,\n use_smoothing=True,\n **kwargs\n ):\n self.function = function\n t_range = np.array([0, 1, 0.01]) if t_range is None else t_range\n if len(t_range) == 2:\n t_range = [*t_range, 0.01]\n\n self.dt = dt\n self.discontinuities = [] if discontinuities is None else discontinuities\n self.use_smoothing = use_smoothing\n self.t_min, self.t_max, self.t_step = t_range\n\n VMobject.__init__(self, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, t):\n return self.function(t)\n\n def generate_points(self):\n\n discontinuities = filter(\n lambda t: self.t_min <= t <= self.t_max, self.discontinuities\n )\n discontinuities = np.array(list(discontinuities))\n boundary_times = [\n self.t_min,\n self.t_max,\n *(discontinuities - self.dt),\n *(discontinuities + self.dt),\n ]\n boundary_times.sort()\n for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):\n t_range = [*np.arange(t1, t2, self.t_step), t2]\n points = np.array([self.function(t) for t in t_range])\n self.start_new_path(points[0])\n self.add_points_as_corners(points[1:])\n if self.use_smoothing:\n # TODO: not in line with upstream, approx_smooth does not exist\n self.make_smooth()\n return self\n\n\nclass FunctionGraph(ParametricFunction):\n def __init__(self, function, x_range=None, color=YELLOW, **kwargs):\n\n if x_range is None:\n x_range = np.array([-config[\"frame_x_radius\"], config[\"frame_x_radius\"]])\n\n self.x_range = x_range\n self.parametric_function = lambda t: np.array([t, function(t), 0])\n self.function = function\n super().__init__(self.parametric_function, self.x_range, color=color, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, x):\n return self.parametric_function(x)\n", "path": "manim/mobject/functions.py"}]} | 1,736 | 277 |
gh_patches_debug_3330 | rasdani/github-patches | git_diff | LibraryOfCongress__concordia-240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable Adobe DTM for Analytics (closes #160)
This embeds the code but Adobe’s instructions violate web
performance guidelines and we should review this carefully
to see how much Adobe is affecting site performance.
</issue>
<code>
[start of concordia/context_processors.py]
1 from django.conf import settings
2
3
4 def system_configuration(request):
5 """
6 Expose some system configuration to the default template context
7 """
8
9 return {"SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None)}
10
11
12 def site_navigation(request):
13 data = {"VIEW_NAME": request.resolver_match.view_name}
14
15 data["VIEW_NAME_FOR_CSS"] = data["VIEW_NAME"].replace(":", "--")
16
17 path_components = request.path.strip("/").split("/")
18 for i, component in enumerate(path_components, start=1):
19 data["PATH_LEVEL_%d" % i] = component
20
21 return data
22
[end of concordia/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/concordia/context_processors.py b/concordia/context_processors.py
--- a/concordia/context_processors.py
+++ b/concordia/context_processors.py
@@ -6,7 +6,10 @@
Expose some system configuration to the default template context
"""
- return {"SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None)}
+ return {
+ "SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None),
+ "CONCORDIA_ENVIRONMENT": settings.CONCORDIA_ENVIRONMENT,
+ }
def site_navigation(request):
| {"golden_diff": "diff --git a/concordia/context_processors.py b/concordia/context_processors.py\n--- a/concordia/context_processors.py\n+++ b/concordia/context_processors.py\n@@ -6,7 +6,10 @@\n Expose some system configuration to the default template context\n \"\"\"\n \n- return {\"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None)}\n+ return {\n+ \"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None),\n+ \"CONCORDIA_ENVIRONMENT\": settings.CONCORDIA_ENVIRONMENT,\n+ }\n \n \n def site_navigation(request):\n", "issue": "Enable Adobe DTM for Analytics (closes #160)\nThis embeds the code but Adobe\u2019s instructions violate web\r\nperformance guidelines and we should review this carefully\r\nto see how much Adobe is affecting site performance.\n", "before_files": [{"content": "from django.conf import settings\n\n\ndef system_configuration(request):\n \"\"\"\n Expose some system configuration to the default template context\n \"\"\"\n\n return {\"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None)}\n\n\ndef site_navigation(request):\n data = {\"VIEW_NAME\": request.resolver_match.view_name}\n\n data[\"VIEW_NAME_FOR_CSS\"] = data[\"VIEW_NAME\"].replace(\":\", \"--\")\n\n path_components = request.path.strip(\"/\").split(\"/\")\n for i, component in enumerate(path_components, start=1):\n data[\"PATH_LEVEL_%d\" % i] = component\n\n return data\n", "path": "concordia/context_processors.py"}]} | 755 | 135 |
gh_patches_debug_26332 | rasdani/github-patches | git_diff | jupyter__docker-stacks-388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider not writing in user home
All the images (starting from base notebook) write part of the configuration inside the user home folder, and assume that `/home/$NB_USER/work` will be mounted. This has a practical limitation that it is not easy to setup a hub such that useful folders like `.ssh`, or e.g. `.bash_history` persist across container restarts.
I might be missing underlying assumptions and use cases, but I suggest to assume that home itself would be mounted, and instead modify the global configuration options.
</issue>
<code>
[start of scipy-notebook/mplimporthook.py]
1 """Startup script for IPython kernel.
2
3 Installs an import hook to configure the matplotlib backend on the fly.
4
5 Originally from @minrk at
6 https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
7 Repurposed for docker-stacks to address repeat bugs like
8 https://github.com/jupyter/docker-stacks/issues/235.
9 """
10 import sys
11 from IPython import get_ipython
12
13 class MatplotlibFinder(object):
14 """Import hook that notices when matplotlib.pyplot or pylab is imported
15 and tries to configure the matplotlib backend appropriately for the
16 environment.
17 """
18 _called = False
19
20 def find_module(self, fullname, path=None):
21 if self._called:
22 # already handled
23 return
24
25 if fullname not in ('pylab', 'matplotlib.pyplot'):
26 # not matplotlib
27 return
28
29 # don't call me again
30 self._called = True
31
32 try:
33 # remove myself from the import hooks
34 sys.meta_path = [loader for loader in sys.meta_path if loader is not self]
35 except ValueError:
36 pass
37
38 ip = get_ipython()
39 if ip is None:
40 # not in an interactive environment
41 return
42
43 if ip.pylab_gui_select:
44 # backend already selected
45 return
46
47 if hasattr(ip, 'kernel'):
48 # default to inline in kernel environments
49 ip.enable_matplotlib('inline')
50 else:
51 print('enabling matplotlib')
52 ip.enable_matplotlib()
53
54 # install the finder immediately
55 sys.meta_path.insert(0, MatplotlibFinder())
[end of scipy-notebook/mplimporthook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scipy-notebook/mplimporthook.py b/scipy-notebook/mplimporthook.py
deleted file mode 100644
--- a/scipy-notebook/mplimporthook.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""Startup script for IPython kernel.
-
-Installs an import hook to configure the matplotlib backend on the fly.
-
-Originally from @minrk at
-https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
-Repurposed for docker-stacks to address repeat bugs like
-https://github.com/jupyter/docker-stacks/issues/235.
-"""
-import sys
-from IPython import get_ipython
-
-class MatplotlibFinder(object):
- """Import hook that notices when matplotlib.pyplot or pylab is imported
- and tries to configure the matplotlib backend appropriately for the
- environment.
- """
- _called = False
-
- def find_module(self, fullname, path=None):
- if self._called:
- # already handled
- return
-
- if fullname not in ('pylab', 'matplotlib.pyplot'):
- # not matplotlib
- return
-
- # don't call me again
- self._called = True
-
- try:
- # remove myself from the import hooks
- sys.meta_path = [loader for loader in sys.meta_path if loader is not self]
- except ValueError:
- pass
-
- ip = get_ipython()
- if ip is None:
- # not in an interactive environment
- return
-
- if ip.pylab_gui_select:
- # backend already selected
- return
-
- if hasattr(ip, 'kernel'):
- # default to inline in kernel environments
- ip.enable_matplotlib('inline')
- else:
- print('enabling matplotlib')
- ip.enable_matplotlib()
-
-# install the finder immediately
-sys.meta_path.insert(0, MatplotlibFinder())
\ No newline at end of file
| {"golden_diff": "diff --git a/scipy-notebook/mplimporthook.py b/scipy-notebook/mplimporthook.py\ndeleted file mode 100644\n--- a/scipy-notebook/mplimporthook.py\n+++ /dev/null\n@@ -1,55 +0,0 @@\n-\"\"\"Startup script for IPython kernel.\n-\n-Installs an import hook to configure the matplotlib backend on the fly.\n-\n-Originally from @minrk at \n-https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py\n-Repurposed for docker-stacks to address repeat bugs like\n-https://github.com/jupyter/docker-stacks/issues/235.\n-\"\"\"\n-import sys\n-from IPython import get_ipython\n-\n-class MatplotlibFinder(object):\n- \"\"\"Import hook that notices when matplotlib.pyplot or pylab is imported\n- and tries to configure the matplotlib backend appropriately for the\n- environment.\n- \"\"\"\n- _called = False\n- \n- def find_module(self, fullname, path=None):\n- if self._called:\n- # already handled\n- return\n- \n- if fullname not in ('pylab', 'matplotlib.pyplot'):\n- # not matplotlib\n- return\n- \n- # don't call me again\n- self._called = True\n- \n- try:\n- # remove myself from the import hooks\n- sys.meta_path = [loader for loader in sys.meta_path if loader is not self]\n- except ValueError:\n- pass\n- \n- ip = get_ipython()\n- if ip is None:\n- # not in an interactive environment\n- return\n- \n- if ip.pylab_gui_select:\n- # backend already selected\n- return\n- \n- if hasattr(ip, 'kernel'):\n- # default to inline in kernel environments\n- ip.enable_matplotlib('inline')\n- else:\n- print('enabling matplotlib')\n- ip.enable_matplotlib()\n-\n-# install the finder immediately\n-sys.meta_path.insert(0, MatplotlibFinder())\n\\ No newline at end of file\n", "issue": "Consider not writing in user home\nAll the images (starting from base notebook) write part of the configuration inside the user home folder, and assume that `/home/$NB_USER/work` will be mounted. This has a practical limitation that it is not easy to setup a hub such that useful folders like `.ssh`, or e.g. 
`.bash_history` persist across container restarts.\r\n\r\nI might be missing underlying assumptions and use cases, but I suggest to assume that home itself would be mounted, and instead modify the global configuration options.\n", "before_files": [{"content": "\"\"\"Startup script for IPython kernel.\n\nInstalls an import hook to configure the matplotlib backend on the fly.\n\nOriginally from @minrk at \nhttps://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py\nRepurposed for docker-stacks to address repeat bugs like\nhttps://github.com/jupyter/docker-stacks/issues/235.\n\"\"\"\nimport sys\nfrom IPython import get_ipython\n\nclass MatplotlibFinder(object):\n \"\"\"Import hook that notices when matplotlib.pyplot or pylab is imported\n and tries to configure the matplotlib backend appropriately for the\n environment.\n \"\"\"\n _called = False\n \n def find_module(self, fullname, path=None):\n if self._called:\n # already handled\n return\n \n if fullname not in ('pylab', 'matplotlib.pyplot'):\n # not matplotlib\n return\n \n # don't call me again\n self._called = True\n \n try:\n # remove myself from the import hooks\n sys.meta_path = [loader for loader in sys.meta_path if loader is not self]\n except ValueError:\n pass\n \n ip = get_ipython()\n if ip is None:\n # not in an interactive environment\n return\n \n if ip.pylab_gui_select:\n # backend already selected\n return\n \n if hasattr(ip, 'kernel'):\n # default to inline in kernel environments\n ip.enable_matplotlib('inline')\n else:\n print('enabling matplotlib')\n ip.enable_matplotlib()\n\n# install the finder immediately\nsys.meta_path.insert(0, MatplotlibFinder())", "path": "scipy-notebook/mplimporthook.py"}]} | 1,092 | 464 |
gh_patches_debug_14024 | rasdani/github-patches | git_diff | ivy-llc__ivy-16042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cosine_similarity
#15051
</issue>
<code>
[start of ivy/functional/frontends/paddle/nn/functional/common.py]
1 # local
2
[end of ivy/functional/frontends/paddle/nn/functional/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -1 +1,25 @@
# local
+import ivy
+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
+from ivy.func_wrapper import with_unsupported_dtypes
+
+
+@with_unsupported_dtypes({"2.0.1 and below": ("float16", "bfloat16")}, "torch")
+@to_ivy_arrays_and_back
+def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
+ if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
+ numerator = ivy.sum(x1 * x2, axis=axis)
+ x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
+ x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
+ else:
+ numerator = ivy.sum(x1 * x2)
+ x1_squared_norm = ivy.sum(ivy.square(x1))
+ x2_squared_norm = ivy.sum(ivy.square(x2))
+
+ x1_norm = ivy.sqrt(x1_squared_norm)
+ x2_norm = ivy.sqrt(x2_squared_norm)
+ norm_mm = x1_norm * x2_norm
+ denominator = ivy.maximum(norm_mm, eps)
+
+ cosine = numerator / denominator
+ return cosine
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -1 +1,25 @@\n # local\n+import ivy\n+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n+from ivy.func_wrapper import with_unsupported_dtypes\n+\n+\n+@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n+@to_ivy_arrays_and_back\n+def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n+ if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n+ numerator = ivy.sum(x1 * x2, axis=axis)\n+ x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n+ x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n+ else:\n+ numerator = ivy.sum(x1 * x2)\n+ x1_squared_norm = ivy.sum(ivy.square(x1))\n+ x2_squared_norm = ivy.sum(ivy.square(x2))\n+\n+ x1_norm = ivy.sqrt(x1_squared_norm)\n+ x2_norm = ivy.sqrt(x2_squared_norm)\n+ norm_mm = x1_norm * x2_norm\n+ denominator = ivy.maximum(norm_mm, eps)\n+\n+ cosine = numerator / denominator\n+ return cosine\n", "issue": "cosine_similarity\n#15051 \n", "before_files": [{"content": "# local\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]} | 570 | 381 |