problem_id (stringlengths 18–22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13–58) | prompt (stringlengths 1.1k–25.4k) | golden_diff (stringlengths 145–5.13k) | verification_info (stringlengths 582–39.1k) | num_tokens (int64 271–4.1k) | num_tokens_diff (int64 47–1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_22534
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-12056
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IPython/utils/py3compat.py unused `with_metaclass` function.
The function `with_metaclass` seem to be unused;
we should : check if it is used, if not remove the function .
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/utils/py3compat.py`
Content:
```
1 # coding: utf-8
2 """Compatibility tricks for Python 3. Mainly to do with unicode.
3
4 This file is deprecated and will be removed in a future version.
5 """
6 import functools
7 import os
8 import sys
9 import re
10 import shutil
11 import types
12 import platform
13
14 from .encoding import DEFAULT_ENCODING
15
16
17 def decode(s, encoding=None):
18 encoding = encoding or DEFAULT_ENCODING
19 return s.decode(encoding, "replace")
20
21 def encode(u, encoding=None):
22 encoding = encoding or DEFAULT_ENCODING
23 return u.encode(encoding, "replace")
24
25
26 def cast_unicode(s, encoding=None):
27 if isinstance(s, bytes):
28 return decode(s, encoding)
29 return s
30
31 def cast_bytes(s, encoding=None):
32 if not isinstance(s, bytes):
33 return encode(s, encoding)
34 return s
35
36 def buffer_to_bytes(buf):
37 """Cast a buffer object to bytes"""
38 if not isinstance(buf, bytes):
39 buf = bytes(buf)
40 return buf
41
42 def _modify_str_or_docstring(str_change_func):
43 @functools.wraps(str_change_func)
44 def wrapper(func_or_str):
45 if isinstance(func_or_str, (str,)):
46 func = None
47 doc = func_or_str
48 else:
49 func = func_or_str
50 doc = func.__doc__
51
52 # PYTHONOPTIMIZE=2 strips docstrings, so they can disappear unexpectedly
53 if doc is not None:
54 doc = str_change_func(doc)
55
56 if func:
57 func.__doc__ = doc
58 return func
59 return doc
60 return wrapper
61
62 def safe_unicode(e):
63 """unicode(e) with various fallbacks. Used for exceptions, which may not be
64 safe to call unicode() on.
65 """
66 try:
67 return str(e)
68 except UnicodeError:
69 pass
70
71 try:
72 return repr(e)
73 except UnicodeError:
74 pass
75
76 return u'Unrecoverably corrupt evalue'
77
78 # shutil.which from Python 3.4
79 def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
80 """Given a command, mode, and a PATH string, return the path which
81 conforms to the given mode on the PATH, or None if there is no such
82 file.
83
84 `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
85 of os.environ.get("PATH"), or can be overridden with a custom search
86 path.
87
88 This is a backport of shutil.which from Python 3.4
89 """
90 # Check that a given file can be accessed with the correct mode.
91 # Additionally check that `file` is not a directory, as on Windows
92 # directories pass the os.access check.
93 def _access_check(fn, mode):
94 return (os.path.exists(fn) and os.access(fn, mode)
95 and not os.path.isdir(fn))
96
97 # If we're given a path with a directory part, look it up directly rather
98 # than referring to PATH directories. This includes checking relative to the
99 # current directory, e.g. ./script
100 if os.path.dirname(cmd):
101 if _access_check(cmd, mode):
102 return cmd
103 return None
104
105 if path is None:
106 path = os.environ.get("PATH", os.defpath)
107 if not path:
108 return None
109 path = path.split(os.pathsep)
110
111 if sys.platform == "win32":
112 # The current directory takes precedence on Windows.
113 if not os.curdir in path:
114 path.insert(0, os.curdir)
115
116 # PATHEXT is necessary to check on Windows.
117 pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
118 # See if the given file matches any of the expected path extensions.
119 # This will allow us to short circuit when given "python.exe".
120 # If it does match, only test that one, otherwise we have to try
121 # others.
122 if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
123 files = [cmd]
124 else:
125 files = [cmd + ext for ext in pathext]
126 else:
127 # On other platforms you don't have things like PATHEXT to tell you
128 # what file suffixes are executable, so just pass on cmd as-is.
129 files = [cmd]
130
131 seen = set()
132 for dir in path:
133 normdir = os.path.normcase(dir)
134 if not normdir in seen:
135 seen.add(normdir)
136 for thefile in files:
137 name = os.path.join(dir, thefile)
138 if _access_check(name, mode):
139 return name
140 return None
141
142 PY3 = True
143
144 # keep reference to builtin_mod because the kernel overrides that value
145 # to forward requests to a frontend.
146 def input(prompt=''):
147 return builtin_mod.input(prompt)
148
149 builtin_mod_name = "builtins"
150 import builtins as builtin_mod
151
152
153 which = shutil.which
154
155 def isidentifier(s, dotted=False):
156 if dotted:
157 return all(isidentifier(a) for a in s.split("."))
158 return s.isidentifier()
159
160 getcwd = os.getcwd
161
162 MethodType = types.MethodType
163
164 def execfile(fname, glob, loc=None, compiler=None):
165 loc = loc if (loc is not None) else glob
166 with open(fname, 'rb') as f:
167 compiler = compiler or compile
168 exec(compiler(f.read(), fname, 'exec'), glob, loc)
169
170 # Refactor print statements in doctests.
171 _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE)
172 def _print_statement_sub(match):
173 expr = match.groups('expr')
174 return "print(%s)" % expr
175
176 # Abstract u'abc' syntax:
177 @_modify_str_or_docstring
178 def u_format(s):
179 """"{u}'abc'" --> "'abc'" (Python 3)
180
181 Accepts a string or a function, so it can be used as a decorator."""
182 return s.format(u='')
183
184 def get_closure(f):
185 """Get a function's closure attribute"""
186 return f.__closure__
187
188
189 PY2 = not PY3
190 PYPY = platform.python_implementation() == "PyPy"
191
192
193
194
195 # Parts below taken from six:
196 # Copyright (c) 2010-2013 Benjamin Peterson
197 #
198 # Permission is hereby granted, free of charge, to any person obtaining a copy
199 # of this software and associated documentation files (the "Software"), to deal
200 # in the Software without restriction, including without limitation the rights
201 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
202 # copies of the Software, and to permit persons to whom the Software is
203 # furnished to do so, subject to the following conditions:
204 #
205 # The above copyright notice and this permission notice shall be included in all
206 # copies or substantial portions of the Software.
207 #
208 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
209 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
210 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
211 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
212 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
213 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
214 # SOFTWARE.
215
216 def with_metaclass(meta, *bases):
217 """Create a base class with a metaclass."""
218 return meta("_NewBase", bases, {})
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/utils/py3compat.py b/IPython/utils/py3compat.py
--- a/IPython/utils/py3compat.py
+++ b/IPython/utils/py3compat.py
@@ -189,30 +189,3 @@
PY2 = not PY3
PYPY = platform.python_implementation() == "PyPy"
-
-
-
-# Parts below taken from six:
-# Copyright (c) 2010-2013 Benjamin Peterson
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- return meta("_NewBase", bases, {})
|
{"golden_diff": "diff --git a/IPython/utils/py3compat.py b/IPython/utils/py3compat.py\n--- a/IPython/utils/py3compat.py\n+++ b/IPython/utils/py3compat.py\n@@ -189,30 +189,3 @@\n PY2 = not PY3\n PYPY = platform.python_implementation() == \"PyPy\"\n \n-\n-\n-\n-# Parts below taken from six:\n-# Copyright (c) 2010-2013 Benjamin Peterson\n-#\n-# Permission is hereby granted, free of charge, to any person obtaining a copy\n-# of this software and associated documentation files (the \"Software\"), to deal\n-# in the Software without restriction, including without limitation the rights\n-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n-# copies of the Software, and to permit persons to whom the Software is\n-# furnished to do so, subject to the following conditions:\n-#\n-# The above copyright notice and this permission notice shall be included in all\n-# copies or substantial portions of the Software.\n-#\n-# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n-# SOFTWARE.\n-\n-def with_metaclass(meta, *bases):\n- \"\"\"Create a base class with a metaclass.\"\"\"\n- return meta(\"_NewBase\", bases, {})\n", "issue": "IPython/utils/py3compat.py unused `with_metaclass` function. \nThe function `with_metaclass` seem to be unused; \r\n\r\nwe should : check if it is used, if not remove the function .\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"Compatibility tricks for Python 3. Mainly to do with unicode.\n\nThis file is deprecated and will be removed in a future version.\n\"\"\"\nimport functools\nimport os\nimport sys\nimport re\nimport shutil\nimport types\nimport platform\n\nfrom .encoding import DEFAULT_ENCODING\n\n\ndef decode(s, encoding=None):\n encoding = encoding or DEFAULT_ENCODING\n return s.decode(encoding, \"replace\")\n\ndef encode(u, encoding=None):\n encoding = encoding or DEFAULT_ENCODING\n return u.encode(encoding, \"replace\")\n\n\ndef cast_unicode(s, encoding=None):\n if isinstance(s, bytes):\n return decode(s, encoding)\n return s\n\ndef cast_bytes(s, encoding=None):\n if not isinstance(s, bytes):\n return encode(s, encoding)\n return s\n\ndef buffer_to_bytes(buf):\n \"\"\"Cast a buffer object to bytes\"\"\"\n if not isinstance(buf, bytes):\n buf = bytes(buf)\n return buf\n\ndef _modify_str_or_docstring(str_change_func):\n @functools.wraps(str_change_func)\n def wrapper(func_or_str):\n if isinstance(func_or_str, (str,)):\n func = None\n doc = func_or_str\n else:\n func = func_or_str\n doc = func.__doc__\n\n # PYTHONOPTIMIZE=2 strips docstrings, so they can disappear unexpectedly\n if doc is not None:\n doc = str_change_func(doc)\n\n if func:\n func.__doc__ = doc\n return func\n return doc\n return wrapper\n\ndef safe_unicode(e):\n \"\"\"unicode(e) with various fallbacks. 
Used for exceptions, which may not be\n safe to call unicode() on.\n \"\"\"\n try:\n return str(e)\n except UnicodeError:\n pass\n\n try:\n return repr(e)\n except UnicodeError:\n pass\n\n return u'Unrecoverably corrupt evalue'\n\n# shutil.which from Python 3.4\ndef _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):\n \"\"\"Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n\n `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result\n of os.environ.get(\"PATH\"), or can be overridden with a custom search\n path.\n\n This is a backport of shutil.which from Python 3.4\n \"\"\"\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None\n\nPY3 = True\n\n# keep reference to builtin_mod because the kernel overrides that value\n# to forward requests to a frontend.\ndef input(prompt=''):\n return builtin_mod.input(prompt)\n\nbuiltin_mod_name = \"builtins\"\nimport builtins as builtin_mod\n\n\nwhich = shutil.which\n\ndef isidentifier(s, dotted=False):\n if dotted:\n return all(isidentifier(a) for a in s.split(\".\"))\n return s.isidentifier()\n\ngetcwd = os.getcwd\n\nMethodType = types.MethodType\n\ndef execfile(fname, glob, loc=None, compiler=None):\n loc = loc if (loc is not None) else glob\n with open(fname, 'rb') as f:\n compiler = compiler or compile\n exec(compiler(f.read(), fname, 'exec'), glob, loc)\n\n# Refactor print statements in doctests.\n_print_statement_re = re.compile(r\"\\bprint (?P<expr>.*)$\", re.MULTILINE)\ndef _print_statement_sub(match):\n expr = match.groups('expr')\n return \"print(%s)\" % expr\n\n# Abstract u'abc' syntax:\n@_modify_str_or_docstring\ndef u_format(s):\n \"\"\"\"{u}'abc'\" --> \"'abc'\" (Python 3)\n\n Accepts a string or a function, so it can be used as a decorator.\"\"\"\n return s.format(u='')\n\ndef get_closure(f):\n \"\"\"Get a function's 
closure attribute\"\"\"\n return f.__closure__\n\n\nPY2 = not PY3\nPYPY = platform.python_implementation() == \"PyPy\"\n\n\n\n\n# Parts below taken from six:\n# Copyright (c) 2010-2013 Benjamin Peterson\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\ndef with_metaclass(meta, *bases):\n \"\"\"Create a base class with a metaclass.\"\"\"\n return meta(\"_NewBase\", bases, {})\n", "path": "IPython/utils/py3compat.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"Compatibility tricks for Python 3. Mainly to do with unicode.\n\nThis file is deprecated and will be removed in a future version.\n\"\"\"\nimport functools\nimport os\nimport sys\nimport re\nimport shutil\nimport types\nimport platform\n\nfrom .encoding import DEFAULT_ENCODING\n\n\ndef decode(s, encoding=None):\n encoding = encoding or DEFAULT_ENCODING\n return s.decode(encoding, \"replace\")\n\ndef encode(u, encoding=None):\n encoding = encoding or DEFAULT_ENCODING\n return u.encode(encoding, \"replace\")\n\n\ndef cast_unicode(s, encoding=None):\n if isinstance(s, bytes):\n return decode(s, encoding)\n return s\n\ndef cast_bytes(s, encoding=None):\n if not isinstance(s, bytes):\n return encode(s, encoding)\n return s\n\ndef buffer_to_bytes(buf):\n \"\"\"Cast a buffer object to bytes\"\"\"\n if not isinstance(buf, bytes):\n buf = bytes(buf)\n return buf\n\ndef _modify_str_or_docstring(str_change_func):\n @functools.wraps(str_change_func)\n def wrapper(func_or_str):\n if isinstance(func_or_str, (str,)):\n func = None\n doc = func_or_str\n else:\n func = func_or_str\n doc = func.__doc__\n\n # PYTHONOPTIMIZE=2 strips docstrings, so they can disappear unexpectedly\n if doc is not None:\n doc = str_change_func(doc)\n\n if func:\n func.__doc__ = doc\n return func\n return doc\n return wrapper\n\ndef safe_unicode(e):\n \"\"\"unicode(e) with various fallbacks. Used for exceptions, which may not be\n safe to call unicode() on.\n \"\"\"\n try:\n return str(e)\n except UnicodeError:\n pass\n\n try:\n return repr(e)\n except UnicodeError:\n pass\n\n return u'Unrecoverably corrupt evalue'\n\n# shutil.which from Python 3.4\ndef _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):\n \"\"\"Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n\n `mode` defaults to os.F_OK | os.X_OK. 
`path` defaults to the result\n of os.environ.get(\"PATH\"), or can be overridden with a custom search\n path.\n\n This is a backport of shutil.which from Python 3.4\n \"\"\"\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None\n\nPY3 = True\n\n# keep reference to builtin_mod because the kernel overrides that value\n# to forward requests to a frontend.\ndef input(prompt=''):\n return builtin_mod.input(prompt)\n\nbuiltin_mod_name = \"builtins\"\nimport builtins as builtin_mod\n\n\nwhich = shutil.which\n\ndef isidentifier(s, dotted=False):\n if dotted:\n return all(isidentifier(a) for a in s.split(\".\"))\n return s.isidentifier()\n\ngetcwd = os.getcwd\n\nMethodType = types.MethodType\n\ndef execfile(fname, glob, loc=None, compiler=None):\n loc = loc if (loc is not None) else glob\n with open(fname, 'rb') as f:\n compiler = compiler or compile\n exec(compiler(f.read(), fname, 'exec'), glob, loc)\n\n# Refactor print statements in doctests.\n_print_statement_re = re.compile(r\"\\bprint (?P<expr>.*)$\", re.MULTILINE)\ndef _print_statement_sub(match):\n expr = match.groups('expr')\n return \"print(%s)\" % expr\n\n# Abstract u'abc' syntax:\n@_modify_str_or_docstring\ndef u_format(s):\n \"\"\"\"{u}'abc'\" --> \"'abc'\" (Python 3)\n\n Accepts a string or a function, so it can be used as a decorator.\"\"\"\n return s.format(u='')\n\ndef get_closure(f):\n \"\"\"Get a function's closure attribute\"\"\"\n return f.__closure__\n\n\nPY2 = not PY3\nPYPY = platform.python_implementation() == \"PyPy\"\n\n", "path": "IPython/utils/py3compat.py"}]}
| 2,477 | 378 |
gh_patches_debug_50933
|
rasdani/github-patches
|
git_diff
|
apache__airflow-15117
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove 'user_id', 'role_id' from User and Role in OpenAPI schema
Would be good to remove the 'id' of both User and Role schemas from what is dumped in REST API endpoints. ID of User and Role table are sensitive data that would be fine to hide from the endpoints
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/api_connexion/schemas/user_schema.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from typing import List, NamedTuple
18
19 from flask_appbuilder.security.sqla.models import User
20 from marshmallow import Schema, fields
21 from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
22
23 from airflow.api_connexion.parameters import validate_istimezone
24 from airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema
25
26
27 class UserCollectionItemSchema(SQLAlchemySchema):
28 """user collection item schema"""
29
30 class Meta:
31 """Meta"""
32
33 model = User
34 dateformat = "iso"
35
36 user_id = auto_field('id', dump_only=True)
37 first_name = auto_field()
38 last_name = auto_field()
39 username = auto_field()
40 active = auto_field(dump_only=True)
41 email = auto_field()
42 last_login = auto_field(dump_only=True)
43 login_count = auto_field(dump_only=True)
44 fail_login_count = auto_field(dump_only=True)
45 roles = fields.List(fields.Nested(RoleSchema, only=('name',)))
46 created_on = auto_field(validate=validate_istimezone, dump_only=True)
47 changed_on = auto_field(validate=validate_istimezone, dump_only=True)
48
49
50 class UserSchema(UserCollectionItemSchema):
51 """User schema"""
52
53 password = auto_field(load_only=True)
54
55
56 class UserCollection(NamedTuple):
57 """User collection"""
58
59 users: List[User]
60 total_entries: int
61
62
63 class UserCollectionSchema(Schema):
64 """User collection schema"""
65
66 users = fields.List(fields.Nested(UserCollectionItemSchema))
67 total_entries = fields.Int()
68
69
70 user_collection_item_schema = UserCollectionItemSchema()
71 user_schema = UserSchema()
72 user_collection_schema = UserCollectionSchema()
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/api_connexion/schemas/user_schema.py b/airflow/api_connexion/schemas/user_schema.py
--- a/airflow/api_connexion/schemas/user_schema.py
+++ b/airflow/api_connexion/schemas/user_schema.py
@@ -33,7 +33,6 @@
model = User
dateformat = "iso"
- user_id = auto_field('id', dump_only=True)
first_name = auto_field()
last_name = auto_field()
username = auto_field()
|
{"golden_diff": "diff --git a/airflow/api_connexion/schemas/user_schema.py b/airflow/api_connexion/schemas/user_schema.py\n--- a/airflow/api_connexion/schemas/user_schema.py\n+++ b/airflow/api_connexion/schemas/user_schema.py\n@@ -33,7 +33,6 @@\n model = User\n dateformat = \"iso\"\n \n- user_id = auto_field('id', dump_only=True)\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n", "issue": "Remove 'user_id', 'role_id' from User and Role in OpenAPI schema \nWould be good to remove the 'id' of both User and Role schemas from what is dumped in REST API endpoints. ID of User and Role table are sensitive data that would be fine to hide from the endpoints\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom typing import List, NamedTuple\n\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import Schema, fields\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema\n\n\nclass UserCollectionItemSchema(SQLAlchemySchema):\n \"\"\"user collection item schema\"\"\"\n\n class Meta:\n \"\"\"Meta\"\"\"\n\n model = User\n dateformat = \"iso\"\n\n user_id = auto_field('id', dump_only=True)\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n active = auto_field(dump_only=True)\n email = auto_field()\n last_login = auto_field(dump_only=True)\n login_count = auto_field(dump_only=True)\n fail_login_count = auto_field(dump_only=True)\n roles = fields.List(fields.Nested(RoleSchema, only=('name',)))\n created_on = auto_field(validate=validate_istimezone, dump_only=True)\n changed_on = auto_field(validate=validate_istimezone, dump_only=True)\n\n\nclass UserSchema(UserCollectionItemSchema):\n \"\"\"User schema\"\"\"\n\n password = auto_field(load_only=True)\n\n\nclass UserCollection(NamedTuple):\n \"\"\"User collection\"\"\"\n\n users: List[User]\n total_entries: int\n\n\nclass UserCollectionSchema(Schema):\n \"\"\"User collection schema\"\"\"\n\n users = fields.List(fields.Nested(UserCollectionItemSchema))\n total_entries = fields.Int()\n\n\nuser_collection_item_schema = UserCollectionItemSchema()\nuser_schema = UserSchema()\nuser_collection_schema = UserCollectionSchema()\n", "path": "airflow/api_connexion/schemas/user_schema.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom typing import List, NamedTuple\n\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import Schema, fields\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema\n\n\nclass UserCollectionItemSchema(SQLAlchemySchema):\n \"\"\"user collection item schema\"\"\"\n\n class Meta:\n \"\"\"Meta\"\"\"\n\n model = User\n dateformat = \"iso\"\n\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n active = auto_field(dump_only=True)\n email = auto_field()\n last_login = auto_field(dump_only=True)\n login_count = auto_field(dump_only=True)\n fail_login_count = auto_field(dump_only=True)\n roles = fields.List(fields.Nested(RoleSchema, only=('name',)))\n created_on = auto_field(validate=validate_istimezone, dump_only=True)\n changed_on = auto_field(validate=validate_istimezone, dump_only=True)\n\n\nclass UserSchema(UserCollectionItemSchema):\n \"\"\"User schema\"\"\"\n\n password = auto_field(load_only=True)\n\n\nclass UserCollection(NamedTuple):\n \"\"\"User collection\"\"\"\n\n users: List[User]\n total_entries: int\n\n\nclass UserCollectionSchema(Schema):\n \"\"\"User collection schema\"\"\"\n\n users = fields.List(fields.Nested(UserCollectionItemSchema))\n total_entries = fields.Int()\n\n\nuser_collection_item_schema = UserCollectionItemSchema()\nuser_schema = UserSchema()\nuser_collection_schema = UserCollectionSchema()\n", "path": "airflow/api_connexion/schemas/user_schema.py"}]}
| 997 | 115 |
gh_patches_debug_24467
|
rasdani/github-patches
|
git_diff
|
searx__searx-2102
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wikipedia crash on specific search query
Instance: searx.be
Browser: Chrome Version 84.0.4147.89
Hello,
Not sure whether this is a bug, problem with the instance itself , or my browser, but when I search for certain query e.g. `port dover`, I get an error message:
```
Engines cannot retrieve results:
wikipedia (unexpected crash 'NoneType' object has no attribute 'replace')
```

This error doesn't happen if I change the language to Deutsch, Espanol, or other language. Seems like it only happens with English language.


This error doesn't happen with other search query.

I also test this on other searx instances and the results are mixed.
No error:
https://searx.ninja
https://search.snopyta.org/
Both are still on 0.16.0
Same error:
https://searx.fmac.xyz
https://searx.xyz/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/wikipedia.py`
Content:
```
1 """
2 Wikipedia (Web)
3
4 @website https://{language}.wikipedia.org
5 @provide-api yes
6
7 @using-api yes
8 @results JSON
9 @stable yes
10 @parse url, infobox
11 """
12
13 from json import loads
14 from lxml.html import fromstring
15 from searx.url_utils import quote, urlencode
16 from searx.utils import match_language
17
18 # search-url
19 base_url = u'https://{language}.wikipedia.org/'
20 search_url = base_url + u'w/api.php?'\
21 'action=query'\
22 '&format=json'\
23 '&{query}'\
24 '&prop=extracts|pageimages|pageprops'\
25 '&ppprop=disambiguation'\
26 '&exintro'\
27 '&explaintext'\
28 '&pithumbsize=300'\
29 '&redirects'
30 supported_languages_url = 'https://meta.wikimedia.org/wiki/List_of_Wikipedias'
31
32
33 # set language in base_url
34 def url_lang(lang):
35 lang_pre = lang.split('-')[0]
36 if lang_pre == 'all' or lang_pre not in supported_languages and lang_pre not in language_aliases:
37 return 'en'
38 return match_language(lang, supported_languages, language_aliases).split('-')[0]
39
40
41 # do search-request
42 def request(query, params):
43 if query.islower():
44 query = u'{0}|{1}'.format(query.decode('utf-8'), query.decode('utf-8').title()).encode('utf-8')
45
46 params['url'] = search_url.format(query=urlencode({'titles': query}),
47 language=url_lang(params['language']))
48
49 return params
50
51
52 # get first meaningful paragraph
53 # this should filter out disambiguation pages and notes above first paragraph
54 # "magic numbers" were obtained by fine tuning
55 def extract_first_paragraph(content, title, image):
56 first_paragraph = None
57
58 failed_attempts = 0
59 for paragraph in content.split('\n'):
60
61 starts_with_title = paragraph.lower().find(title.lower(), 0, len(title) + 35)
62 length = len(paragraph)
63
64 if length >= 200 or (starts_with_title >= 0 and (image or length >= 150)):
65 first_paragraph = paragraph
66 break
67
68 failed_attempts += 1
69 if failed_attempts > 3:
70 return None
71
72 return first_paragraph
73
74
75 # get response from search-request
76 def response(resp):
77 results = []
78
79 search_result = loads(resp.text)
80
81 # wikipedia article's unique id
82 # first valid id is assumed to be the requested article
83 if 'pages' not in search_result['query']:
84 return results
85
86 for article_id in search_result['query']['pages']:
87 page = search_result['query']['pages'][article_id]
88 if int(article_id) > 0:
89 break
90
91 if int(article_id) < 0 or 'disambiguation' in page.get('pageprops', {}):
92 return []
93
94 title = page.get('title')
95
96 image = page.get('thumbnail')
97 if image:
98 image = image.get('source')
99
100 extract = page.get('extract')
101
102 summary = extract_first_paragraph(extract, title, image)
103 summary = summary.replace('() ', '')
104
105 # link to wikipedia article
106 wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \
107 + 'wiki/' + quote(title.replace(' ', '_').encode('utf8'))
108
109 results.append({'url': wikipedia_link, 'title': title})
110
111 results.append({'infobox': title,
112 'id': wikipedia_link,
113 'content': summary,
114 'img_src': image,
115 'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}]})
116
117 return results
118
119
120 # get supported languages from their site
121 def _fetch_supported_languages(resp):
122 supported_languages = {}
123 dom = fromstring(resp.text)
124 tables = dom.xpath('//table[contains(@class,"sortable")]')
125 for table in tables:
126 # exclude header row
127 trs = table.xpath('.//tr')[1:]
128 for tr in trs:
129 td = tr.xpath('./td')
130 code = td[3].xpath('./a')[0].text
131 name = td[2].xpath('./a')[0].text
132 english_name = td[1].xpath('./a')[0].text
133 articles = int(td[4].xpath('./a/b')[0].text.replace(',', ''))
134 # exclude languages with too few articles
135 if articles >= 100:
136 supported_languages[code] = {"name": name, "english_name": english_name, "articles": articles}
137
138 return supported_languages
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -49,29 +49,6 @@
return params
-# get first meaningful paragraph
-# this should filter out disambiguation pages and notes above first paragraph
-# "magic numbers" were obtained by fine tuning
-def extract_first_paragraph(content, title, image):
- first_paragraph = None
-
- failed_attempts = 0
- for paragraph in content.split('\n'):
-
- starts_with_title = paragraph.lower().find(title.lower(), 0, len(title) + 35)
- length = len(paragraph)
-
- if length >= 200 or (starts_with_title >= 0 and (image or length >= 150)):
- first_paragraph = paragraph
- break
-
- failed_attempts += 1
- if failed_attempts > 3:
- return None
-
- return first_paragraph
-
-
# get response from search-request
def response(resp):
results = []
@@ -97,10 +74,7 @@
if image:
image = image.get('source')
- extract = page.get('extract')
-
- summary = extract_first_paragraph(extract, title, image)
- summary = summary.replace('() ', '')
+ summary = page.get('extract', '').split('\n')[0].replace('()', '')
# link to wikipedia article
wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \
|
{"golden_diff": "diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py\n--- a/searx/engines/wikipedia.py\n+++ b/searx/engines/wikipedia.py\n@@ -49,29 +49,6 @@\n return params\n \n \n-# get first meaningful paragraph\n-# this should filter out disambiguation pages and notes above first paragraph\n-# \"magic numbers\" were obtained by fine tuning\n-def extract_first_paragraph(content, title, image):\n- first_paragraph = None\n-\n- failed_attempts = 0\n- for paragraph in content.split('\\n'):\n-\n- starts_with_title = paragraph.lower().find(title.lower(), 0, len(title) + 35)\n- length = len(paragraph)\n-\n- if length >= 200 or (starts_with_title >= 0 and (image or length >= 150)):\n- first_paragraph = paragraph\n- break\n-\n- failed_attempts += 1\n- if failed_attempts > 3:\n- return None\n-\n- return first_paragraph\n-\n-\n # get response from search-request\n def response(resp):\n results = []\n@@ -97,10 +74,7 @@\n if image:\n image = image.get('source')\n \n- extract = page.get('extract')\n-\n- summary = extract_first_paragraph(extract, title, image)\n- summary = summary.replace('() ', '')\n+ summary = page.get('extract', '').split('\\n')[0].replace('()', '')\n \n # link to wikipedia article\n wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \\\n", "issue": "Wikipedia crash on specific search query\nInstance: searx.be\r\nBrowser: Chrome Version 84.0.4147.89\r\n\r\nHello,\r\n\r\nNot sure whether this is a bug, problem with the instance itself , or my browser, but when I search for certain query e.g. `port dover`, I get an error message:\r\n```\r\nEngines cannot retrieve results:\r\nwikipedia (unexpected crash 'NoneType' object has no attribute 'replace')\r\n```\r\n\r\n\r\n\r\n\r\n\r\nThis error doesn't happen if I change the language to Deutsch, Espanol, or other language. Seems like it only happens with English language. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nThis error doesn't happen with other search query.\r\n\r\n\r\n\r\n\r\nI also test this on other searx instances and the results are mixed. 
\r\n\r\nNo error:\r\nhttps://searx.ninja\r\nhttps://search.snopyta.org/\r\nBoth are still on 0.16.0\r\n\r\nSame error:\r\nhttps://searx.fmac.xyz\r\nhttps://searx.xyz/\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Wikipedia (Web)\n\n @website https://{language}.wikipedia.org\n @provide-api yes\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, infobox\n\"\"\"\n\nfrom json import loads\nfrom lxml.html import fromstring\nfrom searx.url_utils import quote, urlencode\nfrom searx.utils import match_language\n\n# search-url\nbase_url = u'https://{language}.wikipedia.org/'\nsearch_url = base_url + u'w/api.php?'\\\n 'action=query'\\\n '&format=json'\\\n '&{query}'\\\n '&prop=extracts|pageimages|pageprops'\\\n '&ppprop=disambiguation'\\\n '&exintro'\\\n '&explaintext'\\\n '&pithumbsize=300'\\\n '&redirects'\nsupported_languages_url = 'https://meta.wikimedia.org/wiki/List_of_Wikipedias'\n\n\n# set language in base_url\ndef url_lang(lang):\n lang_pre = lang.split('-')[0]\n if lang_pre == 'all' or lang_pre not in supported_languages and lang_pre not in language_aliases:\n return 'en'\n return match_language(lang, supported_languages, language_aliases).split('-')[0]\n\n\n# do search-request\ndef request(query, params):\n if query.islower():\n query = u'{0}|{1}'.format(query.decode('utf-8'), query.decode('utf-8').title()).encode('utf-8')\n\n params['url'] = search_url.format(query=urlencode({'titles': query}),\n language=url_lang(params['language']))\n\n return params\n\n\n# get first meaningful paragraph\n# this should filter out disambiguation pages and notes above first paragraph\n# \"magic numbers\" were obtained by fine tuning\ndef extract_first_paragraph(content, title, image):\n first_paragraph = None\n\n failed_attempts = 0\n for paragraph in content.split('\\n'):\n\n starts_with_title = paragraph.lower().find(title.lower(), 0, len(title) + 35)\n length = len(paragraph)\n\n if length >= 200 or (starts_with_title >= 0 and (image or length >= 150)):\n first_paragraph = paragraph\n break\n\n failed_attempts += 1\n if failed_attempts > 3:\n return None\n\n return first_paragraph\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_result = loads(resp.text)\n\n # wikipedia article's unique id\n # first valid id is assumed to be the requested article\n if 'pages' not in search_result['query']:\n return results\n\n for article_id in search_result['query']['pages']:\n page = search_result['query']['pages'][article_id]\n if int(article_id) > 0:\n break\n\n if int(article_id) < 0 or 'disambiguation' in page.get('pageprops', {}):\n return []\n\n title = page.get('title')\n\n image = page.get('thumbnail')\n if image:\n image = image.get('source')\n\n extract = page.get('extract')\n\n summary = extract_first_paragraph(extract, title, image)\n summary = summary.replace('() ', '')\n\n # link to wikipedia article\n wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \\\n + 'wiki/' + quote(title.replace(' ', '_').encode('utf8'))\n\n results.append({'url': wikipedia_link, 'title': title})\n\n results.append({'infobox': title,\n 'id': wikipedia_link,\n 'content': summary,\n 'img_src': image,\n 'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}]})\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n supported_languages = {}\n dom = fromstring(resp.text)\n tables = dom.xpath('//table[contains(@class,\"sortable\")]')\n for table in tables:\n # exclude header row\n trs = 
table.xpath('.//tr')[1:]\n for tr in trs:\n td = tr.xpath('./td')\n code = td[3].xpath('./a')[0].text\n name = td[2].xpath('./a')[0].text\n english_name = td[1].xpath('./a')[0].text\n articles = int(td[4].xpath('./a/b')[0].text.replace(',', ''))\n # exclude languages with too few articles\n if articles >= 100:\n supported_languages[code] = {\"name\": name, \"english_name\": english_name, \"articles\": articles}\n\n return supported_languages\n", "path": "searx/engines/wikipedia.py"}], "after_files": [{"content": "\"\"\"\n Wikipedia (Web)\n\n @website https://{language}.wikipedia.org\n @provide-api yes\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, infobox\n\"\"\"\n\nfrom json import loads\nfrom lxml.html import fromstring\nfrom searx.url_utils import quote, urlencode\nfrom searx.utils import match_language\n\n# search-url\nbase_url = u'https://{language}.wikipedia.org/'\nsearch_url = base_url + u'w/api.php?'\\\n 'action=query'\\\n '&format=json'\\\n '&{query}'\\\n '&prop=extracts|pageimages|pageprops'\\\n '&ppprop=disambiguation'\\\n '&exintro'\\\n '&explaintext'\\\n '&pithumbsize=300'\\\n '&redirects'\nsupported_languages_url = 'https://meta.wikimedia.org/wiki/List_of_Wikipedias'\n\n\n# set language in base_url\ndef url_lang(lang):\n lang_pre = lang.split('-')[0]\n if lang_pre == 'all' or lang_pre not in supported_languages and lang_pre not in language_aliases:\n return 'en'\n return match_language(lang, supported_languages, language_aliases).split('-')[0]\n\n\n# do search-request\ndef request(query, params):\n if query.islower():\n query = u'{0}|{1}'.format(query.decode('utf-8'), query.decode('utf-8').title()).encode('utf-8')\n\n params['url'] = search_url.format(query=urlencode({'titles': query}),\n language=url_lang(params['language']))\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_result = loads(resp.text)\n\n # wikipedia article's unique id\n # first valid id is assumed to be the requested article\n if 'pages' not in search_result['query']:\n return results\n\n for article_id in search_result['query']['pages']:\n page = search_result['query']['pages'][article_id]\n if int(article_id) > 0:\n break\n\n if int(article_id) < 0 or 'disambiguation' in page.get('pageprops', {}):\n return []\n\n title = page.get('title')\n\n image = page.get('thumbnail')\n if image:\n image = image.get('source')\n\n summary = page.get('extract', '').split('\\n')[0].replace('()', '')\n\n # link to wikipedia article\n wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \\\n + 'wiki/' + quote(title.replace(' ', '_').encode('utf8'))\n\n results.append({'url': wikipedia_link, 'title': title})\n\n results.append({'infobox': title,\n 'id': wikipedia_link,\n 'content': summary,\n 'img_src': image,\n 'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}]})\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n supported_languages = {}\n dom = fromstring(resp.text)\n tables = dom.xpath('//table[contains(@class,\"sortable\")]')\n for table in tables:\n # exclude header row\n trs = table.xpath('.//tr')[1:]\n for tr in trs:\n td = tr.xpath('./td')\n code = td[3].xpath('./a')[0].text\n name = td[2].xpath('./a')[0].text\n english_name = td[1].xpath('./a')[0].text\n articles = int(td[4].xpath('./a/b')[0].text.replace(',', ''))\n # exclude languages with too few articles\n if articles >= 100:\n supported_languages[code] = {\"name\": name, \"english_name\": 
english_name, \"articles\": articles}\n\n return supported_languages\n", "path": "searx/engines/wikipedia.py"}]}
| 2,057 | 363 |
gh_patches_debug_2094
|
rasdani/github-patches
|
git_diff
|
piskvorky__gensim-3441
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
annoy.py conversion of cosine distance to cosine similarity is incorrect
in [this function](https://github.com/RaRe-Technologies/gensim/blob/f35faae7a7b0c3c8586fb61208560522e37e0e7e/gensim/similarities/annoy.py#L169) the code to calculate cosine similarity is incorrect
def most_similar(self, vector, num_neighbors):
"""Find `num_neighbors` most similar items.
Parameters
----------
vector : numpy.array
Vector for word/document.
num_neighbors : int
Number of most similar items
Returns
-------
list of (str, float)
List of most similar items in format [(`item`, `cosine_distance`), ... ]
"""
ids, distances = self.index.get_nns_by_vector(
vector, num_neighbors, include_distances=True)
return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
according to annoy documentation `get_nns_by_vector` with `include_distances=True` will return the distances and not the square power of the distance (this was changed since aug 2016):
_`a.get_distance(i, j)` returns the distance between items i and j. NOTE: this used to return the squared distance, but has been changed as of Aug 2016._
[link](https://github.com/spotify/annoy#:~:text=a.get_distance(i%2C%20j)%20returns%20the%20distance%20between%20items%20i%20and%20j.%20NOTE%3A%20this%20used%20to%20return%20the%20squared%20distance%2C%20but%20has%20been%20changed%20as%20of%20Aug%202016.)
also:
Annoy uses Euclidean distance of normalized vectors for its angular distance, which for two vectors u,v is equal to sqrt(2(1-cos(u,v)))
[link](https://github.com/spotify/annoy#:~:text=Annoy%20uses%20Euclidean%20distance%20of%20normalized%20vectors%20for%20its%20angular%20distance%2C%20which%20for%20two%20vectors%20u%2Cv%20is%20equal%20to%20sqrt(2(1%2Dcos(u%2Cv))))
so this means that in order to calculate the cosine similarity correctly we should do this:
`
return [(self.labels[ids[i]], 1 - distances[i]^2 / 2) for i in range(len(ids))]
`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/similarities/annoy.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2013 Radim Rehurek <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """
8 This module integrates Spotify's `Annoy <https://github.com/spotify/annoy>`_ (Approximate Nearest Neighbors Oh Yeah)
9 library with Gensim's :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,
10 :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.KeyedVectors` word embeddings.
11
12 .. Important::
13 To use this module, you must have the ``annoy`` library installed.
14 To install it, run ``pip install annoy``.
15
16 """
17
18 # Avoid import collisions on py2: this module has the same name as the actual Annoy library.
19 from __future__ import absolute_import
20
21 import os
22
23 try:
24 import cPickle as _pickle
25 except ImportError:
26 import pickle as _pickle
27
28 from gensim import utils
29 from gensim.models.doc2vec import Doc2Vec
30 from gensim.models.word2vec import Word2Vec
31 from gensim.models.fasttext import FastText
32 from gensim.models import KeyedVectors
33
34
35 _NOANNOY = ImportError("Annoy not installed. To use the Annoy indexer, please run `pip install annoy`.")
36
37
38 class AnnoyIndexer():
39 """This class allows the use of `Annoy <https://github.com/spotify/annoy>`_ for fast (approximate)
40 vector retrieval in `most_similar()` calls of
41 :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,
42 :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` models.
43
44 """
45
46 def __init__(self, model=None, num_trees=None):
47 """
48 Parameters
49 ----------
50 model : trained model, optional
51 Use vectors from this model as the source for the index.
52 num_trees : int, optional
53 Number of trees for Annoy indexer.
54
55 Examples
56 --------
57 .. sourcecode:: pycon
58
59 >>> from gensim.similarities.annoy import AnnoyIndexer
60 >>> from gensim.models import Word2Vec
61 >>>
62 >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
63 >>> model = Word2Vec(sentences, min_count=1, seed=1)
64 >>>
65 >>> indexer = AnnoyIndexer(model, 2)
66 >>> model.most_similar("cat", topn=2, indexer=indexer)
67 [('cat', 1.0), ('dog', 0.32011348009109497)]
68
69 """
70 self.index = None
71 self.labels = None
72 self.model = model
73 self.num_trees = num_trees
74
75 if model and num_trees:
76 # Extract the KeyedVectors object from whatever model we were given.
77 if isinstance(self.model, Doc2Vec):
78 kv = self.model.dv
79 elif isinstance(self.model, (Word2Vec, FastText)):
80 kv = self.model.wv
81 elif isinstance(self.model, (KeyedVectors,)):
82 kv = self.model
83 else:
84 raise ValueError("Only a Word2Vec, Doc2Vec, FastText or KeyedVectors instance can be used")
85 self._build_from_model(kv.get_normed_vectors(), kv.index_to_key, kv.vector_size)
86
87 def save(self, fname, protocol=utils.PICKLE_PROTOCOL):
88 """Save AnnoyIndexer instance to disk.
89
90 Parameters
91 ----------
92 fname : str
93 Path to output. Save will produce 2 files:
94 `fname`: Annoy index itself.
95 `fname.dict`: Index metadata.
96 protocol : int, optional
97 Protocol for pickle.
98
99 Notes
100 -----
101 This method saves **only the index**. The trained model isn't preserved.
102
103 """
104 self.index.save(fname)
105 d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}
106 with utils.open(fname + '.dict', 'wb') as fout:
107 _pickle.dump(d, fout, protocol=protocol)
108
109 def load(self, fname):
110 """Load an AnnoyIndexer instance from disk.
111
112 Parameters
113 ----------
114 fname : str
115 The path as previously used by ``save()``.
116
117 Examples
118 --------
119 .. sourcecode:: pycon
120
121 >>> from gensim.similarities.index import AnnoyIndexer
122 >>> from gensim.models import Word2Vec
123 >>> from tempfile import mkstemp
124 >>>
125 >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]
126 >>> model = Word2Vec(sentences, min_count=1, seed=1, epochs=10)
127 >>>
128 >>> indexer = AnnoyIndexer(model, 2)
129 >>> _, temp_fn = mkstemp()
130 >>> indexer.save(temp_fn)
131 >>>
132 >>> new_indexer = AnnoyIndexer()
133 >>> new_indexer.load(temp_fn)
134 >>> new_indexer.model = model
135
136 """
137 fname_dict = fname + '.dict'
138 if not (os.path.exists(fname) and os.path.exists(fname_dict)):
139 raise IOError(
140 f"Can't find index files '{fname}' and '{fname_dict}' - unable to restore AnnoyIndexer state."
141 )
142 try:
143 from annoy import AnnoyIndex
144 except ImportError:
145 raise _NOANNOY
146
147 with utils.open(fname_dict, 'rb') as f:
148 d = _pickle.loads(f.read())
149 self.num_trees = d['num_trees']
150 self.index = AnnoyIndex(d['f'], metric='angular')
151 self.index.load(fname)
152 self.labels = d['labels']
153
154 def _build_from_model(self, vectors, labels, num_features):
155 try:
156 from annoy import AnnoyIndex
157 except ImportError:
158 raise _NOANNOY
159
160 index = AnnoyIndex(num_features, metric='angular')
161
162 for vector_num, vector in enumerate(vectors):
163 index.add_item(vector_num, vector)
164
165 index.build(self.num_trees)
166 self.index = index
167 self.labels = labels
168
169 def most_similar(self, vector, num_neighbors):
170 """Find `num_neighbors` most similar items.
171
172 Parameters
173 ----------
174 vector : numpy.array
175 Vector for word/document.
176 num_neighbors : int
177 Number of most similar items
178
179 Returns
180 -------
181 list of (str, float)
182 List of most similar items in format [(`item`, `cosine_distance`), ... ]
183
184 """
185 ids, distances = self.index.get_nns_by_vector(
186 vector, num_neighbors, include_distances=True)
187
188 return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gensim/similarities/annoy.py b/gensim/similarities/annoy.py
--- a/gensim/similarities/annoy.py
+++ b/gensim/similarities/annoy.py
@@ -185,4 +185,4 @@
ids, distances = self.index.get_nns_by_vector(
vector, num_neighbors, include_distances=True)
- return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
+ return [(self.labels[ids[i]], 1 - distances[i] ** 2 / 2) for i in range(len(ids))]
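The arithmetic behind this one-character change can be checked in isolation. The snippet below is a standalone sketch rather than gensim code: it assumes unit-normalised vectors, for which Annoy's angular distance is `sqrt(2 * (1 - cos))`, so the cosine similarity is recovered as `1 - d**2 / 2` and not `1 - d / 2`.

```python
# Standalone sketch (not gensim code): why the patch squares the distance.
import numpy as np

u = np.array([1.0, 0.0])
v = np.array([0.0, 1.0])              # orthogonal unit vectors: true cosine similarity is 0.0

cos_sim = float(u @ v)                # 0.0
d = np.sqrt(2.0 * (1.0 - cos_sim))    # ~1.414, the angular distance Annoy would report

print(1.0 - d ** 2 / 2.0)             # 0.0     -> patched formula recovers the cosine
print(1.0 - d / 2.0)                  # ~0.293  -> old formula does not
```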
|
{"golden_diff": "diff --git a/gensim/similarities/annoy.py b/gensim/similarities/annoy.py\n--- a/gensim/similarities/annoy.py\n+++ b/gensim/similarities/annoy.py\n@@ -185,4 +185,4 @@\n ids, distances = self.index.get_nns_by_vector(\n vector, num_neighbors, include_distances=True)\n \n- return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]\n+ return [(self.labels[ids[i]], 1 - distances[i] ** 2 / 2) for i in range(len(ids))]\n", "issue": "annoy.py conversion of cosine distance to cosine similarity is incorrect\nin [this function](https://github.com/RaRe-Technologies/gensim/blob/f35faae7a7b0c3c8586fb61208560522e37e0e7e/gensim/similarities/annoy.py#L169) the code to calculate cosine similarity is incorrect\r\n\r\n def most_similar(self, vector, num_neighbors):\r\n \"\"\"Find `num_neighbors` most similar items.\r\n\r\n Parameters\r\n ----------\r\n vector : numpy.array\r\n Vector for word/document.\r\n num_neighbors : int\r\n Number of most similar items\r\n\r\n Returns\r\n -------\r\n list of (str, float)\r\n List of most similar items in format [(`item`, `cosine_distance`), ... ]\r\n\r\n \"\"\"\r\n ids, distances = self.index.get_nns_by_vector(\r\n vector, num_neighbors, include_distances=True)\r\n\r\n return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]\r\n\r\naccording to annoy documentation `get_nns_by_vector` with `include_distances=True` will return the distances and not the square power of the distance (this was changed since aug 2016):\r\n\r\n_`a.get_distance(i, j)` returns the distance between items i and j. NOTE: this used to return the squared distance, but has been changed as of Aug 2016._\r\n\r\n[link](https://github.com/spotify/annoy#:~:text=a.get_distance(i%2C%20j)%20returns%20the%20distance%20between%20items%20i%20and%20j.%20NOTE%3A%20this%20used%20to%20return%20the%20squared%20distance%2C%20but%20has%20been%20changed%20as%20of%20Aug%202016.)\r\n\r\nalso:\r\nAnnoy uses Euclidean distance of normalized vectors for its angular distance, which for two vectors u,v is equal to sqrt(2(1-cos(u,v)))\r\n[link](https://github.com/spotify/annoy#:~:text=Annoy%20uses%20Euclidean%20distance%20of%20normalized%20vectors%20for%20its%20angular%20distance%2C%20which%20for%20two%20vectors%20u%2Cv%20is%20equal%20to%20sqrt(2(1%2Dcos(u%2Cv))))\r\n\r\nso this means that in order to calculate the cosine similarity correctly we should do this:\r\n`\r\nreturn [(self.labels[ids[i]], 1 - distances[i]^2 / 2) for i in range(len(ids))]\r\n`\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module integrates Spotify's `Annoy <https://github.com/spotify/annoy>`_ (Approximate Nearest Neighbors Oh Yeah)\nlibrary with Gensim's :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.KeyedVectors` word embeddings.\n\n.. 
Important::\n To use this module, you must have the ``annoy`` library installed.\n To install it, run ``pip install annoy``.\n\n\"\"\"\n\n# Avoid import collisions on py2: this module has the same name as the actual Annoy library.\nfrom __future__ import absolute_import\n\nimport os\n\ntry:\n import cPickle as _pickle\nexcept ImportError:\n import pickle as _pickle\n\nfrom gensim import utils\nfrom gensim.models.doc2vec import Doc2Vec\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models.fasttext import FastText\nfrom gensim.models import KeyedVectors\n\n\n_NOANNOY = ImportError(\"Annoy not installed. To use the Annoy indexer, please run `pip install annoy`.\")\n\n\nclass AnnoyIndexer():\n \"\"\"This class allows the use of `Annoy <https://github.com/spotify/annoy>`_ for fast (approximate)\n vector retrieval in `most_similar()` calls of\n :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` models.\n\n \"\"\"\n\n def __init__(self, model=None, num_trees=None):\n \"\"\"\n Parameters\n ----------\n model : trained model, optional\n Use vectors from this model as the source for the index.\n num_trees : int, optional\n Number of trees for Annoy indexer.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.similarities.annoy import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> model.most_similar(\"cat\", topn=2, indexer=indexer)\n [('cat', 1.0), ('dog', 0.32011348009109497)]\n\n \"\"\"\n self.index = None\n self.labels = None\n self.model = model\n self.num_trees = num_trees\n\n if model and num_trees:\n # Extract the KeyedVectors object from whatever model we were given.\n if isinstance(self.model, Doc2Vec):\n kv = self.model.dv\n elif isinstance(self.model, (Word2Vec, FastText)):\n kv = self.model.wv\n elif isinstance(self.model, (KeyedVectors,)):\n kv = self.model\n else:\n raise ValueError(\"Only a Word2Vec, Doc2Vec, FastText or KeyedVectors instance can be used\")\n self._build_from_model(kv.get_normed_vectors(), kv.index_to_key, kv.vector_size)\n\n def save(self, fname, protocol=utils.PICKLE_PROTOCOL):\n \"\"\"Save AnnoyIndexer instance to disk.\n\n Parameters\n ----------\n fname : str\n Path to output. Save will produce 2 files:\n `fname`: Annoy index itself.\n `fname.dict`: Index metadata.\n protocol : int, optional\n Protocol for pickle.\n\n Notes\n -----\n This method saves **only the index**. The trained model isn't preserved.\n\n \"\"\"\n self.index.save(fname)\n d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}\n with utils.open(fname + '.dict', 'wb') as fout:\n _pickle.dump(d, fout, protocol=protocol)\n\n def load(self, fname):\n \"\"\"Load an AnnoyIndexer instance from disk.\n\n Parameters\n ----------\n fname : str\n The path as previously used by ``save()``.\n\n Examples\n --------\n .. 
sourcecode:: pycon\n\n >>> from gensim.similarities.index import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>> from tempfile import mkstemp\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1, epochs=10)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> _, temp_fn = mkstemp()\n >>> indexer.save(temp_fn)\n >>>\n >>> new_indexer = AnnoyIndexer()\n >>> new_indexer.load(temp_fn)\n >>> new_indexer.model = model\n\n \"\"\"\n fname_dict = fname + '.dict'\n if not (os.path.exists(fname) and os.path.exists(fname_dict)):\n raise IOError(\n f\"Can't find index files '{fname}' and '{fname_dict}' - unable to restore AnnoyIndexer state.\"\n )\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n with utils.open(fname_dict, 'rb') as f:\n d = _pickle.loads(f.read())\n self.num_trees = d['num_trees']\n self.index = AnnoyIndex(d['f'], metric='angular')\n self.index.load(fname)\n self.labels = d['labels']\n\n def _build_from_model(self, vectors, labels, num_features):\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n index = AnnoyIndex(num_features, metric='angular')\n\n for vector_num, vector in enumerate(vectors):\n index.add_item(vector_num, vector)\n\n index.build(self.num_trees)\n self.index = index\n self.labels = labels\n\n def most_similar(self, vector, num_neighbors):\n \"\"\"Find `num_neighbors` most similar items.\n\n Parameters\n ----------\n vector : numpy.array\n Vector for word/document.\n num_neighbors : int\n Number of most similar items\n\n Returns\n -------\n list of (str, float)\n List of most similar items in format [(`item`, `cosine_distance`), ... ]\n\n \"\"\"\n ids, distances = self.index.get_nns_by_vector(\n vector, num_neighbors, include_distances=True)\n\n return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]\n", "path": "gensim/similarities/annoy.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module integrates Spotify's `Annoy <https://github.com/spotify/annoy>`_ (Approximate Nearest Neighbors Oh Yeah)\nlibrary with Gensim's :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n:class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.KeyedVectors` word embeddings.\n\n.. Important::\n To use this module, you must have the ``annoy`` library installed.\n To install it, run ``pip install annoy``.\n\n\"\"\"\n\n# Avoid import collisions on py2: this module has the same name as the actual Annoy library.\nfrom __future__ import absolute_import\n\nimport os\n\ntry:\n import cPickle as _pickle\nexcept ImportError:\n import pickle as _pickle\n\nfrom gensim import utils\nfrom gensim.models.doc2vec import Doc2Vec\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models.fasttext import FastText\nfrom gensim.models import KeyedVectors\n\n\n_NOANNOY = ImportError(\"Annoy not installed. 
To use the Annoy indexer, please run `pip install annoy`.\")\n\n\nclass AnnoyIndexer():\n \"\"\"This class allows the use of `Annoy <https://github.com/spotify/annoy>`_ for fast (approximate)\n vector retrieval in `most_similar()` calls of\n :class:`~gensim.models.word2vec.Word2Vec`, :class:`~gensim.models.doc2vec.Doc2Vec`,\n :class:`~gensim.models.fasttext.FastText` and :class:`~gensim.models.keyedvectors.Word2VecKeyedVectors` models.\n\n \"\"\"\n\n def __init__(self, model=None, num_trees=None):\n \"\"\"\n Parameters\n ----------\n model : trained model, optional\n Use vectors from this model as the source for the index.\n num_trees : int, optional\n Number of trees for Annoy indexer.\n\n Examples\n --------\n .. sourcecode:: pycon\n\n >>> from gensim.similarities.annoy import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> model.most_similar(\"cat\", topn=2, indexer=indexer)\n [('cat', 1.0), ('dog', 0.32011348009109497)]\n\n \"\"\"\n self.index = None\n self.labels = None\n self.model = model\n self.num_trees = num_trees\n\n if model and num_trees:\n # Extract the KeyedVectors object from whatever model we were given.\n if isinstance(self.model, Doc2Vec):\n kv = self.model.dv\n elif isinstance(self.model, (Word2Vec, FastText)):\n kv = self.model.wv\n elif isinstance(self.model, (KeyedVectors,)):\n kv = self.model\n else:\n raise ValueError(\"Only a Word2Vec, Doc2Vec, FastText or KeyedVectors instance can be used\")\n self._build_from_model(kv.get_normed_vectors(), kv.index_to_key, kv.vector_size)\n\n def save(self, fname, protocol=utils.PICKLE_PROTOCOL):\n \"\"\"Save AnnoyIndexer instance to disk.\n\n Parameters\n ----------\n fname : str\n Path to output. Save will produce 2 files:\n `fname`: Annoy index itself.\n `fname.dict`: Index metadata.\n protocol : int, optional\n Protocol for pickle.\n\n Notes\n -----\n This method saves **only the index**. The trained model isn't preserved.\n\n \"\"\"\n self.index.save(fname)\n d = {'f': self.model.vector_size, 'num_trees': self.num_trees, 'labels': self.labels}\n with utils.open(fname + '.dict', 'wb') as fout:\n _pickle.dump(d, fout, protocol=protocol)\n\n def load(self, fname):\n \"\"\"Load an AnnoyIndexer instance from disk.\n\n Parameters\n ----------\n fname : str\n The path as previously used by ``save()``.\n\n Examples\n --------\n .. 
sourcecode:: pycon\n\n >>> from gensim.similarities.index import AnnoyIndexer\n >>> from gensim.models import Word2Vec\n >>> from tempfile import mkstemp\n >>>\n >>> sentences = [['cute', 'cat', 'say', 'meow'], ['cute', 'dog', 'say', 'woof']]\n >>> model = Word2Vec(sentences, min_count=1, seed=1, epochs=10)\n >>>\n >>> indexer = AnnoyIndexer(model, 2)\n >>> _, temp_fn = mkstemp()\n >>> indexer.save(temp_fn)\n >>>\n >>> new_indexer = AnnoyIndexer()\n >>> new_indexer.load(temp_fn)\n >>> new_indexer.model = model\n\n \"\"\"\n fname_dict = fname + '.dict'\n if not (os.path.exists(fname) and os.path.exists(fname_dict)):\n raise IOError(\n f\"Can't find index files '{fname}' and '{fname_dict}' - unable to restore AnnoyIndexer state.\"\n )\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n with utils.open(fname_dict, 'rb') as f:\n d = _pickle.loads(f.read())\n self.num_trees = d['num_trees']\n self.index = AnnoyIndex(d['f'], metric='angular')\n self.index.load(fname)\n self.labels = d['labels']\n\n def _build_from_model(self, vectors, labels, num_features):\n try:\n from annoy import AnnoyIndex\n except ImportError:\n raise _NOANNOY\n\n index = AnnoyIndex(num_features, metric='angular')\n\n for vector_num, vector in enumerate(vectors):\n index.add_item(vector_num, vector)\n\n index.build(self.num_trees)\n self.index = index\n self.labels = labels\n\n def most_similar(self, vector, num_neighbors):\n \"\"\"Find `num_neighbors` most similar items.\n\n Parameters\n ----------\n vector : numpy.array\n Vector for word/document.\n num_neighbors : int\n Number of most similar items\n\n Returns\n -------\n list of (str, float)\n List of most similar items in format [(`item`, `cosine_distance`), ... ]\n\n \"\"\"\n ids, distances = self.index.get_nns_by_vector(\n vector, num_neighbors, include_distances=True)\n\n return [(self.labels[ids[i]], 1 - distances[i] ** 2 / 2) for i in range(len(ids))]\n", "path": "gensim/similarities/annoy.py"}]}
| 2,982 | 147 |
gh_patches_debug_3162
|
rasdani/github-patches
|
git_diff
|
mantl__mantl-1652
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unblocking Neutron ports fails when public_url is unset (or Neutron is named differently)
I am trying to install mantl on openstack and am using the sample.yml playbook. When I run the playbook, it fails here
```
TASK: [calico | unlock neutron ports to allow calico traffic] *****************
failed: [mantl-worker-001] => (item=mantl-edge-01) => {"changed": false, "cmd": "/usr/local/bin/neutron_port_update.py \"192.168.0.0/16\" \"fa:16:3e:47:b1:91\"", "delta": "0:00:00.329717", "end": "2016-06-15 21:18:58.223490", "failed": true, "failed_when_result": true, "item": "mantl-edge-01", "rc": 1, "start": "2016-06-15 21:18:57.893773", "stdout_lines": [], "warnings": []}
stderr: Traceback (most recent call last):
File "/usr/local/bin/neutron_port_update.py", line 128, in <module>
ports = list_ports(token, public_url)
File "/usr/local/bin/neutron_port_update.py", line 82, in list_ports
auth_url = public_url + "v2.0/ports"
TypeError: unsupported operand type(s) for +: 'NoneType' and 'str'
```
It seems that public_url is unset. I manually ran neutron_port_update.py and tried to debug it. public_url gets set by neutron_public_url, and it appears that it is looking for an endpoint named neutron. I have attached the JSON input and there is no endpoint with that name.
[parsed_json.txt](https://github.com/CiscoCloud/mantl/files/317249/parsed_json.txt)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/calico/files/neutron_port_update.py`
Content:
```
1 #!/usr/bin/env python
2 # This script updates the allowed address pairs in Neutron with the
3 # 'neutron port-update' command. This is required by Calico in OpenStack,
4 # otherwise BGP will not be working. We query OpenStack API directly to prevent
5 # installing any dependencies such as python-neutronclient.
6 #
7 # USAGE: script_name arg1 arg2...argN
8 # arg1 - Calico network, i.e. 192.168.0.0/24
9 # arg2...argN - VMs MAC addresses
10 #
11 # Script exit codes (for Ansible)
12 # 0 - port has been updated
13 # 1 - error
14 # 2 - no update to port [default]
15
16 import json
17 import os
18 import requests
19 import sys
20
21 def credentials():
22 """Retrieves credentials"""
23
24 username = os.environ.get('OS_USERNAME')
25 password = os.environ.get('OS_PASSWORD')
26 tenant_name = os.environ.get('OS_TENANT_NAME')
27 auth_url = os.environ.get('OS_AUTH_URL')
28
29 if not all((username, password, tenant_name, auth_url)):
30 sys.stderr.write("ERROR: Unable to get Keystone credentials\n")
31 exit(1)
32
33 return {
34 'username': username,
35 'password': password,
36 'tenant_name': tenant_name,
37 'auth_url': auth_url
38 }
39
40 def get_catalog():
41 """Get service catalog from Keystone with token and all endpoints"""
42
43 creds = credentials()
44 headers = {'Content-Type': 'application/json'}
45 payload = {
46 "auth":
47 {
48 "tenantName": creds['tenant_name'],
49 "passwordCredentials": {
50 "username": creds['username'],
51 "password": creds['password']
52 }
53 }
54 }
55 auth_url = creds['auth_url'] + "/tokens"
56 r = requests.post(auth_url, headers=headers, data=json.dumps(payload))
57
58 parsed_json = json.loads(r.text)
59 if not parsed_json or 'error' in parsed_json:
60 sys.stderr.write("ERROR: Unable to get authentication token\n")
61 exit(1)
62
63 return parsed_json
64
65 def get_token(catalog):
66 """Get Keystone authentication token"""
67
68 return catalog['access']['token']['id']
69
70 def neutron_public_url(catalog):
71 """Get Neutron publicURL"""
72
73 for i in catalog['access']['serviceCatalog']:
74 if i['name'] == 'neutron':
75 for endpoint in i['endpoints']:
76 return endpoint['publicURL']
77
78 def list_ports(token, public_url):
79 """List Neutron ports"""
80
81 headers = {'X-Auth-Token': token}
82 auth_url = public_url + "v2.0/ports"
83 r = requests.get(auth_url, headers=headers)
84
85 if r.text:
86 parsed_json = json.loads(r.text)
87 return parsed_json['ports']
88 else:
89 sys.stderr.write("ERROR: Unable to retrieve Neutron ports list\n")
90 exit(1)
91
92 def update_port(token, public_url, port_id, mac_address, calico_network):
93 """Update Neutron port with the allowed address pairs"""
94
95 headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
96 payload = {
97 "port": {
98 "allowed_address_pairs": [
99 {
100 "ip_address": calico_network,
101 "mac_address": mac_address
102 }
103 ]
104 }
105 }
106 auth_url = public_url + "v2.0/ports/" + port_id
107 r = requests.put(auth_url, headers=headers, data=json.dumps(payload))
108
109 parsed_json = json.loads(r.text)
110 if r.status_code != 200 or 'NeutronError' in parsed_json:
111 sys.stderr.write("ERROR: Unable to update port: %s\n" % parsed_json['NeutronError'])
112 exit(1)
113 else:
114 return r.status_code
115
116 if __name__ == "__main__":
117
118 if len(sys.argv) < 3:
119 sys.stderr.write("ERROR: Please run script with the correct arguments\n")
120 exit(1)
121
122 calico_network = sys.argv[1]
123 vms_mac_addresses = sys.argv[2:]
124
125 catalog = get_catalog()
126 token = get_token(catalog)
127 public_url = neutron_public_url(catalog)
128 ports = list_ports(token, public_url)
129
130 exit_code = 0 # no update to port
131
132 for port in ports:
133 port_id = port['id']
134 mac_address = port['mac_address']
135 if mac_address in vms_mac_addresses and not port['allowed_address_pairs']:
136 status_code = update_port(token, public_url, port_id, mac_address, calico_network)
137 if status_code == 200:
138 exit_code = 2 # port has been updated
139
140 exit(exit_code)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/roles/calico/files/neutron_port_update.py b/roles/calico/files/neutron_port_update.py
--- a/roles/calico/files/neutron_port_update.py
+++ b/roles/calico/files/neutron_port_update.py
@@ -71,7 +71,7 @@
"""Get Neutron publicURL"""
for i in catalog['access']['serviceCatalog']:
- if i['name'] == 'neutron':
+ if i['type'] == 'network':
for endpoint in i['endpoints']:
return endpoint['publicURL']
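The change is easier to see against a concrete service catalog. The sketch below is a standalone illustration, not project code: it reuses the `access.serviceCatalog` layout that `get_catalog()` in this file already returns, and the service names and URLs are invented, since the point of the patch is that the networking service is not always registered under the name `neutron`.

```python
# Standalone sketch: look the networking endpoint up by service *type*, not *name*.
def find_network_public_url(catalog):
    """Return the first publicURL of the catalog entry whose type is 'network'."""
    for service in catalog['access']['serviceCatalog']:
        if service['type'] == 'network':            # the patched check
            for endpoint in service['endpoints']:
                return endpoint['publicURL']

# Invented catalog in the Keystone v2 layout; the networking service is deliberately
# *not* named 'neutron', which is exactly what broke the old name-based check.
sample_catalog = {
    "access": {
        "serviceCatalog": [
            {"name": "keystone", "type": "identity",
             "endpoints": [{"publicURL": "https://cloud.example:5000/v2.0"}]},
            {"name": "networking", "type": "network",
             "endpoints": [{"publicURL": "https://cloud.example:9696/"}]},
        ]
    }
}

print(find_network_public_url(sample_catalog))      # https://cloud.example:9696/
```

Matching on the service type keeps working when an operator renames the service, which a name-based lookup cannot guarantee.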
|
{"golden_diff": "diff --git a/roles/calico/files/neutron_port_update.py b/roles/calico/files/neutron_port_update.py\n--- a/roles/calico/files/neutron_port_update.py\n+++ b/roles/calico/files/neutron_port_update.py\n@@ -71,7 +71,7 @@\n \"\"\"Get Neutron publicURL\"\"\"\n \n for i in catalog['access']['serviceCatalog']:\n- if i['name'] == 'neutron':\n+ if i['type'] == 'network':\n for endpoint in i['endpoints']:\n return endpoint['publicURL']\n", "issue": "Unblocking Neutron ports fails when public_url is unset (or Neutron is named differently)\nI am trying to install mantl on openstack and am using the sample.yml playbook. When I run the playbook, it fails here\n\n```\nTASK: [calico | unlock neutron ports to allow calico traffic] *****************\nfailed: [mantl-worker-001] => (item=mantl-edge-01) => {\"changed\": false, \"cmd\": \"/usr/local/bin/neutron_port_update.py \\\"192.168.0.0/16\\\" \\\"fa:16:3e:47:b1:91\\\"\", \"delta\": \"0:00:00.329717\", \"end\": \"2016-06-15 21:18:58.223490\", \"failed\": true, \"failed_when_result\": true, \"item\": \"mantl-edge-01\", \"rc\": 1, \"start\": \"2016-06-15 21:18:57.893773\", \"stdout_lines\": [], \"warnings\": []}\nstderr: Traceback (most recent call last):\n File \"/usr/local/bin/neutron_port_update.py\", line 128, in <module>\n ports = list_ports(token, public_url)\n File \"/usr/local/bin/neutron_port_update.py\", line 82, in list_ports\n auth_url = public_url + \"v2.0/ports\"\nTypeError: unsupported operand type(s) for +: 'NoneType' and 'str'\n```\n\nIt seems that public_url is unset. I manually ran neutron_port_update.py, and tried to debug it. public_url gets set by neutron_public_url, and it appears that is looking for an endpoint named neutron. I have attached the json input and there is no endpoint with that name.\n\n[parsed_json.txt](https://github.com/CiscoCloud/mantl/files/317249/parsed_json.txt)\n\nUnblocking Neutron ports fails when public_url is unset (or Neutron is named differently)\nI am trying to install mantl on openstack and am using the sample.yml playbook. When I run the playbook, it fails here\n\n```\nTASK: [calico | unlock neutron ports to allow calico traffic] *****************\nfailed: [mantl-worker-001] => (item=mantl-edge-01) => {\"changed\": false, \"cmd\": \"/usr/local/bin/neutron_port_update.py \\\"192.168.0.0/16\\\" \\\"fa:16:3e:47:b1:91\\\"\", \"delta\": \"0:00:00.329717\", \"end\": \"2016-06-15 21:18:58.223490\", \"failed\": true, \"failed_when_result\": true, \"item\": \"mantl-edge-01\", \"rc\": 1, \"start\": \"2016-06-15 21:18:57.893773\", \"stdout_lines\": [], \"warnings\": []}\nstderr: Traceback (most recent call last):\n File \"/usr/local/bin/neutron_port_update.py\", line 128, in <module>\n ports = list_ports(token, public_url)\n File \"/usr/local/bin/neutron_port_update.py\", line 82, in list_ports\n auth_url = public_url + \"v2.0/ports\"\nTypeError: unsupported operand type(s) for +: 'NoneType' and 'str'\n```\n\nIt seems that public_url is unset. I manually ran neutron_port_update.py, and tried to debug it. public_url gets set by neutron_public_url, and it appears that is looking for an endpoint named neutron. I have attached the json input and there is no endpoint with that name.\n\n[parsed_json.txt](https://github.com/CiscoCloud/mantl/files/317249/parsed_json.txt)\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# This script updates the allowed address pairs in Neutron with the\n# 'neutron port-update' command. This is required by Calico in OpenStack,\n# otherwise BGP will not be working. 
We query OpenStack API directly to prevent\n# installing any dependencies such as python-neutronclient.\n#\n# USAGE: script_name arg1 arg2...argN\n# arg1 - Calico network, i.e. 192.168.0.0/24\n# arg2...argN - VMs MAC addresses\n#\n# Script exit codes (for Ansible)\n# 0 - port has been updated\n# 1 - error\n# 2 - no update to port [default]\n\nimport json\nimport os\nimport requests\nimport sys\n\ndef credentials():\n \"\"\"Retrieves credentials\"\"\"\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = os.environ.get('OS_TENANT_NAME')\n auth_url = os.environ.get('OS_AUTH_URL')\n\n if not all((username, password, tenant_name, auth_url)):\n sys.stderr.write(\"ERROR: Unable to get Keystone credentials\\n\")\n exit(1)\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'auth_url': auth_url\n }\n\ndef get_catalog():\n \"\"\"Get service catalog from Keystone with token and all endpoints\"\"\"\n\n creds = credentials()\n headers = {'Content-Type': 'application/json'}\n payload = {\n \"auth\":\n {\n \"tenantName\": creds['tenant_name'],\n \"passwordCredentials\": {\n \"username\": creds['username'],\n \"password\": creds['password']\n }\n }\n }\n auth_url = creds['auth_url'] + \"/tokens\"\n r = requests.post(auth_url, headers=headers, data=json.dumps(payload))\n\n parsed_json = json.loads(r.text)\n if not parsed_json or 'error' in parsed_json:\n sys.stderr.write(\"ERROR: Unable to get authentication token\\n\")\n exit(1)\n\n return parsed_json\n\ndef get_token(catalog):\n \"\"\"Get Keystone authentication token\"\"\"\n\n return catalog['access']['token']['id']\n\ndef neutron_public_url(catalog):\n \"\"\"Get Neutron publicURL\"\"\"\n\n for i in catalog['access']['serviceCatalog']:\n if i['name'] == 'neutron':\n for endpoint in i['endpoints']:\n return endpoint['publicURL']\n\ndef list_ports(token, public_url):\n \"\"\"List Neutron ports\"\"\"\n\n headers = {'X-Auth-Token': token}\n auth_url = public_url + \"v2.0/ports\"\n r = requests.get(auth_url, headers=headers)\n\n if r.text:\n parsed_json = json.loads(r.text)\n return parsed_json['ports']\n else:\n sys.stderr.write(\"ERROR: Unable to retrieve Neutron ports list\\n\")\n exit(1)\n\ndef update_port(token, public_url, port_id, mac_address, calico_network):\n \"\"\"Update Neutron port with the allowed address pairs\"\"\"\n\n headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}\n payload = {\n \"port\": {\n \"allowed_address_pairs\": [\n {\n \"ip_address\": calico_network,\n \"mac_address\": mac_address\n }\n ]\n }\n }\n auth_url = public_url + \"v2.0/ports/\" + port_id\n r = requests.put(auth_url, headers=headers, data=json.dumps(payload))\n\n parsed_json = json.loads(r.text)\n if r.status_code != 200 or 'NeutronError' in parsed_json:\n sys.stderr.write(\"ERROR: Unable to update port: %s\\n\" % parsed_json['NeutronError'])\n exit(1)\n else:\n return r.status_code\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 3:\n sys.stderr.write(\"ERROR: Please run script with the correct arguments\\n\")\n exit(1)\n\n calico_network = sys.argv[1]\n vms_mac_addresses = sys.argv[2:]\n\n catalog = get_catalog()\n token = get_token(catalog)\n public_url = neutron_public_url(catalog)\n ports = list_ports(token, public_url)\n\n exit_code = 0 # no update to port\n\n for port in ports:\n port_id = port['id']\n mac_address = port['mac_address']\n if mac_address in vms_mac_addresses and not port['allowed_address_pairs']:\n status_code = update_port(token, 
public_url, port_id, mac_address, calico_network)\n if status_code == 200:\n exit_code = 2 # port has been updated\n\n exit(exit_code)\n", "path": "roles/calico/files/neutron_port_update.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# This script updates the allowed address pairs in Neutron with the\n# 'neutron port-update' command. This is required by Calico in OpenStack,\n# otherwise BGP will not be working. We query OpenStack API directly to prevent\n# installing any dependencies such as python-neutronclient.\n#\n# USAGE: script_name arg1 arg2...argN\n# arg1 - Calico network, i.e. 192.168.0.0/24\n# arg2...argN - VMs MAC addresses\n#\n# Script exit codes (for Ansible)\n# 0 - port has been updated\n# 1 - error\n# 2 - no update to port [default]\n\nimport json\nimport os\nimport requests\nimport sys\n\ndef credentials():\n \"\"\"Retrieves credentials\"\"\"\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = os.environ.get('OS_TENANT_NAME')\n auth_url = os.environ.get('OS_AUTH_URL')\n\n if not all((username, password, tenant_name, auth_url)):\n sys.stderr.write(\"ERROR: Unable to get Keystone credentials\\n\")\n exit(1)\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'auth_url': auth_url\n }\n\ndef get_catalog():\n \"\"\"Get service catalog from Keystone with token and all endpoints\"\"\"\n\n creds = credentials()\n headers = {'Content-Type': 'application/json'}\n payload = {\n \"auth\":\n {\n \"tenantName\": creds['tenant_name'],\n \"passwordCredentials\": {\n \"username\": creds['username'],\n \"password\": creds['password']\n }\n }\n }\n auth_url = creds['auth_url'] + \"/tokens\"\n r = requests.post(auth_url, headers=headers, data=json.dumps(payload))\n\n parsed_json = json.loads(r.text)\n if not parsed_json or 'error' in parsed_json:\n sys.stderr.write(\"ERROR: Unable to get authentication token\\n\")\n exit(1)\n\n return parsed_json\n\ndef get_token(catalog):\n \"\"\"Get Keystone authentication token\"\"\"\n\n return catalog['access']['token']['id']\n\ndef neutron_public_url(catalog):\n \"\"\"Get Neutron publicURL\"\"\"\n\n for i in catalog['access']['serviceCatalog']:\n if i['type'] == 'network':\n for endpoint in i['endpoints']:\n return endpoint['publicURL']\n\ndef list_ports(token, public_url):\n \"\"\"List Neutron ports\"\"\"\n\n headers = {'X-Auth-Token': token}\n auth_url = public_url + \"v2.0/ports\"\n r = requests.get(auth_url, headers=headers)\n\n if r.text:\n parsed_json = json.loads(r.text)\n return parsed_json['ports']\n else:\n sys.stderr.write(\"ERROR: Unable to retrieve Neutron ports list\\n\")\n exit(1)\n\ndef update_port(token, public_url, port_id, mac_address, calico_network):\n \"\"\"Update Neutron port with the allowed address pairs\"\"\"\n\n headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}\n payload = {\n \"port\": {\n \"allowed_address_pairs\": [\n {\n \"ip_address\": calico_network,\n \"mac_address\": mac_address\n }\n ]\n }\n }\n auth_url = public_url + \"v2.0/ports/\" + port_id\n r = requests.put(auth_url, headers=headers, data=json.dumps(payload))\n\n parsed_json = json.loads(r.text)\n if r.status_code != 200 or 'NeutronError' in parsed_json:\n sys.stderr.write(\"ERROR: Unable to update port: %s\\n\" % parsed_json['NeutronError'])\n exit(1)\n else:\n return r.status_code\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 3:\n sys.stderr.write(\"ERROR: Please run script with the correct arguments\\n\")\n exit(1)\n\n 
calico_network = sys.argv[1]\n vms_mac_addresses = sys.argv[2:]\n\n catalog = get_catalog()\n token = get_token(catalog)\n public_url = neutron_public_url(catalog)\n ports = list_ports(token, public_url)\n\n exit_code = 0 # no update to port\n\n for port in ports:\n port_id = port['id']\n mac_address = port['mac_address']\n if mac_address in vms_mac_addresses and not port['allowed_address_pairs']:\n status_code = update_port(token, public_url, port_id, mac_address, calico_network)\n if status_code == 200:\n exit_code = 2 # port has been updated\n\n exit(exit_code)\n", "path": "roles/calico/files/neutron_port_update.py"}]}
| 2,500 | 123 |
gh_patches_debug_21691
|
rasdani/github-patches
|
git_diff
|
yt-project__yt-4016
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
REG: fail to configure yt from the command line if $HOME/.config/ doesn't exist
### Bug report
**Bug summary**
This was discovered downstream thanks to yt_astro_analysis' CI
**Code for reproduction**
```shell
yt config set --global yt suppress_stream_logging True
```
**Actual outcome**
```python-traceback
Traceback (most recent call last):
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py", line 106, in write
file_handler.write(config_as_str)
AttributeError: 'str' object has no attribute 'write'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/robcleme/.pyenv/versions/38-yt-dev/bin/yt", line 33, in <module>
sys.exit(load_entry_point('yt', 'console_scripts', 'yt')())
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py", line 1632, in run_main
args.func(args)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py", line 224, in run
self(args)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py", line 1418, in __call__
set_config(args.section, args.option, args.value, self.config_file)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py", line 173, in set_config
write_config(config_file)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py", line 177, in write_config
CONFIG.write(config_file)
File "/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py", line 109, in write
with open(file_handler, mode="w") as fh:
FileNotFoundError: [Errno 2] No such file or directory: '/Users/robcleme/.config/yt/yt.toml'
```
**Expected outcome**
The configuration dir should be created.
I bet this regression is caused by #3626
--- END ISSUE ---
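The traceback bottoms out in a plain `open(..., mode="w")`, which creates the file but never a missing parent directory; that is the entire failure mode, and it reproduces in a few lines outside of yt (the path below is a throwaway temporary directory, not the real config location):

```python
# Standalone reproduction of the failure mode, independent of yt.
import os
import tempfile

target = os.path.join(tempfile.mkdtemp(), "missing-dir", "yt.toml")
try:
    open(target, "w")                # open() will not create "missing-dir/" on the way
except FileNotFoundError as exc:
    print(exc)                       # [Errno 2] No such file or directory: '.../yt.toml'
```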
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt/utilities/configure.py`
Content:
```
1 import os
2 import sys
3 import warnings
4 from typing import Callable, List
5
6 import tomli_w
7 from more_itertools import always_iterable
8
9 from yt.utilities.configuration_tree import ConfigLeaf, ConfigNode
10
11 if sys.version_info >= (3, 11):
12 import tomllib
13 else:
14 import tomli as tomllib
15
16 configuration_callbacks: List[Callable[["YTConfig"], None]] = []
17
18
19 def config_dir():
20 config_root = os.environ.get(
21 "XDG_CONFIG_HOME", os.path.join(os.path.expanduser("~"), ".config")
22 )
23 conf_dir = os.path.join(config_root, "yt")
24 return conf_dir
25
26
27 class YTConfig:
28 def __init__(self, defaults=None):
29 if defaults is None:
30 defaults = {}
31 self.config_root = ConfigNode(None)
32
33 def get(self, section, *keys, callback=None):
34 node_or_leaf = self.config_root.get(section, *keys)
35 if isinstance(node_or_leaf, ConfigLeaf):
36 if callback is not None:
37 return callback(node_or_leaf)
38 return node_or_leaf.value
39 return node_or_leaf
40
41 def get_most_specific(self, section, *keys, **kwargs):
42 use_fallback = "fallback" in kwargs
43 fallback = kwargs.pop("fallback", None)
44 try:
45 return self.config_root.get_deepest_leaf(section, *keys)
46 except KeyError as err:
47 if use_fallback:
48 return fallback
49 else:
50 raise err
51
52 def update(self, new_values, metadata=None):
53 if metadata is None:
54 metadata = {}
55 self.config_root.update(new_values, metadata)
56
57 def has_section(self, section):
58 try:
59 self.config_root.get_child(section)
60 return True
61 except KeyError:
62 return False
63
64 def add_section(self, section):
65 self.config_root.add_child(section)
66
67 def remove_section(self, section):
68 if self.has_section(section):
69 self.config_root.remove_child(section)
70 return True
71 else:
72 return False
73
74 def set(self, *args, metadata=None):
75 section, *keys, value = args
76 if metadata is None:
77 metadata = {"source": "runtime"}
78 self.config_root.upsert_from_list(
79 [section] + list(keys), value, extra_data=metadata
80 )
81
82 def remove(self, *args):
83 self.config_root.pop_leaf(args)
84
85 def read(self, file_names):
86 file_names_read = []
87 for fname in always_iterable(file_names):
88 if not os.path.exists(fname):
89 continue
90 metadata = {"source": f"file: {fname}"}
91 try:
92 with open(fname, "rb") as fh:
93 data = tomllib.load(fh)
94 except tomllib.TOMLDecodeError as exc:
95 warnings.warn(
96 f"Could not load configuration file {fname} (invalid TOML: {exc})"
97 )
98 else:
99 self.update(data, metadata=metadata)
100 file_names_read.append(fname)
101
102 return file_names_read
103
104 def write(self, file_handler):
105 value = self.config_root.as_dict()
106 config_as_str = tomli_w.dumps(value)
107
108 try:
109 # Assuming file_handler has a write attribute
110 file_handler.write(config_as_str)
111 except AttributeError:
112 # Otherwise we expect a path to a file
113 with open(file_handler, mode="w") as fh:
114 fh.write(config_as_str)
115
116 @staticmethod
117 def get_global_config_file():
118 return os.path.join(config_dir(), "yt.toml")
119
120 @staticmethod
121 def get_local_config_file():
122 return os.path.join(os.path.abspath(os.curdir), "yt.toml")
123
124 def __setitem__(self, args, value):
125 section, *keys = always_iterable(args)
126 self.set(section, *keys, value, metadata=None)
127
128 def __getitem__(self, key):
129 section, *keys = always_iterable(key)
130 return self.get(section, *keys)
131
132 def __contains__(self, item):
133 return item in self.config_root
134
135 # Add support for IPython rich display
136 # see https://ipython.readthedocs.io/en/stable/config/integrating.html
137 def _repr_json_(self):
138 return self.config_root._repr_json_()
139
140
141 CONFIG = YTConfig()
142
143
144 def _cast_bool_helper(value):
145 if value == "True":
146 return True
147 elif value == "False":
148 return False
149 else:
150 raise ValueError("Cannot safely cast to bool")
151
152
153 def _expand_all(s):
154 return os.path.expandvars(os.path.expanduser(s))
155
156
157 def _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):
158 for t in types:
159 try:
160 retval = t(value)
161 return retval
162 except ValueError:
163 pass
164
165
166 def get_config(section, option):
167 *option_path, option_name = option.split(".")
168 return CONFIG.get(section, *option_path, option_name)
169
170
171 def set_config(section, option, value, config_file):
172 if not CONFIG.has_section(section):
173 CONFIG.add_section(section)
174
175 option_path = option.split(".")
176 CONFIG.set(section, *option_path, _cast_value_helper(value))
177 write_config(config_file)
178
179
180 def write_config(config_file):
181 CONFIG.write(config_file)
182
183
184 def rm_config(section, option, config_file):
185 option_path = option.split(".")
186 CONFIG.remove(section, *option_path)
187 write_config(config_file)
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py
--- a/yt/utilities/configure.py
+++ b/yt/utilities/configure.py
@@ -1,6 +1,7 @@
import os
import sys
import warnings
+from pathlib import Path
from typing import Callable, List
import tomli_w
@@ -106,12 +107,19 @@
config_as_str = tomli_w.dumps(value)
try:
- # Assuming file_handler has a write attribute
+ file_path = Path(file_handler)
+ except TypeError:
+ if not hasattr(file_handler, "write"):
+ raise TypeError(
+ f"Expected a path to a file, or a writable object, got {file_handler}"
+ ) from None
file_handler.write(config_as_str)
- except AttributeError:
- # Otherwise we expect a path to a file
- with open(file_handler, mode="w") as fh:
- fh.write(config_as_str)
+ else:
+ pdir = file_path.parent
+ if not pdir.exists():
+ warnings.warn(f"{pdir!s} does not exist, creating it (recursively)")
+ os.makedirs(pdir)
+ file_path.write_text(config_as_str)
@staticmethod
def get_global_config_file():
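The fix amounts to two ideas: treat the argument as a path first, and create any missing parent directories before writing. The helper below is a simplified standalone sketch of that pattern, not yt's code; it uses `exist_ok=True` and a throwaway temporary directory instead of the real `$HOME/.config/yt` location.

```python
# Standalone sketch of the path-or-writable-object pattern used by the patch.
import io
import os
import tempfile
from pathlib import Path

def write_text_to(target, text):
    """Write `text` to a filesystem path or to an already-open writable object."""
    try:
        path = Path(target)                    # raises TypeError for file-like objects
    except TypeError:
        target.write(text)                     # writable object: use it directly
        return
    os.makedirs(path.parent, exist_ok=True)    # the step that was missing before the patch
    path.write_text(text)

write_text_to(io.StringIO(), "[yt]\n")                    # file-like objects still work
nested = Path(tempfile.mkdtemp()) / "conf" / "yt.toml"
write_text_to(nested, "[yt]\n")                           # "conf/" is created on demand
print(nested.read_text())                                 # [yt]
```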
|
{"golden_diff": "diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py\n--- a/yt/utilities/configure.py\n+++ b/yt/utilities/configure.py\n@@ -1,6 +1,7 @@\n import os\n import sys\n import warnings\n+from pathlib import Path\n from typing import Callable, List\n \n import tomli_w\n@@ -106,12 +107,19 @@\n config_as_str = tomli_w.dumps(value)\n \n try:\n- # Assuming file_handler has a write attribute\n+ file_path = Path(file_handler)\n+ except TypeError:\n+ if not hasattr(file_handler, \"write\"):\n+ raise TypeError(\n+ f\"Expected a path to a file, or a writable object, got {file_handler}\"\n+ ) from None\n file_handler.write(config_as_str)\n- except AttributeError:\n- # Otherwise we expect a path to a file\n- with open(file_handler, mode=\"w\") as fh:\n- fh.write(config_as_str)\n+ else:\n+ pdir = file_path.parent\n+ if not pdir.exists():\n+ warnings.warn(f\"{pdir!s} does not exist, creating it (recursively)\")\n+ os.makedirs(pdir)\n+ file_path.write_text(config_as_str)\n \n @staticmethod\n def get_global_config_file():\n", "issue": "REG: fail to configure yt from the command line if $HOME/.config/ doesn't exist\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nThis was discovered downstream thanks to yt_astro_analysis' CI\r\n\r\n**Code for reproduction**\r\n```shell\r\nyt config set --global yt suppress_stream_logging True\r\n```\r\n\r\n**Actual outcome**\r\n\r\n```python-traceback\r\nTraceback (most recent call last):\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py\", line 106, in write\r\n file_handler.write(config_as_str)\r\nAttributeError: 'str' object has no attribute 'write'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/robcleme/.pyenv/versions/38-yt-dev/bin/yt\", line 33, in <module>\r\n sys.exit(load_entry_point('yt', 'console_scripts', 'yt')())\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py\", line 1632, in run_main\r\n args.func(args)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py\", line 224, in run\r\n self(args)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/command_line.py\", line 1418, in __call__\r\n set_config(args.section, args.option, args.value, self.config_file)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py\", line 173, in set_config\r\n write_config(config_file)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py\", line 177, in write_config\r\n CONFIG.write(config_file)\r\n File \"/Users/robcleme/dev/yt-project/yt/yt/utilities/configure.py\", line 109, in write\r\n with open(file_handler, mode=\"w\") as fh:\r\nFileNotFoundError: [Errno 2] No such file or directory: '/Users/robcleme/.config/yt/yt.toml'\r\n```\r\n\r\n**Expected outcome**\r\n\r\nThe configuration dir should be created.\r\nI bet this regression is caused by #3626\n", "before_files": [{"content": "import os\nimport sys\nimport warnings\nfrom typing import Callable, List\n\nimport tomli_w\nfrom more_itertools import always_iterable\n\nfrom yt.utilities.configuration_tree import ConfigLeaf, ConfigNode\n\nif sys.version_info >= (3, 11):\n import tomllib\nelse:\n import tomli as tomllib\n\nconfiguration_callbacks: List[Callable[[\"YTConfig\"], None]] = []\n\n\ndef config_dir():\n config_root = os.environ.get(\n \"XDG_CONFIG_HOME\", os.path.join(os.path.expanduser(\"~\"), \".config\")\n )\n conf_dir = os.path.join(config_root, \"yt\")\n return conf_dir\n\n\nclass YTConfig:\n def 
__init__(self, defaults=None):\n if defaults is None:\n defaults = {}\n self.config_root = ConfigNode(None)\n\n def get(self, section, *keys, callback=None):\n node_or_leaf = self.config_root.get(section, *keys)\n if isinstance(node_or_leaf, ConfigLeaf):\n if callback is not None:\n return callback(node_or_leaf)\n return node_or_leaf.value\n return node_or_leaf\n\n def get_most_specific(self, section, *keys, **kwargs):\n use_fallback = \"fallback\" in kwargs\n fallback = kwargs.pop(\"fallback\", None)\n try:\n return self.config_root.get_deepest_leaf(section, *keys)\n except KeyError as err:\n if use_fallback:\n return fallback\n else:\n raise err\n\n def update(self, new_values, metadata=None):\n if metadata is None:\n metadata = {}\n self.config_root.update(new_values, metadata)\n\n def has_section(self, section):\n try:\n self.config_root.get_child(section)\n return True\n except KeyError:\n return False\n\n def add_section(self, section):\n self.config_root.add_child(section)\n\n def remove_section(self, section):\n if self.has_section(section):\n self.config_root.remove_child(section)\n return True\n else:\n return False\n\n def set(self, *args, metadata=None):\n section, *keys, value = args\n if metadata is None:\n metadata = {\"source\": \"runtime\"}\n self.config_root.upsert_from_list(\n [section] + list(keys), value, extra_data=metadata\n )\n\n def remove(self, *args):\n self.config_root.pop_leaf(args)\n\n def read(self, file_names):\n file_names_read = []\n for fname in always_iterable(file_names):\n if not os.path.exists(fname):\n continue\n metadata = {\"source\": f\"file: {fname}\"}\n try:\n with open(fname, \"rb\") as fh:\n data = tomllib.load(fh)\n except tomllib.TOMLDecodeError as exc:\n warnings.warn(\n f\"Could not load configuration file {fname} (invalid TOML: {exc})\"\n )\n else:\n self.update(data, metadata=metadata)\n file_names_read.append(fname)\n\n return file_names_read\n\n def write(self, file_handler):\n value = self.config_root.as_dict()\n config_as_str = tomli_w.dumps(value)\n\n try:\n # Assuming file_handler has a write attribute\n file_handler.write(config_as_str)\n except AttributeError:\n # Otherwise we expect a path to a file\n with open(file_handler, mode=\"w\") as fh:\n fh.write(config_as_str)\n\n @staticmethod\n def get_global_config_file():\n return os.path.join(config_dir(), \"yt.toml\")\n\n @staticmethod\n def get_local_config_file():\n return os.path.join(os.path.abspath(os.curdir), \"yt.toml\")\n\n def __setitem__(self, args, value):\n section, *keys = always_iterable(args)\n self.set(section, *keys, value, metadata=None)\n\n def __getitem__(self, key):\n section, *keys = always_iterable(key)\n return self.get(section, *keys)\n\n def __contains__(self, item):\n return item in self.config_root\n\n # Add support for IPython rich display\n # see https://ipython.readthedocs.io/en/stable/config/integrating.html\n def _repr_json_(self):\n return self.config_root._repr_json_()\n\n\nCONFIG = YTConfig()\n\n\ndef _cast_bool_helper(value):\n if value == \"True\":\n return True\n elif value == \"False\":\n return False\n else:\n raise ValueError(\"Cannot safely cast to bool\")\n\n\ndef _expand_all(s):\n return os.path.expandvars(os.path.expanduser(s))\n\n\ndef _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):\n for t in types:\n try:\n retval = t(value)\n return retval\n except ValueError:\n pass\n\n\ndef get_config(section, option):\n *option_path, option_name = option.split(\".\")\n return CONFIG.get(section, *option_path, 
option_name)\n\n\ndef set_config(section, option, value, config_file):\n if not CONFIG.has_section(section):\n CONFIG.add_section(section)\n\n option_path = option.split(\".\")\n CONFIG.set(section, *option_path, _cast_value_helper(value))\n write_config(config_file)\n\n\ndef write_config(config_file):\n CONFIG.write(config_file)\n\n\ndef rm_config(section, option, config_file):\n option_path = option.split(\".\")\n CONFIG.remove(section, *option_path)\n write_config(config_file)\n", "path": "yt/utilities/configure.py"}], "after_files": [{"content": "import os\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom typing import Callable, List\n\nimport tomli_w\nfrom more_itertools import always_iterable\n\nfrom yt.utilities.configuration_tree import ConfigLeaf, ConfigNode\n\nif sys.version_info >= (3, 11):\n import tomllib\nelse:\n import tomli as tomllib\n\nconfiguration_callbacks: List[Callable[[\"YTConfig\"], None]] = []\n\n\ndef config_dir():\n config_root = os.environ.get(\n \"XDG_CONFIG_HOME\", os.path.join(os.path.expanduser(\"~\"), \".config\")\n )\n conf_dir = os.path.join(config_root, \"yt\")\n return conf_dir\n\n\nclass YTConfig:\n def __init__(self, defaults=None):\n if defaults is None:\n defaults = {}\n self.config_root = ConfigNode(None)\n\n def get(self, section, *keys, callback=None):\n node_or_leaf = self.config_root.get(section, *keys)\n if isinstance(node_or_leaf, ConfigLeaf):\n if callback is not None:\n return callback(node_or_leaf)\n return node_or_leaf.value\n return node_or_leaf\n\n def get_most_specific(self, section, *keys, **kwargs):\n use_fallback = \"fallback\" in kwargs\n fallback = kwargs.pop(\"fallback\", None)\n try:\n return self.config_root.get_deepest_leaf(section, *keys)\n except KeyError as err:\n if use_fallback:\n return fallback\n else:\n raise err\n\n def update(self, new_values, metadata=None):\n if metadata is None:\n metadata = {}\n self.config_root.update(new_values, metadata)\n\n def has_section(self, section):\n try:\n self.config_root.get_child(section)\n return True\n except KeyError:\n return False\n\n def add_section(self, section):\n self.config_root.add_child(section)\n\n def remove_section(self, section):\n if self.has_section(section):\n self.config_root.remove_child(section)\n return True\n else:\n return False\n\n def set(self, *args, metadata=None):\n section, *keys, value = args\n if metadata is None:\n metadata = {\"source\": \"runtime\"}\n self.config_root.upsert_from_list(\n [section] + list(keys), value, extra_data=metadata\n )\n\n def remove(self, *args):\n self.config_root.pop_leaf(args)\n\n def read(self, file_names):\n file_names_read = []\n for fname in always_iterable(file_names):\n if not os.path.exists(fname):\n continue\n metadata = {\"source\": f\"file: {fname}\"}\n try:\n with open(fname, \"rb\") as fh:\n data = tomllib.load(fh)\n except tomllib.TOMLDecodeError as exc:\n warnings.warn(\n f\"Could not load configuration file {fname} (invalid TOML: {exc})\"\n )\n else:\n self.update(data, metadata=metadata)\n file_names_read.append(fname)\n\n return file_names_read\n\n def write(self, file_handler):\n value = self.config_root.as_dict()\n config_as_str = tomli_w.dumps(value)\n\n try:\n file_path = Path(file_handler)\n except TypeError:\n if not hasattr(file_handler, \"write\"):\n raise TypeError(\n f\"Expected a path to a file, or a writable object, got {file_handler}\"\n ) from None\n file_handler.write(config_as_str)\n else:\n pdir = file_path.parent\n if not pdir.exists():\n warnings.warn(f\"{pdir!s} does not 
exist, creating it (recursively)\")\n os.makedirs(pdir)\n file_path.write_text(config_as_str)\n\n @staticmethod\n def get_global_config_file():\n return os.path.join(config_dir(), \"yt.toml\")\n\n @staticmethod\n def get_local_config_file():\n return os.path.join(os.path.abspath(os.curdir), \"yt.toml\")\n\n def __setitem__(self, args, value):\n section, *keys = always_iterable(args)\n self.set(section, *keys, value, metadata=None)\n\n def __getitem__(self, key):\n section, *keys = always_iterable(key)\n return self.get(section, *keys)\n\n def __contains__(self, item):\n return item in self.config_root\n\n # Add support for IPython rich display\n # see https://ipython.readthedocs.io/en/stable/config/integrating.html\n def _repr_json_(self):\n return self.config_root._repr_json_()\n\n\nCONFIG = YTConfig()\n\n\ndef _cast_bool_helper(value):\n if value == \"True\":\n return True\n elif value == \"False\":\n return False\n else:\n raise ValueError(\"Cannot safely cast to bool\")\n\n\ndef _expand_all(s):\n return os.path.expandvars(os.path.expanduser(s))\n\n\ndef _cast_value_helper(value, types=(_cast_bool_helper, int, float, _expand_all)):\n for t in types:\n try:\n retval = t(value)\n return retval\n except ValueError:\n pass\n\n\ndef get_config(section, option):\n *option_path, option_name = option.split(\".\")\n return CONFIG.get(section, *option_path, option_name)\n\n\ndef set_config(section, option, value, config_file):\n if not CONFIG.has_section(section):\n CONFIG.add_section(section)\n\n option_path = option.split(\".\")\n CONFIG.set(section, *option_path, _cast_value_helper(value))\n write_config(config_file)\n\n\ndef write_config(config_file):\n CONFIG.write(config_file)\n\n\ndef rm_config(section, option, config_file):\n option_path = option.split(\".\")\n CONFIG.remove(section, *option_path)\n write_config(config_file)\n", "path": "yt/utilities/configure.py"}]}
| 2,411 | 294 |
gh_patches_debug_11475
|
rasdani/github-patches
|
git_diff
|
huggingface__transformers-4477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
❓ Warning: This overload of addcdiv_ is deprecated
# ❓ Questions & Help
When running the [official Colab example of GLUE](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/trainer/01_text_classification.ipynb), during training I receive a `UserWarning` :
```
/pytorch/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of addcdiv_ is deprecated:
addcdiv_(Number value, Tensor tensor1, Tensor tensor2)
Consider using one of the following signatures instead:
addcdiv_(Tensor tensor1, Tensor tensor2, *, Number value)
```
---
**Is it expected ?**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/transformers/optimization.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """PyTorch optimization for BERT model."""
16
17 import logging
18 import math
19
20 import torch
21 from torch.optim import Optimizer
22 from torch.optim.lr_scheduler import LambdaLR
23
24
25 logger = logging.getLogger(__name__)
26
27
28 def get_constant_schedule(optimizer, last_epoch=-1):
29 """ Create a schedule with a constant learning rate.
30 """
31 return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
32
33
34 def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
35 """ Create a schedule with a constant learning rate preceded by a warmup
36 period during which the learning rate increases linearly between 0 and 1.
37 """
38
39 def lr_lambda(current_step):
40 if current_step < num_warmup_steps:
41 return float(current_step) / float(max(1.0, num_warmup_steps))
42 return 1.0
43
44 return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
45
46
47 def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
48 """ Create a schedule with a learning rate that decreases linearly after
49 linearly increasing during a warmup period.
50 """
51
52 def lr_lambda(current_step):
53 if current_step < num_warmup_steps:
54 return float(current_step) / float(max(1, num_warmup_steps))
55 return max(
56 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
57 )
58
59 return LambdaLR(optimizer, lr_lambda, last_epoch)
60
61
62 def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
63 """ Create a schedule with a learning rate that decreases following the
64 values of the cosine function between 0 and `pi * cycles` after a warmup
65 period during which it increases linearly between 0 and 1.
66 """
67
68 def lr_lambda(current_step):
69 if current_step < num_warmup_steps:
70 return float(current_step) / float(max(1, num_warmup_steps))
71 progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
72 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
73
74 return LambdaLR(optimizer, lr_lambda, last_epoch)
75
76
77 def get_cosine_with_hard_restarts_schedule_with_warmup(
78 optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1
79 ):
80 """ Create a schedule with a learning rate that decreases following the
81 values of the cosine function with several hard restarts, after a warmup
82 period during which it increases linearly between 0 and 1.
83 """
84
85 def lr_lambda(current_step):
86 if current_step < num_warmup_steps:
87 return float(current_step) / float(max(1, num_warmup_steps))
88 progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
89 if progress >= 1.0:
90 return 0.0
91 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
92
93 return LambdaLR(optimizer, lr_lambda, last_epoch)
94
95
96 class AdamW(Optimizer):
97 """ Implements Adam algorithm with weight decay fix.
98
99 Parameters:
100 lr (float): learning rate. Default 1e-3.
101 betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
102 eps (float): Adams epsilon. Default: 1e-6
103 weight_decay (float): Weight decay. Default: 0.0
104 correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
105 """
106
107 def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
108 if lr < 0.0:
109 raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
110 if not 0.0 <= betas[0] < 1.0:
111 raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
112 if not 0.0 <= betas[1] < 1.0:
113 raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
114 if not 0.0 <= eps:
115 raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
116 defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
117 super().__init__(params, defaults)
118
119 def step(self, closure=None):
120 """Performs a single optimization step.
121
122 Arguments:
123 closure (callable, optional): A closure that reevaluates the model
124 and returns the loss.
125 """
126 loss = None
127 if closure is not None:
128 loss = closure()
129
130 for group in self.param_groups:
131 for p in group["params"]:
132 if p.grad is None:
133 continue
134 grad = p.grad.data
135 if grad.is_sparse:
136 raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
137
138 state = self.state[p]
139
140 # State initialization
141 if len(state) == 0:
142 state["step"] = 0
143 # Exponential moving average of gradient values
144 state["exp_avg"] = torch.zeros_like(p.data)
145 # Exponential moving average of squared gradient values
146 state["exp_avg_sq"] = torch.zeros_like(p.data)
147
148 exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
149 beta1, beta2 = group["betas"]
150
151 state["step"] += 1
152
153 # Decay the first and second moment running average coefficient
154 # In-place operations to update the averages at the same time
155 exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
156 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
157 denom = exp_avg_sq.sqrt().add_(group["eps"])
158
159 step_size = group["lr"]
160 if group["correct_bias"]: # No bias correction for Bert
161 bias_correction1 = 1.0 - beta1 ** state["step"]
162 bias_correction2 = 1.0 - beta2 ** state["step"]
163 step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
164
165 p.data.addcdiv_(-step_size, exp_avg, denom)
166
167 # Just adding the square of the weights to the loss function is *not*
168 # the correct way of using L2 regularization/weight decay with Adam,
169 # since that will interact with the m and v parameters in strange ways.
170 #
171 # Instead we want to decay the weights in a manner that doesn't interact
172 # with the m/v parameters. This is equivalent to adding the square
173 # of the weights to the loss with plain (non-momentum) SGD.
174 # Add weight decay at the end (fixed version)
175 if group["weight_decay"] > 0.0:
176 p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
177
178 return loss
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/transformers/optimization.py b/src/transformers/optimization.py
--- a/src/transformers/optimization.py
+++ b/src/transformers/optimization.py
@@ -162,7 +162,7 @@
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
- p.data.addcdiv_(-step_size, exp_avg, denom)
+ p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
|
{"golden_diff": "diff --git a/src/transformers/optimization.py b/src/transformers/optimization.py\n--- a/src/transformers/optimization.py\n+++ b/src/transformers/optimization.py\n@@ -162,7 +162,7 @@\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n \n- p.data.addcdiv_(-step_size, exp_avg, denom)\n+ p.data.addcdiv_(exp_avg, denom, value=-step_size)\n \n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n", "issue": "\u2753 Warning : This overload of addcdiv_ is deprecated\n# \u2753 Questions & Help\r\n\r\nWhen running the [official Colab example of GLUE](https://colab.research.google.com/github/huggingface/blog/blob/master/notebooks/trainer/01_text_classification.ipynb), during training I receive a `UserWarning` :\r\n\r\n```\r\n/pytorch/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of addcdiv_ is deprecated:\r\n\taddcdiv_(Number value, Tensor tensor1, Tensor tensor2)\r\nConsider using one of the following signatures instead:\r\n\taddcdiv_(Tensor tensor1, Tensor tensor2, *, Number value)\r\n```\r\n\r\n---\r\n\r\n**Is it expected ?**\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch optimization for BERT model.\"\"\"\n\nimport logging\nimport math\n\nimport torch\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import LambdaLR\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_constant_schedule(optimizer, last_epoch=-1):\n \"\"\" Create a schedule with a constant learning rate.\n \"\"\"\n return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)\n\n\ndef get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):\n \"\"\" Create a schedule with a constant learning rate preceded by a warmup\n period during which the learning rate increases linearly between 0 and 1.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1.0, num_warmup_steps))\n return 1.0\n\n return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)\n\n\ndef get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases linearly after\n linearly increasing during a warmup period.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n return max(\n 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))\n )\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n\ndef get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases 
following the\n values of the cosine function between 0 and `pi * cycles` after a warmup\n period during which it increases linearly between 0 and 1.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))\n return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n\ndef get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1\n):\n \"\"\" Create a schedule with a learning rate that decreases following the\n values of the cosine function with several hard restarts, after a warmup\n period during which it increases linearly between 0 and 1.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))\n if progress >= 1.0:\n return 0.0\n return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n\nclass AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n 
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha=-group[\"lr\"] * group[\"weight_decay\"])\n\n return loss\n", "path": "src/transformers/optimization.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch optimization for BERT model.\"\"\"\n\nimport logging\nimport math\n\nimport torch\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import LambdaLR\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_constant_schedule(optimizer, last_epoch=-1):\n \"\"\" Create a schedule with a constant learning rate.\n \"\"\"\n return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)\n\n\ndef get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):\n \"\"\" Create a schedule with a constant learning rate preceded by a warmup\n period during which the learning rate increases linearly between 0 and 1.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1.0, num_warmup_steps))\n return 1.0\n\n return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)\n\n\ndef get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases linearly after\n linearly increasing during a warmup period.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n return max(\n 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))\n )\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n\ndef get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases following the\n values of the cosine function between 0 and `pi * cycles` after a warmup\n period during which it increases 
linearly between 0 and 1.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))\n return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n\ndef get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer, num_warmup_steps, num_training_steps, num_cycles=1.0, last_epoch=-1\n):\n \"\"\" Create a schedule with a learning rate that decreases following the\n values of the cosine function with several hard restarts, after a warmup\n period during which it increases linearly between 0 and 1.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))\n if progress >= 1.0:\n return 0.0\n return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)\n\n\nclass AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n denom = 
exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(exp_avg, denom, value=-step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha=-group[\"lr\"] * group[\"weight_decay\"])\n\n return loss\n", "path": "src/transformers/optimization.py"}]}
| 2,722 | 163 |
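The one-line patch above is purely a call-signature migration. A minimal sketch of the deprecated versus current `addcdiv_` overloads, using arbitrary illustrative tensors:

```python
import torch

p = torch.zeros(3)
exp_avg = torch.ones(3)
denom = torch.full((3,), 2.0)
step_size = 0.1

# Deprecated overload -- this is what triggers the UserWarning in the issue:
#     p.addcdiv_(-step_size, exp_avg, denom)

# Current signature: both tensors first, the scalar passed as keyword `value`.
p.addcdiv_(exp_avg, denom, value=-step_size)  # p += value * (exp_avg / denom)
print(p)  # tensor([-0.0500, -0.0500, -0.0500])
```

Behaviour is identical; only the argument order changes, which is why the golden diff touches a single line.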
gh_patches_debug_15174 | rasdani/github-patches | git_diff | wagtail__wagtail-6389 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PathOverflow: Path Overflow from: '**********ZZZZ'
### Issue Summary
Creating a new page from the administration area raises a PathOverflow error.
```
PathOverflow: Path Overflow from: '000100020003ZZZZ'
File "django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "django/core/handlers/base.py", line 126, in _get_response
response = self.process_exception_by_middleware(e, request)
File "django/core/handlers/base.py", line 124, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "django/views/decorators/cache.py", line 44, in _wrapped_view_func
response = view_func(request, *args, **kwargs)
File "wagtail/admin/urls/__init__.py", line 102, in wrapper
return view_func(request, *args, **kwargs)
File "wagtail/admin/decorators.py", line 34, in decorated_view
return view_func(request, *args, **kwargs)
File "wagtail/admin/views/pages.py", line 224, in create
parent_page.add_child(instance=page)
File "treebeard/mp_tree.py", line 1013, in add_child
return MP_AddChildHandler(self, **kwargs).process()
File "treebeard/mp_tree.py", line 387, in process
newobj.path = self.node.get_last_child()._inc_path()
File "treebeard/mp_tree.py", line 1114, in _inc_path
raise PathOverflow(_("Path Overflow from: '%s'" % (self.path, )))
```
The only information I could find potentially regarding the issue was on SO at the following link: https://stackoverflow.com/questions/54166821/wagtail-pathoverflow-on-adding-new-child-page
There are only a handful of pages, so I'm not sure that particular issue applies though.
### Steps to Reproduce
I haven't been able to reproduce this myself, however a client has been seeing this behavior, which at the time I've just resolved by modifying the ZZZZ at the end to be numeric. I'm in the process of gathering some steps they may have taken to replicate it from this end (where they tried to add the page from)
* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: no
### Technical details
* Python version: 3.6.8
* Django version: 2.1.4
* Wagtail version: 2.4
* Browser version: N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/views/pages/preview.py`
Content:
```
1 from time import time
2
3 from django.contrib.contenttypes.models import ContentType
4 from django.core.exceptions import PermissionDenied
5 from django.http import Http404, JsonResponse
6 from django.http.request import QueryDict
7 from django.shortcuts import get_object_or_404
8 from django.template.response import TemplateResponse
9 from django.views.generic import View
10
11 from wagtail.core.models import Page
12
13
14 def view_draft(request, page_id):
15 page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
16 perms = page.permissions_for_user(request.user)
17 if not (perms.can_publish() or perms.can_edit()):
18 raise PermissionDenied
19
20 try:
21 preview_mode = page.default_preview_mode
22 except IndexError:
23 raise PermissionDenied
24
25 return page.make_preview_request(request, preview_mode)
26
27
28 class PreviewOnEdit(View):
29 http_method_names = ('post', 'get')
30 preview_expiration_timeout = 60 * 60 * 24 # seconds
31 session_key_prefix = 'wagtail-preview-'
32
33 def remove_old_preview_data(self):
34 expiration = time() - self.preview_expiration_timeout
35 expired_keys = [
36 k for k, v in self.request.session.items()
37 if k.startswith(self.session_key_prefix) and v[1] < expiration]
38 # Removes the session key gracefully
39 for k in expired_keys:
40 self.request.session.pop(k)
41
42 @property
43 def session_key(self):
44 return self.session_key_prefix + ','.join(self.args)
45
46 def get_page(self):
47 return get_object_or_404(Page,
48 id=self.kwargs["page_id"]).get_latest_revision_as_page()
49
50 def get_form(self, page, query_dict):
51 form_class = page.get_edit_handler().get_form_class()
52 parent_page = page.get_parent().specific
53
54 if self.session_key not in self.request.session:
55 # Session key not in session, returning null form
56 return form_class(instance=page, parent_page=parent_page)
57
58 return form_class(query_dict, instance=page, parent_page=parent_page)
59
60 def post(self, request, *args, **kwargs):
61 # TODO: Handle request.FILES.
62 request.session[self.session_key] = request.POST.urlencode(), time()
63 self.remove_old_preview_data()
64 form = self.get_form(self.get_page(), request.POST)
65 return JsonResponse({'is_valid': form.is_valid()})
66
67 def error_response(self, page):
68 return TemplateResponse(
69 self.request, 'wagtailadmin/pages/preview_error.html',
70 {'page': page}
71 )
72
73 def get(self, request, *args, **kwargs):
74 page = self.get_page()
75
76 post_data, timestamp = self.request.session.get(self.session_key,
77 (None, None))
78 if not isinstance(post_data, str):
79 post_data = ''
80 form = self.get_form(page, QueryDict(post_data))
81
82 if not form.is_valid():
83 return self.error_response(page)
84
85 form.save(commit=False)
86
87 try:
88 preview_mode = request.GET.get('mode', page.default_preview_mode)
89 except IndexError:
90 raise PermissionDenied
91
92 return page.make_preview_request(request, preview_mode)
93
94
95 class PreviewOnCreate(PreviewOnEdit):
96 def get_page(self):
97 content_type_app_name = self.kwargs["content_type_app_name"]
98 content_type_model_name = self.kwargs["content_type_model_name"]
99 parent_page_id = self.kwargs["parent_page_id"]
100 try:
101 content_type = ContentType.objects.get_by_natural_key(
102 content_type_app_name, content_type_model_name)
103 except ContentType.DoesNotExist:
104 raise Http404
105
106 page = content_type.model_class()()
107 parent_page = get_object_or_404(Page, id=parent_page_id).specific
108 # We need to populate treebeard's path / depth fields in order to
109 # pass validation. We can't make these 100% consistent with the rest
110 # of the tree without making actual database changes (such as
111 # incrementing the parent's numchild field), but by calling treebeard's
112 # internal _get_path method, we can set a 'realistic' value that will
113 # hopefully enable tree traversal operations
114 # to at least partially work.
115 page.depth = parent_page.depth + 1
116 # Puts the page at the maximum possible path
117 # for a child of `parent_page`.
118 page.path = Page._get_children_path_interval(parent_page.path)[1]
119 return page
120
121 def get_form(self, page, query_dict):
122 form = super().get_form(page, query_dict)
123 if form.is_valid():
124 # Ensures our unsaved page has a suitable url.
125 form.instance.set_url_path(form.parent_page)
126
127 form.instance.full_clean()
128 return form
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/views/pages/preview.py b/wagtail/admin/views/pages/preview.py
--- a/wagtail/admin/views/pages/preview.py
+++ b/wagtail/admin/views/pages/preview.py
@@ -113,9 +113,15 @@
# hopefully enable tree traversal operations
# to at least partially work.
page.depth = parent_page.depth + 1
- # Puts the page at the maximum possible path
+ # Puts the page at the next available path
# for a child of `parent_page`.
- page.path = Page._get_children_path_interval(parent_page.path)[1]
+ if parent_page.is_leaf():
+ # set the path as the first child of parent_page
+ page.path = page._get_path(parent_page.path, page.depth, 1)
+ else:
+ # add the new page after the last child of parent_page
+ page.path = parent_page.get_last_child()._inc_path()
+
return page
def get_form(self, page, query_dict):
|
{"golden_diff": "diff --git a/wagtail/admin/views/pages/preview.py b/wagtail/admin/views/pages/preview.py\n--- a/wagtail/admin/views/pages/preview.py\n+++ b/wagtail/admin/views/pages/preview.py\n@@ -113,9 +113,15 @@\n # hopefully enable tree traversal operations\n # to at least partially work.\n page.depth = parent_page.depth + 1\n- # Puts the page at the maximum possible path\n+ # Puts the page at the next available path\n # for a child of `parent_page`.\n- page.path = Page._get_children_path_interval(parent_page.path)[1]\n+ if parent_page.is_leaf():\n+ # set the path as the first child of parent_page\n+ page.path = page._get_path(parent_page.path, page.depth, 1)\n+ else:\n+ # add the new page after the last child of parent_page\n+ page.path = parent_page.get_last_child()._inc_path()\n+\n return page\n \n def get_form(self, page, query_dict):\n", "issue": "PathOverflow: Path Overflow from: '**********ZZZZ'\n### Issue Summary\r\n\r\nCreating a new page from the administration area raises a PathOverflow error.\r\n\r\n```\r\nPathOverflow: Path Overflow from: '000100020003ZZZZ'\r\n File \"django/core/handlers/exception.py\", line 34, in inner\r\n response = get_response(request)\r\n File \"django/core/handlers/base.py\", line 126, in _get_response\r\n response = self.process_exception_by_middleware(e, request)\r\n File \"django/core/handlers/base.py\", line 124, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"django/views/decorators/cache.py\", line 44, in _wrapped_view_func\r\n response = view_func(request, *args, **kwargs)\r\n File \"wagtail/admin/urls/__init__.py\", line 102, in wrapper\r\n return view_func(request, *args, **kwargs)\r\n File \"wagtail/admin/decorators.py\", line 34, in decorated_view\r\n return view_func(request, *args, **kwargs)\r\n File \"wagtail/admin/views/pages.py\", line 224, in create\r\n parent_page.add_child(instance=page)\r\n File \"treebeard/mp_tree.py\", line 1013, in add_child\r\n return MP_AddChildHandler(self, **kwargs).process()\r\n File \"treebeard/mp_tree.py\", line 387, in process\r\n newobj.path = self.node.get_last_child()._inc_path()\r\n File \"treebeard/mp_tree.py\", line 1114, in _inc_path\r\n raise PathOverflow(_(\"Path Overflow from: '%s'\" % (self.path, )))\r\n```\r\n\r\nThe only information I could find potentially regarding the issue was on SO at the following link: https://stackoverflow.com/questions/54166821/wagtail-pathoverflow-on-adding-new-child-page\r\n\r\nThere are only a handful of pages, so I'm not sure that particular issue applies though.\r\n\r\n### Steps to Reproduce\r\n\r\nI haven't been able to reproduce this myself, however a client has been seeing this behavior, which at the time I've just resolved by modifying the ZZZZ at the end to be numeric. 
I'm in the process of gathering some steps they may have taken to replicate it from this end (where they tried to add the page from)\r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: no\r\n\r\n\r\n### Technical details\r\n\r\n* Python version: 3.6.8\r\n* Django version: 2.1.4\r\n* Wagtail version: 2.4\r\n* Browser version: N/A\r\n\n", "before_files": [{"content": "from time import time\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, JsonResponse\nfrom django.http.request import QueryDict\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.views.generic import View\n\nfrom wagtail.core.models import Page\n\n\ndef view_draft(request, page_id):\n page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()\n perms = page.permissions_for_user(request.user)\n if not (perms.can_publish() or perms.can_edit()):\n raise PermissionDenied\n\n try:\n preview_mode = page.default_preview_mode\n except IndexError:\n raise PermissionDenied\n\n return page.make_preview_request(request, preview_mode)\n\n\nclass PreviewOnEdit(View):\n http_method_names = ('post', 'get')\n preview_expiration_timeout = 60 * 60 * 24 # seconds\n session_key_prefix = 'wagtail-preview-'\n\n def remove_old_preview_data(self):\n expiration = time() - self.preview_expiration_timeout\n expired_keys = [\n k for k, v in self.request.session.items()\n if k.startswith(self.session_key_prefix) and v[1] < expiration]\n # Removes the session key gracefully\n for k in expired_keys:\n self.request.session.pop(k)\n\n @property\n def session_key(self):\n return self.session_key_prefix + ','.join(self.args)\n\n def get_page(self):\n return get_object_or_404(Page,\n id=self.kwargs[\"page_id\"]).get_latest_revision_as_page()\n\n def get_form(self, page, query_dict):\n form_class = page.get_edit_handler().get_form_class()\n parent_page = page.get_parent().specific\n\n if self.session_key not in self.request.session:\n # Session key not in session, returning null form\n return form_class(instance=page, parent_page=parent_page)\n\n return form_class(query_dict, instance=page, parent_page=parent_page)\n\n def post(self, request, *args, **kwargs):\n # TODO: Handle request.FILES.\n request.session[self.session_key] = request.POST.urlencode(), time()\n self.remove_old_preview_data()\n form = self.get_form(self.get_page(), request.POST)\n return JsonResponse({'is_valid': form.is_valid()})\n\n def error_response(self, page):\n return TemplateResponse(\n self.request, 'wagtailadmin/pages/preview_error.html',\n {'page': page}\n )\n\n def get(self, request, *args, **kwargs):\n page = self.get_page()\n\n post_data, timestamp = self.request.session.get(self.session_key,\n (None, None))\n if not isinstance(post_data, str):\n post_data = ''\n form = self.get_form(page, QueryDict(post_data))\n\n if not form.is_valid():\n return self.error_response(page)\n\n form.save(commit=False)\n\n try:\n preview_mode = request.GET.get('mode', page.default_preview_mode)\n except IndexError:\n raise PermissionDenied\n\n return page.make_preview_request(request, preview_mode)\n\n\nclass PreviewOnCreate(PreviewOnEdit):\n def get_page(self):\n content_type_app_name = self.kwargs[\"content_type_app_name\"]\n content_type_model_name = self.kwargs[\"content_type_model_name\"]\n parent_page_id = self.kwargs[\"parent_page_id\"]\n try:\n content_type = 
ContentType.objects.get_by_natural_key(\n content_type_app_name, content_type_model_name)\n except ContentType.DoesNotExist:\n raise Http404\n\n page = content_type.model_class()()\n parent_page = get_object_or_404(Page, id=parent_page_id).specific\n # We need to populate treebeard's path / depth fields in order to\n # pass validation. We can't make these 100% consistent with the rest\n # of the tree without making actual database changes (such as\n # incrementing the parent's numchild field), but by calling treebeard's\n # internal _get_path method, we can set a 'realistic' value that will\n # hopefully enable tree traversal operations\n # to at least partially work.\n page.depth = parent_page.depth + 1\n # Puts the page at the maximum possible path\n # for a child of `parent_page`.\n page.path = Page._get_children_path_interval(parent_page.path)[1]\n return page\n\n def get_form(self, page, query_dict):\n form = super().get_form(page, query_dict)\n if form.is_valid():\n # Ensures our unsaved page has a suitable url.\n form.instance.set_url_path(form.parent_page)\n\n form.instance.full_clean()\n return form\n", "path": "wagtail/admin/views/pages/preview.py"}], "after_files": [{"content": "from time import time\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, JsonResponse\nfrom django.http.request import QueryDict\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.views.generic import View\n\nfrom wagtail.core.models import Page\n\n\ndef view_draft(request, page_id):\n page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()\n perms = page.permissions_for_user(request.user)\n if not (perms.can_publish() or perms.can_edit()):\n raise PermissionDenied\n\n try:\n preview_mode = page.default_preview_mode\n except IndexError:\n raise PermissionDenied\n\n return page.make_preview_request(request, preview_mode)\n\n\nclass PreviewOnEdit(View):\n http_method_names = ('post', 'get')\n preview_expiration_timeout = 60 * 60 * 24 # seconds\n session_key_prefix = 'wagtail-preview-'\n\n def remove_old_preview_data(self):\n expiration = time() - self.preview_expiration_timeout\n expired_keys = [\n k for k, v in self.request.session.items()\n if k.startswith(self.session_key_prefix) and v[1] < expiration]\n # Removes the session key gracefully\n for k in expired_keys:\n self.request.session.pop(k)\n\n @property\n def session_key(self):\n return self.session_key_prefix + ','.join(self.args)\n\n def get_page(self):\n return get_object_or_404(Page,\n id=self.kwargs[\"page_id\"]).get_latest_revision_as_page()\n\n def get_form(self, page, query_dict):\n form_class = page.get_edit_handler().get_form_class()\n parent_page = page.get_parent().specific\n\n if self.session_key not in self.request.session:\n # Session key not in session, returning null form\n return form_class(instance=page, parent_page=parent_page)\n\n return form_class(query_dict, instance=page, parent_page=parent_page)\n\n def post(self, request, *args, **kwargs):\n # TODO: Handle request.FILES.\n request.session[self.session_key] = request.POST.urlencode(), time()\n self.remove_old_preview_data()\n form = self.get_form(self.get_page(), request.POST)\n return JsonResponse({'is_valid': form.is_valid()})\n\n def error_response(self, page):\n return TemplateResponse(\n self.request, 'wagtailadmin/pages/preview_error.html',\n {'page': page}\n )\n\n def get(self, request, 
*args, **kwargs):\n page = self.get_page()\n\n post_data, timestamp = self.request.session.get(self.session_key,\n (None, None))\n if not isinstance(post_data, str):\n post_data = ''\n form = self.get_form(page, QueryDict(post_data))\n\n if not form.is_valid():\n return self.error_response(page)\n\n form.save(commit=False)\n\n try:\n preview_mode = request.GET.get('mode', page.default_preview_mode)\n except IndexError:\n raise PermissionDenied\n\n return page.make_preview_request(request, preview_mode)\n\n\nclass PreviewOnCreate(PreviewOnEdit):\n def get_page(self):\n content_type_app_name = self.kwargs[\"content_type_app_name\"]\n content_type_model_name = self.kwargs[\"content_type_model_name\"]\n parent_page_id = self.kwargs[\"parent_page_id\"]\n try:\n content_type = ContentType.objects.get_by_natural_key(\n content_type_app_name, content_type_model_name)\n except ContentType.DoesNotExist:\n raise Http404\n\n page = content_type.model_class()()\n parent_page = get_object_or_404(Page, id=parent_page_id).specific\n # We need to populate treebeard's path / depth fields in order to\n # pass validation. We can't make these 100% consistent with the rest\n # of the tree without making actual database changes (such as\n # incrementing the parent's numchild field), but by calling treebeard's\n # internal _get_path method, we can set a 'realistic' value that will\n # hopefully enable tree traversal operations\n # to at least partially work.\n page.depth = parent_page.depth + 1\n # Puts the page at the next available path\n # for a child of `parent_page`.\n if parent_page.is_leaf():\n # set the path as the first child of parent_page\n page.path = page._get_path(parent_page.path, page.depth, 1)\n else:\n # add the new page after the last child of parent_page\n page.path = parent_page.get_last_child()._inc_path()\n\n return page\n\n def get_form(self, page, query_dict):\n form = super().get_form(page, query_dict)\n if form.is_valid():\n # Ensures our unsaved page has a suitable url.\n form.instance.set_url_path(form.parent_page)\n\n form.instance.full_clean()\n return form\n", "path": "wagtail/admin/views/pages/preview.py"}]}
| 2,176 | 233 |
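The overflow in the row above comes from treebeard's fixed-width materialized paths: each tree level is a 4-character segment over the alphabet 0-9/A-Z (treebeard's defaults, as far as I know), so 'ZZZZ' is the last possible sibling slot and `_inc_path()` on it has nowhere to go. A rough, illustrative re-implementation of that increment — not treebeard's actual code:

```python
ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"  # assumed default alphabet
STEP = 4                                           # assumed characters per level

def inc_last_segment(path: str) -> str:
    """Increment the final segment of a materialized path, MP_Node-style."""
    head, seg = path[:-STEP], path[-STEP:]
    n = 0
    for ch in seg:                      # decode the segment as base 36
        n = n * len(ALPHABET) + ALPHABET.index(ch)
    n += 1
    if n >= len(ALPHABET) ** STEP:      # no room left at this level
        raise OverflowError(f"Path Overflow from: {path!r}")
    digits = ""
    for _ in range(STEP):               # re-encode with fixed width
        n, r = divmod(n, len(ALPHABET))
        digits = ALPHABET[r] + digits
    return head + digits

print(inc_last_segment("000100020003"))          # -> '000100020004'
try:
    inc_last_segment("000100020003ZZZZ")
except OverflowError as exc:
    print(exc)  # Path Overflow from: '000100020003ZZZZ'
```

The old preview code parked the unsaved page at exactly that maximal slot, which is presumably how '…ZZZZ' ended up feeding later `_inc_path()` calls; the patch avoids the ceiling by choosing the next realistic child path instead.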
gh_patches_debug_27864 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-5158 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
notification sent for modules in de/unpublished project
**URL:** (depending on follow status, so URL does not help here)
**user:** follower
**expected behaviour:** I should not get a notification for modules in unpublished projects
**behaviour:** If I follow an unpublished or depublished project (e.g. because a project had been published and after a participation was depublished or because the initiator followed in preview before publishing) I get a notification eg for start of participation. The Module has to be added to project.
**important screensize:**
**device & browser:**
**Comment/Question:** it is an edge case but was topic in two support questions
Screenshot?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/notifications/emails.py`
Content:
```
1 from django.contrib import auth
2
3 from meinberlin.apps.contrib.emails import Email
4
5 User = auth.get_user_model()
6
7
8 def _exclude_actor(receivers, actor):
9 if not actor:
10 return receivers
11
12 if hasattr(receivers, "exclude"):
13 return receivers.exclude(id=actor.id)
14
15 return [receiver for receiver in receivers if not receiver == actor]
16
17
18 def _exclude_moderators(receivers, action):
19 if hasattr(action, "project"):
20 moderator_ids = action.project.moderators.values_list("id", flat=True)
21
22 if hasattr(receivers, "exclude"):
23 return receivers.exclude(id__in=moderator_ids)
24
25 return [user for user in receivers if user.id not in moderator_ids]
26
27 return receivers
28
29
30 def _exclude_notifications_disabled(receivers):
31 if hasattr(receivers, "filter"):
32 return receivers.filter(get_notifications=True)
33
34 return [user for user in receivers if user.get_notifications]
35
36
37 class NotifyCreatorEmail(Email):
38 template_name = "meinberlin_notifications/emails/notify_creator"
39
40 def get_receivers(self):
41 action = self.object
42 if hasattr(action.target, "creator"):
43 receivers = [action.target.creator]
44 receivers = _exclude_notifications_disabled(receivers)
45 receivers = _exclude_actor(receivers, action.actor)
46 receivers = _exclude_moderators(receivers, action)
47 return receivers
48 return []
49
50
51 class NotifyCreatorOrContactOnModeratorFeedback(Email):
52 template_name = (
53 "meinberlin_notifications/emails/notify_creator_on_moderator_feedback"
54 )
55
56 def get_receivers(self):
57 if hasattr(self.object, "contact_email"):
58 # send to contact
59 receivers = [self.object.contact_email]
60 else:
61 # send to creator
62 receivers = [self.object.creator]
63 receivers = _exclude_notifications_disabled(receivers)
64 return receivers
65
66 def get_context(self):
67 context = super().get_context()
68 context["object"] = self.object
69 if not hasattr(self.object, "contact_email"):
70 # send to creator
71 context["send_to_creator"] = True
72 return context
73
74
75 class NotifyModeratorsEmail(Email):
76 template_name = "meinberlin_notifications/emails/notify_moderator"
77
78 def get_receivers(self):
79 action = self.object
80 receivers = action.project.moderators.all()
81 receivers = _exclude_actor(receivers, action.actor)
82 receivers = _exclude_notifications_disabled(receivers)
83 return receivers
84
85
86 class NotifyInitiatorsOnProjectCreatedEmail(Email):
87 template_name = "meinberlin_notifications/emails/notify_initiators_project_created"
88
89 def get_receivers(self):
90 project = self.object
91 creator = User.objects.get(pk=self.kwargs["creator_pk"])
92 receivers = project.organisation.initiators.all()
93 receivers = _exclude_actor(receivers, creator)
94 receivers = _exclude_notifications_disabled(receivers)
95 return receivers
96
97 def get_context(self):
98 context = super().get_context()
99 creator = User.objects.get(pk=self.kwargs["creator_pk"])
100 context["creator"] = creator
101 context["project"] = self.object
102 return context
103
104
105 class NotifyFollowersOnPhaseStartedEmail(Email):
106 template_name = "meinberlin_notifications/emails" "/notify_followers_phase_started"
107
108 def get_receivers(self):
109 action = self.object
110 receivers = User.objects.filter(
111 follow__project=action.project,
112 follow__enabled=True,
113 )
114 receivers = _exclude_notifications_disabled(receivers)
115 return receivers
116
117
118 class NotifyFollowersOnPhaseIsOverSoonEmail(Email):
119 template_name = (
120 "meinberlin_notifications/emails" "/notify_followers_phase_over_soon"
121 )
122
123 def get_receivers(self):
124 action = self.object
125 receivers = User.objects.filter(
126 follow__project=action.project,
127 follow__enabled=True,
128 )
129 receivers = _exclude_notifications_disabled(receivers)
130 return receivers
131
132
133 class NotifyFollowersOnUpcommingEventEmail(Email):
134 template_name = (
135 "meinberlin_notifications/emails" "/notify_followers_event_upcomming"
136 )
137
138 def get_receivers(self):
139 action = self.object
140 receivers = User.objects.filter(
141 follow__project=action.project,
142 follow__enabled=True,
143 )
144 receivers = _exclude_notifications_disabled(receivers)
145 return receivers
146
```
Path: `meinberlin/apps/notifications/signals.py`
Content:
```
1 from django.contrib.auth import get_user_model
2 from django.db.models import signals
3 from django.dispatch import receiver
4
5 from adhocracy4.actions.models import Action
6 from adhocracy4.actions.verbs import Verbs
7 from adhocracy4.dashboard import signals as dashboard_signals
8 from adhocracy4.follows.models import Follow
9 from adhocracy4.projects.models import Project
10
11 from . import emails
12
13 User = get_user_model()
14
15
16 @receiver(signals.post_save, sender=Action)
17 def send_notifications(instance, created, **kwargs):
18 action = instance
19 verb = Verbs(action.verb)
20
21 if action.type in ("item", "comment") and verb in (Verbs.CREATE, Verbs.ADD):
22 emails.NotifyCreatorEmail.send(action)
23
24 if action.project:
25 emails.NotifyModeratorsEmail.send(action)
26
27 elif action.type == "phase" and action.project.project_type == "a4projects.Project":
28 if verb == Verbs.START:
29 emails.NotifyFollowersOnPhaseStartedEmail.send(action)
30 elif verb == Verbs.SCHEDULE:
31 emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)
32
33 elif action.type == "offlineevent" and verb == Verbs.START:
34 emails.NotifyFollowersOnUpcommingEventEmail.send(action)
35
36
37 @receiver(dashboard_signals.project_created)
38 def send_project_created_notifications(**kwargs):
39 project = kwargs.get("project")
40 creator = kwargs.get("user")
41 emails.NotifyInitiatorsOnProjectCreatedEmail.send(project, creator_pk=creator.pk)
42
43
44 @receiver(signals.m2m_changed, sender=Project.moderators.through)
45 def autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):
46 if action == "post_add":
47 autofollow_project(instance, pk_set, reverse)
48
49
50 def autofollow_project(instance, pk_set, reverse):
51 if not reverse:
52 project = instance
53 users_pks = pk_set
54
55 for user_pk in users_pks:
56 Follow.objects.update_or_create(
57 project=project, creator_id=user_pk, defaults={"enabled": True}
58 )
59 else:
60 user = instance
61 project_pks = pk_set
62
63 for project_pk in project_pks:
64 Follow.objects.update_or_create(
65 project_id=project_pk, creator=user, defaults={"enabled": True}
66 )
67
```
Path: `meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py`
Content:
```
1 from datetime import timedelta
2
3 from django.conf import settings
4 from django.contrib.contenttypes.models import ContentType
5 from django.core.management.base import BaseCommand
6
7 from adhocracy4.actions.models import Action
8 from adhocracy4.actions.verbs import Verbs
9 from meinberlin.apps.offlineevents.models import OfflineEvent
10
11
12 class Command(BaseCommand):
13 help = "Create offlineevent system actions."
14
15 def __init__(self):
16 if hasattr(settings, "ACTIONS_OFFLINE_EVENT_STARTING_HOURS"):
17 self.event_starting_hours = settings.ACTIONS_OFFLINE_EVENT_STARTING_HOURS
18 else:
19 self.event_starting_hours = 72
20
21 def handle(self, *args, **options):
22 self._event_starting()
23
24 def _event_starting(self):
25 event_ct = ContentType.objects.get_for_model(OfflineEvent)
26
27 events = OfflineEvent.objects.starts_within(hours=self.event_starting_hours)
28 for event in events:
29 existing_action = Action.objects.filter(
30 project=event.project,
31 verb=Verbs.START.value,
32 obj_content_type=event_ct,
33 obj_object_id=event.id,
34 ).first()
35
36 # If the event date has been modified and moved more than
37 # event_starting_hours ahead, schedule a new action
38 if (
39 not existing_action
40 or (
41 existing_action.timestamp
42 + timedelta(hours=self.event_starting_hours)
43 )
44 < event.date
45 ):
46 Action.objects.create(
47 project=event.project,
48 verb=Verbs.START.value,
49 obj=event,
50 timestamp=event.date,
51 )
52 elif existing_action.timestamp != event.date:
53 existing_action.timestamp = event.date
54 existing_action.save()
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/notifications/emails.py b/meinberlin/apps/notifications/emails.py
--- a/meinberlin/apps/notifications/emails.py
+++ b/meinberlin/apps/notifications/emails.py
@@ -130,10 +130,8 @@
return receivers
-class NotifyFollowersOnUpcommingEventEmail(Email):
- template_name = (
- "meinberlin_notifications/emails" "/notify_followers_event_upcomming"
- )
+class NotifyFollowersOnUpcomingEventEmail(Email):
+ template_name = "meinberlin_notifications/emails/notify_followers_event_upcoming"
def get_receivers(self):
action = self.object
diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py
--- a/meinberlin/apps/notifications/signals.py
+++ b/meinberlin/apps/notifications/signals.py
@@ -31,7 +31,7 @@
emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)
elif action.type == "offlineevent" and verb == Verbs.START:
- emails.NotifyFollowersOnUpcommingEventEmail.send(action)
+ emails.NotifyFollowersOnUpcomingEventEmail.send(action)
@receiver(dashboard_signals.project_created)
diff --git a/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py b/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py
--- a/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py
+++ b/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py
@@ -24,7 +24,9 @@
def _event_starting(self):
event_ct = ContentType.objects.get_for_model(OfflineEvent)
- events = OfflineEvent.objects.starts_within(hours=self.event_starting_hours)
+ events = OfflineEvent.objects.starts_within(
+ hours=self.event_starting_hours
+ ).exclude(project__is_draft=True)
for event in events:
existing_action = Action.objects.filter(
project=event.project,
|
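The substantive half of the patch above is the `exclude(project__is_draft=True)` guard (the rest is the Upcomming→Upcoming rename). A minimal stand-in for that filter with plain dataclasses — the real code goes through the Django ORM, so the model setup here is only an illustration:

```python
from dataclasses import dataclass

@dataclass
class Project:
    name: str
    is_draft: bool

@dataclass
class OfflineEvent:
    title: str
    project: Project

events = [
    OfflineEvent("Kick-off meeting", Project("published project", is_draft=False)),
    OfflineEvent("Preview walk-through", Project("draft project", is_draft=True)),
]

# Mirrors `.exclude(project__is_draft=True)`: drafts never produce START actions,
# so followers of unpublished projects no longer get event notifications.
notifiable = [e for e in events if not e.project.is_draft]
print([e.title for e in notifiable])  # ['Kick-off meeting']
```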
{"golden_diff": "diff --git a/meinberlin/apps/notifications/emails.py b/meinberlin/apps/notifications/emails.py\n--- a/meinberlin/apps/notifications/emails.py\n+++ b/meinberlin/apps/notifications/emails.py\n@@ -130,10 +130,8 @@\n return receivers\n \n \n-class NotifyFollowersOnUpcommingEventEmail(Email):\n- template_name = (\n- \"meinberlin_notifications/emails\" \"/notify_followers_event_upcomming\"\n- )\n+class NotifyFollowersOnUpcomingEventEmail(Email):\n+ template_name = \"meinberlin_notifications/emails/notify_followers_event_upcoming\"\n \n def get_receivers(self):\n action = self.object\ndiff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py\n--- a/meinberlin/apps/notifications/signals.py\n+++ b/meinberlin/apps/notifications/signals.py\n@@ -31,7 +31,7 @@\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n \n elif action.type == \"offlineevent\" and verb == Verbs.START:\n- emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n+ emails.NotifyFollowersOnUpcomingEventEmail.send(action)\n \n \n @receiver(dashboard_signals.project_created)\ndiff --git a/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py b/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py\n--- a/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py\n+++ b/meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py\n@@ -24,7 +24,9 @@\n def _event_starting(self):\n event_ct = ContentType.objects.get_for_model(OfflineEvent)\n \n- events = OfflineEvent.objects.starts_within(hours=self.event_starting_hours)\n+ events = OfflineEvent.objects.starts_within(\n+ hours=self.event_starting_hours\n+ ).exclude(project__is_draft=True)\n for event in events:\n existing_action = Action.objects.filter(\n project=event.project,\n", "issue": "notification sent for modules in de/unpublished project\n**URL:** (depending on follow status, so URL does not help here)\r\n**user:** follower\r\n**expected behaviour:** I should not get a notification for modules in unpublished projects\r\n**behaviour:** If I follow an unpublished or depublished project (e.g. because a project had been published and after a participation was depublished or because the initiator followed in preview before publishing) I get a notification eg for start of participation. 
The Module has to be added to project.\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** it is an edge case but was topic in two support questions\r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.contrib import auth\n\nfrom meinberlin.apps.contrib.emails import Email\n\nUser = auth.get_user_model()\n\n\ndef _exclude_actor(receivers, actor):\n if not actor:\n return receivers\n\n if hasattr(receivers, \"exclude\"):\n return receivers.exclude(id=actor.id)\n\n return [receiver for receiver in receivers if not receiver == actor]\n\n\ndef _exclude_moderators(receivers, action):\n if hasattr(action, \"project\"):\n moderator_ids = action.project.moderators.values_list(\"id\", flat=True)\n\n if hasattr(receivers, \"exclude\"):\n return receivers.exclude(id__in=moderator_ids)\n\n return [user for user in receivers if user.id not in moderator_ids]\n\n return receivers\n\n\ndef _exclude_notifications_disabled(receivers):\n if hasattr(receivers, \"filter\"):\n return receivers.filter(get_notifications=True)\n\n return [user for user in receivers if user.get_notifications]\n\n\nclass NotifyCreatorEmail(Email):\n template_name = \"meinberlin_notifications/emails/notify_creator\"\n\n def get_receivers(self):\n action = self.object\n if hasattr(action.target, \"creator\"):\n receivers = [action.target.creator]\n receivers = _exclude_notifications_disabled(receivers)\n receivers = _exclude_actor(receivers, action.actor)\n receivers = _exclude_moderators(receivers, action)\n return receivers\n return []\n\n\nclass NotifyCreatorOrContactOnModeratorFeedback(Email):\n template_name = (\n \"meinberlin_notifications/emails/notify_creator_on_moderator_feedback\"\n )\n\n def get_receivers(self):\n if hasattr(self.object, \"contact_email\"):\n # send to contact\n receivers = [self.object.contact_email]\n else:\n # send to creator\n receivers = [self.object.creator]\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n def get_context(self):\n context = super().get_context()\n context[\"object\"] = self.object\n if not hasattr(self.object, \"contact_email\"):\n # send to creator\n context[\"send_to_creator\"] = True\n return context\n\n\nclass NotifyModeratorsEmail(Email):\n template_name = \"meinberlin_notifications/emails/notify_moderator\"\n\n def get_receivers(self):\n action = self.object\n receivers = action.project.moderators.all()\n receivers = _exclude_actor(receivers, action.actor)\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n\nclass NotifyInitiatorsOnProjectCreatedEmail(Email):\n template_name = \"meinberlin_notifications/emails/notify_initiators_project_created\"\n\n def get_receivers(self):\n project = self.object\n creator = User.objects.get(pk=self.kwargs[\"creator_pk\"])\n receivers = project.organisation.initiators.all()\n receivers = _exclude_actor(receivers, creator)\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n def get_context(self):\n context = super().get_context()\n creator = User.objects.get(pk=self.kwargs[\"creator_pk\"])\n context[\"creator\"] = creator\n context[\"project\"] = self.object\n return context\n\n\nclass NotifyFollowersOnPhaseStartedEmail(Email):\n template_name = \"meinberlin_notifications/emails\" \"/notify_followers_phase_started\"\n\n def get_receivers(self):\n action = self.object\n receivers = User.objects.filter(\n follow__project=action.project,\n follow__enabled=True,\n )\n receivers = 
_exclude_notifications_disabled(receivers)\n return receivers\n\n\nclass NotifyFollowersOnPhaseIsOverSoonEmail(Email):\n template_name = (\n \"meinberlin_notifications/emails\" \"/notify_followers_phase_over_soon\"\n )\n\n def get_receivers(self):\n action = self.object\n receivers = User.objects.filter(\n follow__project=action.project,\n follow__enabled=True,\n )\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n\nclass NotifyFollowersOnUpcommingEventEmail(Email):\n template_name = (\n \"meinberlin_notifications/emails\" \"/notify_followers_event_upcomming\"\n )\n\n def get_receivers(self):\n action = self.object\n receivers = User.objects.filter(\n follow__project=action.project,\n follow__enabled=True,\n )\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n", "path": "meinberlin/apps/notifications/emails.py"}, {"content": "from django.contrib.auth import get_user_model\nfrom django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.dashboard import signals as dashboard_signals\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\nUser = get_user_model()\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if action.type in (\"item\", \"comment\") and verb in (Verbs.CREATE, Verbs.ADD):\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n\n elif action.type == \"phase\" and action.project.project_type == \"a4projects.Project\":\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n elif action.type == \"offlineevent\" and verb == Verbs.START:\n emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n\n\n@receiver(dashboard_signals.project_created)\ndef send_project_created_notifications(**kwargs):\n project = kwargs.get(\"project\")\n creator = kwargs.get(\"user\")\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(project, creator_pk=creator.pk)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == \"post_add\":\n autofollow_project(instance, pk_set, reverse)\n\n\ndef autofollow_project(instance, pk_set, reverse):\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project, creator_id=user_pk, defaults={\"enabled\": True}\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk, creator=user, defaults={\"enabled\": True}\n )\n", "path": "meinberlin/apps/notifications/signals.py"}, {"content": "from datetime import timedelta\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.management.base import BaseCommand\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom meinberlin.apps.offlineevents.models import OfflineEvent\n\n\nclass Command(BaseCommand):\n help = \"Create offlineevent system actions.\"\n\n def __init__(self):\n if hasattr(settings, 
\"ACTIONS_OFFLINE_EVENT_STARTING_HOURS\"):\n self.event_starting_hours = settings.ACTIONS_OFFLINE_EVENT_STARTING_HOURS\n else:\n self.event_starting_hours = 72\n\n def handle(self, *args, **options):\n self._event_starting()\n\n def _event_starting(self):\n event_ct = ContentType.objects.get_for_model(OfflineEvent)\n\n events = OfflineEvent.objects.starts_within(hours=self.event_starting_hours)\n for event in events:\n existing_action = Action.objects.filter(\n project=event.project,\n verb=Verbs.START.value,\n obj_content_type=event_ct,\n obj_object_id=event.id,\n ).first()\n\n # If the event date has been modified and moved more than\n # event_starting_hours ahead, schedule a new action\n if (\n not existing_action\n or (\n existing_action.timestamp\n + timedelta(hours=self.event_starting_hours)\n )\n < event.date\n ):\n Action.objects.create(\n project=event.project,\n verb=Verbs.START.value,\n obj=event,\n timestamp=event.date,\n )\n elif existing_action.timestamp != event.date:\n existing_action.timestamp = event.date\n existing_action.save()\n", "path": "meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py"}], "after_files": [{"content": "from django.contrib import auth\n\nfrom meinberlin.apps.contrib.emails import Email\n\nUser = auth.get_user_model()\n\n\ndef _exclude_actor(receivers, actor):\n if not actor:\n return receivers\n\n if hasattr(receivers, \"exclude\"):\n return receivers.exclude(id=actor.id)\n\n return [receiver for receiver in receivers if not receiver == actor]\n\n\ndef _exclude_moderators(receivers, action):\n if hasattr(action, \"project\"):\n moderator_ids = action.project.moderators.values_list(\"id\", flat=True)\n\n if hasattr(receivers, \"exclude\"):\n return receivers.exclude(id__in=moderator_ids)\n\n return [user for user in receivers if user.id not in moderator_ids]\n\n return receivers\n\n\ndef _exclude_notifications_disabled(receivers):\n if hasattr(receivers, \"filter\"):\n return receivers.filter(get_notifications=True)\n\n return [user for user in receivers if user.get_notifications]\n\n\nclass NotifyCreatorEmail(Email):\n template_name = \"meinberlin_notifications/emails/notify_creator\"\n\n def get_receivers(self):\n action = self.object\n if hasattr(action.target, \"creator\"):\n receivers = [action.target.creator]\n receivers = _exclude_notifications_disabled(receivers)\n receivers = _exclude_actor(receivers, action.actor)\n receivers = _exclude_moderators(receivers, action)\n return receivers\n return []\n\n\nclass NotifyCreatorOrContactOnModeratorFeedback(Email):\n template_name = (\n \"meinberlin_notifications/emails/notify_creator_on_moderator_feedback\"\n )\n\n def get_receivers(self):\n if hasattr(self.object, \"contact_email\"):\n # send to contact\n receivers = [self.object.contact_email]\n else:\n # send to creator\n receivers = [self.object.creator]\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n def get_context(self):\n context = super().get_context()\n context[\"object\"] = self.object\n if not hasattr(self.object, \"contact_email\"):\n # send to creator\n context[\"send_to_creator\"] = True\n return context\n\n\nclass NotifyModeratorsEmail(Email):\n template_name = \"meinberlin_notifications/emails/notify_moderator\"\n\n def get_receivers(self):\n action = self.object\n receivers = action.project.moderators.all()\n receivers = _exclude_actor(receivers, action.actor)\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n\nclass 
NotifyInitiatorsOnProjectCreatedEmail(Email):\n template_name = \"meinberlin_notifications/emails/notify_initiators_project_created\"\n\n def get_receivers(self):\n project = self.object\n creator = User.objects.get(pk=self.kwargs[\"creator_pk\"])\n receivers = project.organisation.initiators.all()\n receivers = _exclude_actor(receivers, creator)\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n def get_context(self):\n context = super().get_context()\n creator = User.objects.get(pk=self.kwargs[\"creator_pk\"])\n context[\"creator\"] = creator\n context[\"project\"] = self.object\n return context\n\n\nclass NotifyFollowersOnPhaseStartedEmail(Email):\n template_name = \"meinberlin_notifications/emails\" \"/notify_followers_phase_started\"\n\n def get_receivers(self):\n action = self.object\n receivers = User.objects.filter(\n follow__project=action.project,\n follow__enabled=True,\n )\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n\nclass NotifyFollowersOnPhaseIsOverSoonEmail(Email):\n template_name = (\n \"meinberlin_notifications/emails\" \"/notify_followers_phase_over_soon\"\n )\n\n def get_receivers(self):\n action = self.object\n receivers = User.objects.filter(\n follow__project=action.project,\n follow__enabled=True,\n )\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n\n\nclass NotifyFollowersOnUpcomingEventEmail(Email):\n template_name = \"meinberlin_notifications/emails/notify_followers_event_upcoming\"\n\n def get_receivers(self):\n action = self.object\n receivers = User.objects.filter(\n follow__project=action.project,\n follow__enabled=True,\n )\n receivers = _exclude_notifications_disabled(receivers)\n return receivers\n", "path": "meinberlin/apps/notifications/emails.py"}, {"content": "from django.contrib.auth import get_user_model\nfrom django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.dashboard import signals as dashboard_signals\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.projects.models import Project\n\nfrom . 
import emails\n\nUser = get_user_model()\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if action.type in (\"item\", \"comment\") and verb in (Verbs.CREATE, Verbs.ADD):\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n\n elif action.type == \"phase\" and action.project.project_type == \"a4projects.Project\":\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n elif action.type == \"offlineevent\" and verb == Verbs.START:\n emails.NotifyFollowersOnUpcomingEventEmail.send(action)\n\n\n@receiver(dashboard_signals.project_created)\ndef send_project_created_notifications(**kwargs):\n project = kwargs.get(\"project\")\n creator = kwargs.get(\"user\")\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(project, creator_pk=creator.pk)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == \"post_add\":\n autofollow_project(instance, pk_set, reverse)\n\n\ndef autofollow_project(instance, pk_set, reverse):\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project, creator_id=user_pk, defaults={\"enabled\": True}\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk, creator=user, defaults={\"enabled\": True}\n )\n", "path": "meinberlin/apps/notifications/signals.py"}, {"content": "from datetime import timedelta\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.management.base import BaseCommand\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom meinberlin.apps.offlineevents.models import OfflineEvent\n\n\nclass Command(BaseCommand):\n help = \"Create offlineevent system actions.\"\n\n def __init__(self):\n if hasattr(settings, \"ACTIONS_OFFLINE_EVENT_STARTING_HOURS\"):\n self.event_starting_hours = settings.ACTIONS_OFFLINE_EVENT_STARTING_HOURS\n else:\n self.event_starting_hours = 72\n\n def handle(self, *args, **options):\n self._event_starting()\n\n def _event_starting(self):\n event_ct = ContentType.objects.get_for_model(OfflineEvent)\n\n events = OfflineEvent.objects.starts_within(\n hours=self.event_starting_hours\n ).exclude(project__is_draft=True)\n for event in events:\n existing_action = Action.objects.filter(\n project=event.project,\n verb=Verbs.START.value,\n obj_content_type=event_ct,\n obj_object_id=event.id,\n ).first()\n\n # If the event date has been modified and moved more than\n # event_starting_hours ahead, schedule a new action\n if (\n not existing_action\n or (\n existing_action.timestamp\n + timedelta(hours=self.event_starting_hours)\n )\n < event.date\n ):\n Action.objects.create(\n project=event.project,\n verb=Verbs.START.value,\n obj=event,\n timestamp=event.date,\n )\n elif existing_action.timestamp != event.date:\n existing_action.timestamp = event.date\n existing_action.save()\n", "path": "meinberlin/apps/offlineevents/management/commands/create_offlineevent_system_actions.py"}]}
| 2,839 | 484 |
gh_patches_debug_18757
|
rasdani/github-patches
|
git_diff
|
opensearch-project__opensearch-build-2672
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[WIN M1] WIN (ZIP/MSI) Build/Assemble Process
|Tasks(ZIP) |**Tasks(MSI)** |Estimate |Status(ZIP) |Status(MSI) |Notes |
|--- |--- |--- |--- |--- |--- |
|Re-use the existing build process to generate the OpenSearch/Dashboards min + all of the plugin artifacts for the WIN package to use |Same |0 |Completed |Completed | |
|The artifacts should be built with the ~~LINUX~~ Windows platform specified, ~~as we will cross-compile the WIN binary on LINUX then package with the WINDOWS JDK~~. |Same |1 |Completed |Completed |~~This is still in debate as we can technically build WIN on a Windows machine, but there are a lot of things to set up just so Jenkins can run Python on a Windows agent.~~ We are able to run shell scripts natively on the Windows agent |
|We already have "zip" supported for the `--distribution` parameter, but we need to check whether it is already combined with `--platform windows`. |We do not have "exe" support for `--distribution` yet. However, this is different from "RPM" as we do not need the min artifact to be an exe. The min artifact can be zip and the final product in assemble can be exe. |2 |Completed |Completed |As for "exe" we need to discuss whether a standard exe is enough, or whether we want to invest in the official Windows installer "msi". |
|We already have the "--distribution" param available in the assemble workflow; we just need to verify the existing "ZIP" functionality. |We already have the "--distribution" param available in the assemble workflow, but no support for "EXE" redirection yet. We need to add a child class supporting the new distribution (see the dispatch sketch below the table). |2 |Completed |Completed | |
|The generation code should pull the artifacts from the build workflow to a temporary location |Same |1 |Completed |Completed | |
|The code will compile the components and also call the existing install function to install plugins on the min artifacts |Same |1 |Completed |Completed | ETA: 2022/09/16 |
|After installation, the code will execute a tool or utility to wrap all the content into the corresponding distribution format |Same |1 |Completed |Completed | **20220819 Note:** Plugin compilation currently has some issues with the build scripts; the compilation itself seems OK, at least on things like common-utils. <br/><br/> ETA: 2022/09/16|
|The code will move the final distribution artifact from the temp location to dist folder |Same |1 |Completed |Completed | ETA: 2022/09/07 |
### Note: MSI section in this milestone is obsolete as MSI is just a wrapper of the content in ZIP. So as long as ZIP is completed here MSI is considered complete as well.
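
A rough sketch of the distribution dispatch described in the two `--distribution` rows above: the class names mirror `src/assemble_workflow/dist.py` shown later in this record, but the registry and the `dist_for` helper are illustrative assumptions, not the actual opensearch-build code.

```python
# Hypothetical dispatch from a --distribution value to a bundle class; only the
# class names are taken from the repository, the rest is an assumed sketch.
from typing import Dict, Type


class Dist: ...
class DistTar(Dist): ...
class DistZip(Dist): ...
class DistRpm(Dist): ...


DISTRIBUTIONS: Dict[str, Type[Dist]] = {
    "tar": DistTar,
    "zip": DistZip,
    "rpm": DistRpm,
}


def dist_for(distribution: str) -> Type[Dist]:
    # Unknown values fail fast instead of silently falling back to tar.
    try:
        return DISTRIBUTIONS[distribution.lower()]
    except KeyError:
        raise ValueError(f"Unsupported distribution: {distribution}") from None


print(dist_for("zip").__name__)  # -> DistZip
```

Under this shape, adding a Windows-specific wrapper later would amount to registering one more child class, which is essentially what the MSI column of the table calls for.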
- [ ] do not remove
* PRs:
20220715:
* https://github.com/opensearch-project/opensearch-ci/pull/155
20220721:
* https://github.com/opensearch-project/opensearch-ci/pull/167
20220722:
* https://github.com/opensearch-project/opensearch-ci/pull/169
20220819:
* https://github.com/opensearch-project/opensearch-build/pull/2483
* https://github.com/opensearch-project/opensearch-ci/pull/187
20220824:
* https://github.com/opensearch-project/opensearch-ci/pull/190
20220902:
* https://github.com/opensearch-project/opensearch-ci/pull/197
* https://github.com/opensearch-project/opensearch-build/pull/2550
20220907:
* common-utils: https://github.com/opensearch-project/common-utils/issues/238
* https://github.com/opensearch-project/common-utils/pull/258
* https://github.com/opensearch-project/opensearch-ci/pull/199
20220914:
* alerting: https://github.com/opensearch-project/alerting/issues/557
* https://github.com/opensearch-project/alerting/issues/573
20220915:
* https://github.com/opensearch-project/opensearch-ci/pull/201
20220916:
* https://github.com/opensearch-project/opensearch-ci/pull/202
20220927:
* https://github.com/opensearch-project/alerting/pull/570
* https://github.com/opensearch-project/alerting/pull/571
* https://github.com/opensearch-project/alerting/pull/572
20220928:
* https://github.com/opensearch-project/opensearch-build/pull/2672
* security: https://github.com/opensearch-project/security/issues/2122
* https://github.com/opensearch-project/common-utils/pull/258
20221004:
* https://github.com/opensearch-project/opensearch-build/pull/2678
20221006:
* Security: https://github.com/opensearch-project/security/issues/2148
* https://github.com/opensearch-project/opensearch-build/pull/2704
20221007:
* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2535
20221010:
* https://github.com/opensearch-project/opensearch-ci/pull/208
* https://github.com/opensearch-project/opensearch-ci/pull/209
20221011:
* https://github.com/opensearch-project/opensearch-build-libraries/pull/14
* https://github.com/opensearch-project/opensearch-build/pull/2721
* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2554
20221012:
* https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2026
20221013:
* https://github.com/opensearch-project/opensearch-build/pull/2730
20221018:
* https://github.com/opensearch-project/opensearch-build/pull/2756
* SQL: https://github.com/opensearch-project/sql/issues/928
* https://github.com/opensearch-project/opensearch-build/pull/2761
20221019:
* https://github.com/opensearch-project/OpenSearch/issues/4817
20221024:
* https://github.com/opensearch-project/opensearch-build/pull/2788
20221025:
* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2669
* https://github.com/opensearch-project/k-NN/pull/595
* https://github.com/opensearch-project/opensearch-ci/pull/212
20221027:
* https://github.com/opensearch-project/opensearch-ci/pull/213
* https://github.com/opensearch-project/opensearch-build/pull/2812
* https://github.com/opensearch-project/opensearch-build/pull/2817
* https://github.com/opensearch-project/opensearch-ci/pull/214
20221028:
* https://github.com/opensearch-project/opensearch-build/pull/2818
* https://github.com/opensearch-project/OpenSearch-Dashboards/pull/2686
20221101:
* https://github.com/opensearch-project/opensearch-build/pull/2840
* https://github.com/opensearch-project/opensearch-build/pull/2841
* https://github.com/opensearch-project/opensearch-ci/pull/215
20221103:
* https://github.com/opensearch-project/opensearch-build/pull/2845
* https://github.com/opensearch-project/opensearch-ci/pull/219
~~* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2764~~
* OSD ftrepo: https://github.com/opensearch-project/opensearch-dashboards-functional-test/issues/370
20221104:
* https://github.com/opensearch-project/opensearch-build/pull/2848
* https://github.com/opensearch-project/opensearch-ci/pull/224
* https://github.com/opensearch-project/opensearch-build-libraries/pull/28
20221107:
* https://github.com/opensearch-project/opensearch-build/pull/2860
* https://github.com/opensearch-project/opensearch-ci/pull/228
20221108:
* OSD ftrepo: https://github.com/opensearch-project/opensearch-dashboards-functional-test/issues/370
* https://github.com/opensearch-project/opensearch-build-libraries/pull/31
* https://github.com/opensearch-project/opensearch-build/pull/2869
20221114:
* #2892
20221116:
* https://github.com/opensearch-project/opensearch-build/pull/2914
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/assemble_workflow/dist.py`
Content:
```
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8 import errno
9 import logging
10 import os
11 import shutil
12 import tarfile
13 import zipfile
14 from abc import ABC, abstractmethod
15
16 from assemble_workflow.bundle_rpm import BundleRpm
17 from manifests.build_manifest import BuildManifest
18 from system.zip_file import ZipFile
19
20
21 class Dist(ABC):
22 def __init__(self, name: str, path: str, min_path: str, build_cls: BuildManifest.Build) -> None:
23 self.build_cls = build_cls
24 self.name = name
25 self.filename = name.lower()
26 self.path = path
27 self.min_path = min_path
28
29 @abstractmethod
30 def __extract__(self, dest: str) -> None:
31 pass
32
33 @abstractmethod
34 def __build__(self, name: str, dest: str) -> None:
35 pass
36
37 def find_min_archive_path(self, dest: str) -> str:
38 '''
39 Return the single folder that contains the main files of {name}.
40 This folder is normally in the format of {filename}-{exact or bc version}.
41
42 Ex: opensearch-1.3.0 or opensearch-dashboards-1.3.0
43
44 Adding a check of whether {filename} is in folder name is to ensure
45 that only folders in above format are returned.
46
47 In tar there is only 1 top level folders after extraction.
48 But in rpm there are multiple folders such as var / usr / opensearch-1.3.0 ......
49
50 This is to ensure corrent folder is found, instead of simply choosing the 1st in the list.
51 '''
52
53 for file in os.scandir(dest):
54 if self.filename in file.name and file.is_dir():
55 self.archive_path = file.path
56 return self.archive_path
57
58 raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), os.path.join(dest, "*"))
59
60 def rename_archive_path(self, path: str) -> str:
61 '''
62 Rename the single folder at the top level of the tar that contains the min distribution to match current version.
63 For example, when OpenSearch 1.1.1 is built using the 1.1.0 artifact, we rename opensearch-1.1.0 to opensearch-1.1.1.
64 '''
65 current_name = os.path.basename(path)
66 target_name = self.min_path
67 if current_name != target_name:
68 logging.info(f"Renaming {path} to {target_name}.")
69 target_path = os.path.join(os.path.dirname(path), target_name)
70 os.rename(path, target_path)
71 return target_path
72 else:
73 return path
74
75 def extract(self, dest: str) -> str:
76 self.__extract__(dest)
77 self.archive_path = self.rename_archive_path(
78 self.find_min_archive_path(dest)
79 )
80 return self.archive_path
81
82 def build(self, name: str, dest: str) -> None:
83 self.__build__(name, dest)
84 path = os.path.join(dest, name)
85 shutil.copyfile(name, path)
86 logging.info(f"Published {path}.")
87
88
89 class DistTar(Dist):
90 def __extract__(self, dest: str) -> None:
91 with tarfile.open(self.path, "r:gz") as tar:
92 tar.extractall(dest)
93
94 def __build__(self, name: str, dest: str) -> None:
95 with tarfile.open(name, "w:gz") as tar:
96 tar.add(self.archive_path, arcname=os.path.basename(self.archive_path))
97
98
99 class DistZip(Dist):
100 def __extract__(self, dest: str) -> None:
101 with ZipFile(self.path, "r") as zip:
102 zip.extractall(dest)
103
104 def __build__(self, name: str, dest: str) -> None:
105 with ZipFile(name, "w", zipfile.ZIP_DEFLATED) as zip:
106 rootlen = len(self.archive_path) + 1
107 for base, _, files in os.walk(self.archive_path):
108 for file in files:
109 fn = os.path.join(base, file)
110 zip.write(fn, fn[rootlen:])
111
112
113 class DistRpm(Dist):
114
115 def __extract__(self, dest: str) -> None:
116 BundleRpm(self.filename, self.path, self.min_path).extract(dest)
117
118 def __build__(self, name: str, dest: str) -> None:
119 BundleRpm(self.filename, self.path, self.min_path).build(name, dest, self.archive_path, self.build_cls)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/assemble_workflow/dist.py b/src/assemble_workflow/dist.py
--- a/src/assemble_workflow/dist.py
+++ b/src/assemble_workflow/dist.py
@@ -103,11 +103,17 @@
def __build__(self, name: str, dest: str) -> None:
with ZipFile(name, "w", zipfile.ZIP_DEFLATED) as zip:
- rootlen = len(self.archive_path) + 1
+ # root : /tmp/tmp********/opensearch-<version+qualifier>
+ # leadingdir : opensearch-<version+qualifier>
+ # root no leading dir: /tmp/tmp********/
+ # This is to preserve the leading directory `opensearch-<version+qualifier>` in zip
+ rootlen = len(self.archive_path)
+ leadingdirlen = len(os.path.basename(self.archive_path))
+ noleadingdirlen = rootlen - leadingdirlen
for base, _, files in os.walk(self.archive_path):
for file in files:
fn = os.path.join(base, file)
- zip.write(fn, fn[rootlen:])
+ zip.write(fn, fn[noleadingdirlen:])
class DistRpm(Dist):
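
The core of the fix is the slice index used for the zip arcname. Below is a standalone check of that arithmetic; the temporary POSIX path is made up, only the slicing mirrors the diff:

```python
# Old behaviour: slicing from len(root) + 1 drops the opensearch-<version> folder.
# New behaviour: slicing from len(root) - len(basename) keeps it inside the zip.
import os

archive_path = "/tmp/tmp12345678/opensearch-2.4.0"   # assumed example path
fn = os.path.join(archive_path, "bin", "opensearch")

rootlen = len(archive_path)
noleadingdirlen = rootlen - len(os.path.basename(archive_path))

print(fn[rootlen + 1:])      # 'bin/opensearch'                  -> leading dir lost
print(fn[noleadingdirlen:])  # 'opensearch-2.4.0/bin/opensearch' -> leading dir preserved
```

This also brings `DistZip.__build__` in line with `DistTar.__build__`, which already keeps the top-level folder via `arcname=os.path.basename(self.archive_path)`, so zip and tar bundles unpack to the same layout.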
|
{"golden_diff": "diff --git a/src/assemble_workflow/dist.py b/src/assemble_workflow/dist.py\n--- a/src/assemble_workflow/dist.py\n+++ b/src/assemble_workflow/dist.py\n@@ -103,11 +103,17 @@\n \n def __build__(self, name: str, dest: str) -> None:\n with ZipFile(name, \"w\", zipfile.ZIP_DEFLATED) as zip:\n- rootlen = len(self.archive_path) + 1\n+ # root : /tmp/tmp********/opensearch-<version+qualifier>\n+ # leadingdir : opensearch-<version+qualifier>\n+ # root no leading dir: /tmp/tmp********/\n+ # This is to preserve the leading directory `opensearch-<version+qualifier>` in zip\n+ rootlen = len(self.archive_path)\n+ leadingdirlen = len(os.path.basename(self.archive_path))\n+ noleadingdirlen = rootlen - leadingdirlen\n for base, _, files in os.walk(self.archive_path):\n for file in files:\n fn = os.path.join(base, file)\n- zip.write(fn, fn[rootlen:])\n+ zip.write(fn, fn[noleadingdirlen:])\n \n \n class DistRpm(Dist):\n", "issue": "[WIN M1] WIN (ZIP/MSI) Build/Assemble Process \n|Tasks(ZIP) |**Tasks(MSI)** |Estimate |Status(ZIP) |Status(MSI) |Notes |\r\n|--- |--- |--- |--- |--- |--- |\r\n|Re-use the existing build process to generate the OpenSearch/Dashboards min + all of the plugins artifacts for WIN package to use |Same |0 |Completed |Completed | |\r\n|The artifacts should be built with ~~LINUX~~ Windows platform specified, ~~as we will cross-compile WIN binary on LINUX then package with WINDOWS JDK~~. |Same |1 |Completed |Completed |~~This is still in debate as we can techinically build WIN on Windows machine, but there are a lot of things to setup just so Jenkins can run Python on Windows Agent.~~ We are able to run shell scripts natively on Windows agent |\r\n|We already have \"zip\" supported for \"\u2014distribution\u201c parameter, but needs to check whether it is already combined with `--platform windows`. |We do not have \"exe\" support for \"\u2014distribution\u201c yet. However, this is different from \"RPM\" as we do not need min artifact to be a exe. The min artifact can be zip and the final product in assemble can be exe. |2 |Completed |Completed |As for \"exe\" we need to discuss whether a standard exe is enough, or do we want to invest into Windows official installer \"msi\". |\r\n|We already have \"--distribution\" param available in assemble workflow, just need to verify existing functions of \"ZIP\". |We already have \"--distribution\" param available in assemble workflow, but no support for \"EXE\" redirection. Need to add a child class supporting the new distribution. |2 |Completed |Completed | |\r\n|The generation code should pull the artifacts from the build workflow to a temporary location |Same |1 |Completed |Completed | |\r\n|The code will compile the components and also call existing install function to install plugins on min artifacts |Same |1 |Completed |Completed | ETA: 2022/09/16 |\r\n|After installation, the code will execute a tool or utility to wrap all the content into corresponding distribution format |Same |1 |Completed |Completed | **20220819 Note:** Plugin compilation currently have some issues with the build scripts, the compilation itself seems ok at least on things like common-utils. <br/><br/> ETA: 2022/09/16|\r\n|The code will move the final distribution artifact from the temp location to dist folder |Same |1 |Completed |Completed | ETA: 2022/09/07 |\r\n\r\n\r\n### Note: MSI section in this milestone is obsolete as MSI is just a wrapper of the content in ZIP. 
So as long as ZIP is completed here MSI is considered complete as well.\r\n\r\n- [ ] do not remove\r\n\r\n* PRs:\r\n\r\n20220715:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/155\r\n\r\n20220721:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/167\r\n\r\n20220722:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/169\r\n\r\n20220819:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2483\r\n* https://github.com/opensearch-project/opensearch-ci/pull/187\r\n\r\n20220824:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/190\r\n\r\n20220902:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/197\r\n* https://github.com/opensearch-project/opensearch-build/pull/2550\r\n\r\n20220907:\r\n* common-utils: https://github.com/opensearch-project/common-utils/issues/238\r\n * https://github.com/opensearch-project/common-utils/pull/258\r\n* https://github.com/opensearch-project/opensearch-ci/pull/199\r\n\r\n20220914:\r\n* alerting: https://github.com/opensearch-project/alerting/issues/557\r\n * https://github.com/opensearch-project/alerting/issues/573\r\n\r\n20220915:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/201\r\n\r\n20220916:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/202\r\n\r\n20220927:\r\n* https://github.com/opensearch-project/alerting/pull/570\r\n* https://github.com/opensearch-project/alerting/pull/571\r\n* https://github.com/opensearch-project/alerting/pull/572\r\n\r\n20220928:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2672\r\n* security: https://github.com/opensearch-project/security/issues/2122\r\n* https://github.com/opensearch-project/common-utils/pull/258\r\n\r\n20221004:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2678\r\n\r\n20221006:\r\n* Security: https://github.com/opensearch-project/security/issues/2148\r\n* https://github.com/opensearch-project/opensearch-build/pull/2704\r\n\r\n20221007:\r\n* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2535\r\n\r\n20221010:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/208\r\n* https://github.com/opensearch-project/opensearch-ci/pull/209\r\n\r\n20221011:\r\n* https://github.com/opensearch-project/opensearch-build-libraries/pull/14\r\n* https://github.com/opensearch-project/opensearch-build/pull/2721\r\n* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2554\r\n\r\n20221012:\r\n* https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2026\r\n\r\n20221013:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2730\r\n\r\n20221018:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2756\r\n* SQL: https://github.com/opensearch-project/sql/issues/928\r\n* https://github.com/opensearch-project/opensearch-build/pull/2761\r\n\r\n20221019:\r\n* https://github.com/opensearch-project/OpenSearch/issues/4817\r\n\r\n20221024:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2788\r\n\r\n20221025:\r\n* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2669\r\n* https://github.com/opensearch-project/k-NN/pull/595\r\n* https://github.com/opensearch-project/opensearch-ci/pull/212\r\n\r\n20221027:\r\n* https://github.com/opensearch-project/opensearch-ci/pull/213\r\n* https://github.com/opensearch-project/opensearch-build/pull/2812\r\n* https://github.com/opensearch-project/opensearch-build/pull/2817\r\n* 
https://github.com/opensearch-project/opensearch-ci/pull/214\r\n\r\n20221028:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2818\r\n* https://github.com/opensearch-project/OpenSearch-Dashboards/pull/2686\r\n\r\n20221101:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2840\r\n* https://github.com/opensearch-project/opensearch-build/pull/2841\r\n* https://github.com/opensearch-project/opensearch-ci/pull/215\r\n\r\n20221103:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2845\r\n* https://github.com/opensearch-project/opensearch-ci/pull/219\r\n~~* OSD: https://github.com/opensearch-project/OpenSearch-Dashboards/issues/2764~~\r\n* OSD ftrepo: https://github.com/opensearch-project/opensearch-dashboards-functional-test/issues/370\r\n\r\n20221104:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2848\r\n* https://github.com/opensearch-project/opensearch-ci/pull/224\r\n* https://github.com/opensearch-project/opensearch-build-libraries/pull/28\r\n\r\n20221107:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2860\r\n* https://github.com/opensearch-project/opensearch-ci/pull/228\r\n\r\n20221108:\r\n* OSD ftrepo: https://github.com/opensearch-project/opensearch-dashboards-functional-test/issues/370\r\n* https://github.com/opensearch-project/opensearch-build-libraries/pull/31\r\n* https://github.com/opensearch-project/opensearch-build/pull/2869\r\n\r\n20221114:\r\n* #2892\r\n\r\n20221116:\r\n* https://github.com/opensearch-project/opensearch-build/pull/2914\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport errno\nimport logging\nimport os\nimport shutil\nimport tarfile\nimport zipfile\nfrom abc import ABC, abstractmethod\n\nfrom assemble_workflow.bundle_rpm import BundleRpm\nfrom manifests.build_manifest import BuildManifest\nfrom system.zip_file import ZipFile\n\n\nclass Dist(ABC):\n def __init__(self, name: str, path: str, min_path: str, build_cls: BuildManifest.Build) -> None:\n self.build_cls = build_cls\n self.name = name\n self.filename = name.lower()\n self.path = path\n self.min_path = min_path\n\n @abstractmethod\n def __extract__(self, dest: str) -> None:\n pass\n\n @abstractmethod\n def __build__(self, name: str, dest: str) -> None:\n pass\n\n def find_min_archive_path(self, dest: str) -> str:\n '''\n Return the single folder that contains the main files of {name}.\n This folder is normally in the format of {filename}-{exact or bc version}.\n\n Ex: opensearch-1.3.0 or opensearch-dashboards-1.3.0\n\n Adding a check of whether {filename} is in folder name is to ensure\n that only folders in above format are returned.\n\n In tar there is only 1 top level folders after extraction.\n But in rpm there are multiple folders such as var / usr / opensearch-1.3.0 ......\n\n This is to ensure corrent folder is found, instead of simply choosing the 1st in the list.\n '''\n\n for file in os.scandir(dest):\n if self.filename in file.name and file.is_dir():\n self.archive_path = file.path\n return self.archive_path\n\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), os.path.join(dest, \"*\"))\n\n def rename_archive_path(self, path: str) -> str:\n '''\n Rename the single folder at the top level of the tar that contains the min distribution to match current version.\n For example, 
when OpenSearch 1.1.1 is built using the 1.1.0 artifact, we rename opensearch-1.1.0 to opensearch-1.1.1.\n '''\n current_name = os.path.basename(path)\n target_name = self.min_path\n if current_name != target_name:\n logging.info(f\"Renaming {path} to {target_name}.\")\n target_path = os.path.join(os.path.dirname(path), target_name)\n os.rename(path, target_path)\n return target_path\n else:\n return path\n\n def extract(self, dest: str) -> str:\n self.__extract__(dest)\n self.archive_path = self.rename_archive_path(\n self.find_min_archive_path(dest)\n )\n return self.archive_path\n\n def build(self, name: str, dest: str) -> None:\n self.__build__(name, dest)\n path = os.path.join(dest, name)\n shutil.copyfile(name, path)\n logging.info(f\"Published {path}.\")\n\n\nclass DistTar(Dist):\n def __extract__(self, dest: str) -> None:\n with tarfile.open(self.path, \"r:gz\") as tar:\n tar.extractall(dest)\n\n def __build__(self, name: str, dest: str) -> None:\n with tarfile.open(name, \"w:gz\") as tar:\n tar.add(self.archive_path, arcname=os.path.basename(self.archive_path))\n\n\nclass DistZip(Dist):\n def __extract__(self, dest: str) -> None:\n with ZipFile(self.path, \"r\") as zip:\n zip.extractall(dest)\n\n def __build__(self, name: str, dest: str) -> None:\n with ZipFile(name, \"w\", zipfile.ZIP_DEFLATED) as zip:\n rootlen = len(self.archive_path) + 1\n for base, _, files in os.walk(self.archive_path):\n for file in files:\n fn = os.path.join(base, file)\n zip.write(fn, fn[rootlen:])\n\n\nclass DistRpm(Dist):\n\n def __extract__(self, dest: str) -> None:\n BundleRpm(self.filename, self.path, self.min_path).extract(dest)\n\n def __build__(self, name: str, dest: str) -> None:\n BundleRpm(self.filename, self.path, self.min_path).build(name, dest, self.archive_path, self.build_cls)\n", "path": "src/assemble_workflow/dist.py"}], "after_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport errno\nimport logging\nimport os\nimport shutil\nimport tarfile\nimport zipfile\nfrom abc import ABC, abstractmethod\n\nfrom assemble_workflow.bundle_rpm import BundleRpm\nfrom manifests.build_manifest import BuildManifest\nfrom system.zip_file import ZipFile\n\n\nclass Dist(ABC):\n def __init__(self, name: str, path: str, min_path: str, build_cls: BuildManifest.Build) -> None:\n self.build_cls = build_cls\n self.name = name\n self.filename = name.lower()\n self.path = path\n self.min_path = min_path\n\n @abstractmethod\n def __extract__(self, dest: str) -> None:\n pass\n\n @abstractmethod\n def __build__(self, name: str, dest: str) -> None:\n pass\n\n def find_min_archive_path(self, dest: str) -> str:\n '''\n Return the single folder that contains the main files of {name}.\n This folder is normally in the format of {filename}-{exact or bc version}.\n\n Ex: opensearch-1.3.0 or opensearch-dashboards-1.3.0\n\n Adding a check of whether {filename} is in folder name is to ensure\n that only folders in above format are returned.\n\n In tar there is only 1 top level folders after extraction.\n But in rpm there are multiple folders such as var / usr / opensearch-1.3.0 ......\n\n This is to ensure corrent folder is found, instead of simply choosing the 1st in the list.\n '''\n\n for file in os.scandir(dest):\n if self.filename in file.name and file.is_dir():\n self.archive_path = file.path\n return 
self.archive_path\n\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), os.path.join(dest, \"*\"))\n\n def rename_archive_path(self, path: str) -> str:\n '''\n Rename the single folder at the top level of the tar that contains the min distribution to match current version.\n For example, when OpenSearch 1.1.1 is built using the 1.1.0 artifact, we rename opensearch-1.1.0 to opensearch-1.1.1.\n '''\n current_name = os.path.basename(path)\n target_name = self.min_path\n if current_name != target_name:\n logging.info(f\"Renaming {path} to {target_name}.\")\n target_path = os.path.join(os.path.dirname(path), target_name)\n os.rename(path, target_path)\n return target_path\n else:\n return path\n\n def extract(self, dest: str) -> str:\n self.__extract__(dest)\n self.archive_path = self.rename_archive_path(\n self.find_min_archive_path(dest)\n )\n return self.archive_path\n\n def build(self, name: str, dest: str) -> None:\n self.__build__(name, dest)\n path = os.path.join(dest, name)\n shutil.copyfile(name, path)\n logging.info(f\"Published {path}.\")\n\n\nclass DistTar(Dist):\n def __extract__(self, dest: str) -> None:\n with tarfile.open(self.path, \"r:gz\") as tar:\n tar.extractall(dest)\n\n def __build__(self, name: str, dest: str) -> None:\n with tarfile.open(name, \"w:gz\") as tar:\n tar.add(self.archive_path, arcname=os.path.basename(self.archive_path))\n\n\nclass DistZip(Dist):\n def __extract__(self, dest: str) -> None:\n with ZipFile(self.path, \"r\") as zip:\n zip.extractall(dest)\n\n def __build__(self, name: str, dest: str) -> None:\n with ZipFile(name, \"w\", zipfile.ZIP_DEFLATED) as zip:\n # root : /tmp/tmp********/opensearch-<version+qualifier>\n # leadingdir : opensearch-<version+qualifier>\n # root no leading dir: /tmp/tmp********/\n # This is to preserve the leading directory `opensearch-<version+qualifier>` in zip\n rootlen = len(self.archive_path)\n leadingdirlen = len(os.path.basename(self.archive_path))\n noleadingdirlen = rootlen - leadingdirlen\n for base, _, files in os.walk(self.archive_path):\n for file in files:\n fn = os.path.join(base, file)\n zip.write(fn, fn[noleadingdirlen:])\n\n\nclass DistRpm(Dist):\n\n def __extract__(self, dest: str) -> None:\n BundleRpm(self.filename, self.path, self.min_path).extract(dest)\n\n def __build__(self, name: str, dest: str) -> None:\n BundleRpm(self.filename, self.path, self.min_path).build(name, dest, self.archive_path, self.build_cls)\n", "path": "src/assemble_workflow/dist.py"}]}
| 3,798 | 281 |
gh_patches_debug_92
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1179
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Scheduler to Library Reference
The scheduler module is not at all listed in the Library Reference.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/__init__.py`
Content:
```
1 # Copyright (c) 2013 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Cocotb is a coroutine, cosimulation framework for writing testbenches in Python.
30
31 See http://cocotb.readthedocs.org for full documentation
32 """
33 import os
34 import sys
35 import logging
36 import threading
37 import random
38 import time
39
40 import cocotb.handle
41 from cocotb.scheduler import Scheduler
42 from cocotb.log import SimBaseLog, SimLog
43 from cocotb.regression import RegressionManager
44
45
46 # Things we want in the cocotb namespace
47 from cocotb.decorators import test, coroutine, hook, function, external # noqa: F401
48
49 # Singleton scheduler instance
50 # NB this cheekily ensures a singleton since we're replacing the reference
51 # so that cocotb.scheduler gives you the singleton instance and not the
52 # scheduler package
53
54 # GPI logging instance
55 if "COCOTB_SIM" in os.environ:
56 import simulator
57 logging.basicConfig()
58 logging.setLoggerClass(SimBaseLog)
59 log = SimLog('cocotb')
60 level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
61 try:
62 _default_log = getattr(logging, level)
63 except AttributeError as e:
64 log.error("Unable to set loging level to %s" % level)
65 _default_log = logging.INFO
66 log.setLevel(_default_log)
67 loggpi = SimLog('cocotb.gpi')
68 # Notify GPI of log level
69 simulator.log_level(_default_log)
70
71 # If stdout/stderr are not TTYs, Python may not have opened them with line
72 # buffering. In that case, try to reopen them with line buffering
73 # explicitly enabled. This ensures that prints such as stack traces always
74 # appear. Continue silently if this fails.
75 try:
76 if not sys.stdout.isatty():
77 sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)
78 log.debug("Reopened stdout with line buffering")
79 if not sys.stderr.isatty():
80 sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1)
81 log.debug("Reopened stderr with line buffering")
82 except Exception as e:
83 log.warning("Failed to ensure that stdout/stderr are line buffered: %s", e)
84 log.warning("Some stack traces may not appear because of this.")
85
86
87 scheduler = Scheduler()
88 regression_manager = None
89
90 plusargs = {}
91
92 # To save typing provide an alias to scheduler.add
93 fork = scheduler.add
94
95 # FIXME is this really required?
96 _rlock = threading.RLock()
97
98
99 def mem_debug(port):
100 import cocotb.memdebug
101 cocotb.memdebug.start(port)
102
103
104 def _initialise_testbench(root_name):
105 """
106 This function is called after the simulator has elaborated all
107 entities and is ready to run the test.
108
109 The test must be defined by the environment variables
110 MODULE
111 TESTCASE
112
113 The environment variable COCOTB_HOOKS contains a comma-separated list of
114 modules that should be executed before the first test.
115 """
116 _rlock.acquire()
117
118 memcheck_port = os.getenv('MEMCHECK')
119 if memcheck_port is not None:
120 mem_debug(int(memcheck_port))
121
122 exec_path = os.getenv('COCOTB_PY_DIR')
123 if exec_path is None:
124 exec_path = 'Unknown'
125
126 version = os.getenv('VERSION')
127 if version is None:
128 log.info("Unable to determine Cocotb version from %s" % exec_path)
129 else:
130 log.info("Running tests with Cocotb v%s from %s" %
131 (version, exec_path))
132
133 # Create the base handle type
134
135 process_plusargs()
136
137 # Seed the Python random number generator to make this repeatable
138 global RANDOM_SEED
139 RANDOM_SEED = os.getenv('RANDOM_SEED')
140
141 if RANDOM_SEED is None:
142 if 'ntb_random_seed' in plusargs:
143 RANDOM_SEED = eval(plusargs['ntb_random_seed'])
144 elif 'seed' in plusargs:
145 RANDOM_SEED = eval(plusargs['seed'])
146 else:
147 RANDOM_SEED = int(time.time())
148 log.info("Seeding Python random module with %d" % (RANDOM_SEED))
149 else:
150 RANDOM_SEED = int(RANDOM_SEED)
151 log.info("Seeding Python random module with supplied seed %d" % (RANDOM_SEED))
152 random.seed(RANDOM_SEED)
153
154 module_str = os.getenv('MODULE')
155 test_str = os.getenv('TESTCASE')
156 hooks_str = os.getenv('COCOTB_HOOKS', '')
157
158 if not module_str:
159 raise ImportError("Environment variables defining the module(s) to " +
160 "execute not defined. MODULE=\"%s\"" % (module_str))
161
162 modules = module_str.split(',')
163 hooks = hooks_str.split(',') if hooks_str else []
164
165 global regression_manager
166
167 regression_manager = RegressionManager(root_name, modules, tests=test_str, seed=RANDOM_SEED, hooks=hooks)
168 regression_manager.initialise()
169 regression_manager.execute()
170
171 _rlock.release()
172 return True
173
174
175 def _sim_event(level, message):
176 """Function that can be called externally to signal an event"""
177 SIM_INFO = 0
178 SIM_TEST_FAIL = 1
179 SIM_FAIL = 2
180 from cocotb.result import TestFailure, SimFailure
181
182 if level is SIM_TEST_FAIL:
183 scheduler.log.error("Failing test at simulator request")
184 scheduler.finish_test(TestFailure("Failure from external source: %s" %
185 message))
186 elif level is SIM_FAIL:
187 # We simply return here as the simulator will exit
188 # so no cleanup is needed
189 msg = ("Failing test at simulator request before test run completion: "
190 "%s" % message)
191 scheduler.log.error(msg)
192 scheduler.finish_scheduler(SimFailure(msg))
193 else:
194 scheduler.log.error("Unsupported sim event")
195
196 return True
197
198
199 def process_plusargs():
200
201 global plusargs
202
203 plusargs = {}
204
205 for option in cocotb.argv:
206 if option.startswith('+'):
207 if option.find('=') != -1:
208 (name, value) = option[1:].split('=')
209 plusargs[name] = value
210 else:
211 plusargs[option[1:]] = True
212
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cocotb/__init__.py b/cocotb/__init__.py
--- a/cocotb/__init__.py
+++ b/cocotb/__init__.py
@@ -85,6 +85,8 @@
scheduler = Scheduler()
+"""The global scheduler instance."""
+
regression_manager = None
plusargs = {}
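
The fix is documentation-only: a bare string literal placed directly after a module-level assignment is ignored at run time but is recognized by Sphinx autodoc as that attribute's docstring, which is what lets the global `cocotb.scheduler` instance show up in the Library Reference. A minimal sketch of the pattern follows; the `Scheduler` class here is a stand-in, not the real cocotb implementation:

```python
# Attribute docstring pattern: the string after the assignment is a no-op at run
# time but is picked up by documentation tools such as Sphinx autodoc.
class Scheduler:
    """Stand-in for cocotb.scheduler.Scheduler."""


scheduler = Scheduler()
"""The global scheduler instance."""

print(type(scheduler).__name__)  # -> Scheduler (the string above has no runtime effect)
```

With the docstring in place, an `automodule`/`autodata` entry in the docs is enough to render the attribute and its description in the Library Reference.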
|
{"golden_diff": "diff --git a/cocotb/__init__.py b/cocotb/__init__.py\n--- a/cocotb/__init__.py\n+++ b/cocotb/__init__.py\n@@ -85,6 +85,8 @@\n \n \n scheduler = Scheduler()\n+\"\"\"The global scheduler instance.\"\"\"\n+\n regression_manager = None\n \n plusargs = {}\n", "issue": "Add Scheduler to Library Reference\nThe scheduler module is not at all listed in the Library Reference.\n", "before_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nCocotb is a coroutine, cosimulation framework for writing testbenches in Python.\n\nSee http://cocotb.readthedocs.org for full documentation\n\"\"\"\nimport os\nimport sys\nimport logging\nimport threading\nimport random\nimport time\n\nimport cocotb.handle\nfrom cocotb.scheduler import Scheduler\nfrom cocotb.log import SimBaseLog, SimLog\nfrom cocotb.regression import RegressionManager\n\n\n# Things we want in the cocotb namespace\nfrom cocotb.decorators import test, coroutine, hook, function, external # noqa: F401\n\n# Singleton scheduler instance\n# NB this cheekily ensures a singleton since we're replacing the reference\n# so that cocotb.scheduler gives you the singleton instance and not the\n# scheduler package\n\n# GPI logging instance\nif \"COCOTB_SIM\" in os.environ:\n import simulator\n logging.basicConfig()\n logging.setLoggerClass(SimBaseLog)\n log = SimLog('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError as e:\n log.error(\"Unable to set loging level to %s\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n loggpi = SimLog('cocotb.gpi')\n # Notify GPI of log level\n simulator.log_level(_default_log)\n\n # If stdout/stderr are not TTYs, Python may not have opened them with line\n # buffering. In that case, try to reopen them with line buffering\n # explicitly enabled. This ensures that prints such as stack traces always\n # appear. 
Continue silently if this fails.\n try:\n if not sys.stdout.isatty():\n sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)\n log.debug(\"Reopened stdout with line buffering\")\n if not sys.stderr.isatty():\n sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1)\n log.debug(\"Reopened stderr with line buffering\")\n except Exception as e:\n log.warning(\"Failed to ensure that stdout/stderr are line buffered: %s\", e)\n log.warning(\"Some stack traces may not appear because of this.\")\n\n\nscheduler = Scheduler()\nregression_manager = None\n\nplusargs = {}\n\n# To save typing provide an alias to scheduler.add\nfork = scheduler.add\n\n# FIXME is this really required?\n_rlock = threading.RLock()\n\n\ndef mem_debug(port):\n import cocotb.memdebug\n cocotb.memdebug.start(port)\n\n\ndef _initialise_testbench(root_name):\n \"\"\"\n This function is called after the simulator has elaborated all\n entities and is ready to run the test.\n\n The test must be defined by the environment variables\n MODULE\n TESTCASE\n\n The environment variable COCOTB_HOOKS contains a comma-separated list of\n modules that should be executed before the first test.\n \"\"\"\n _rlock.acquire()\n\n memcheck_port = os.getenv('MEMCHECK')\n if memcheck_port is not None:\n mem_debug(int(memcheck_port))\n\n exec_path = os.getenv('COCOTB_PY_DIR')\n if exec_path is None:\n exec_path = 'Unknown'\n\n version = os.getenv('VERSION')\n if version is None:\n log.info(\"Unable to determine Cocotb version from %s\" % exec_path)\n else:\n log.info(\"Running tests with Cocotb v%s from %s\" %\n (version, exec_path))\n\n # Create the base handle type\n\n process_plusargs()\n\n # Seed the Python random number generator to make this repeatable\n global RANDOM_SEED\n RANDOM_SEED = os.getenv('RANDOM_SEED')\n\n if RANDOM_SEED is None:\n if 'ntb_random_seed' in plusargs:\n RANDOM_SEED = eval(plusargs['ntb_random_seed'])\n elif 'seed' in plusargs:\n RANDOM_SEED = eval(plusargs['seed'])\n else:\n RANDOM_SEED = int(time.time())\n log.info(\"Seeding Python random module with %d\" % (RANDOM_SEED))\n else:\n RANDOM_SEED = int(RANDOM_SEED)\n log.info(\"Seeding Python random module with supplied seed %d\" % (RANDOM_SEED))\n random.seed(RANDOM_SEED)\n\n module_str = os.getenv('MODULE')\n test_str = os.getenv('TESTCASE')\n hooks_str = os.getenv('COCOTB_HOOKS', '')\n\n if not module_str:\n raise ImportError(\"Environment variables defining the module(s) to \" +\n \"execute not defined. 
MODULE=\\\"%s\\\"\" % (module_str))\n\n modules = module_str.split(',')\n hooks = hooks_str.split(',') if hooks_str else []\n\n global regression_manager\n\n regression_manager = RegressionManager(root_name, modules, tests=test_str, seed=RANDOM_SEED, hooks=hooks)\n regression_manager.initialise()\n regression_manager.execute()\n\n _rlock.release()\n return True\n\n\ndef _sim_event(level, message):\n \"\"\"Function that can be called externally to signal an event\"\"\"\n SIM_INFO = 0\n SIM_TEST_FAIL = 1\n SIM_FAIL = 2\n from cocotb.result import TestFailure, SimFailure\n\n if level is SIM_TEST_FAIL:\n scheduler.log.error(\"Failing test at simulator request\")\n scheduler.finish_test(TestFailure(\"Failure from external source: %s\" %\n message))\n elif level is SIM_FAIL:\n # We simply return here as the simulator will exit\n # so no cleanup is needed\n msg = (\"Failing test at simulator request before test run completion: \"\n \"%s\" % message)\n scheduler.log.error(msg)\n scheduler.finish_scheduler(SimFailure(msg))\n else:\n scheduler.log.error(\"Unsupported sim event\")\n\n return True\n\n\ndef process_plusargs():\n\n global plusargs\n\n plusargs = {}\n\n for option in cocotb.argv:\n if option.startswith('+'):\n if option.find('=') != -1:\n (name, value) = option[1:].split('=')\n plusargs[name] = value\n else:\n plusargs[option[1:]] = True\n", "path": "cocotb/__init__.py"}], "after_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nCocotb is a coroutine, cosimulation framework for writing testbenches in Python.\n\nSee http://cocotb.readthedocs.org for full documentation\n\"\"\"\nimport os\nimport sys\nimport logging\nimport threading\nimport random\nimport time\n\nimport cocotb.handle\nfrom cocotb.scheduler import Scheduler\nfrom cocotb.log import SimBaseLog, SimLog\nfrom cocotb.regression import RegressionManager\n\n\n# Things we want in the cocotb namespace\nfrom cocotb.decorators import test, coroutine, hook, function, external # noqa: F401\n\n# Singleton scheduler instance\n# NB this cheekily ensures a singleton since we're replacing the reference\n# so that cocotb.scheduler gives you the singleton instance and not the\n# scheduler package\n\n# GPI logging instance\nif \"COCOTB_SIM\" in os.environ:\n import simulator\n logging.basicConfig()\n logging.setLoggerClass(SimBaseLog)\n log = SimLog('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError as e:\n log.error(\"Unable to set loging level to %s\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n loggpi = SimLog('cocotb.gpi')\n # Notify GPI of log level\n simulator.log_level(_default_log)\n\n # If stdout/stderr are not TTYs, Python may not have opened them with line\n # buffering. In that case, try to reopen them with line buffering\n # explicitly enabled. This ensures that prints such as stack traces always\n # appear. 
Continue silently if this fails.\n try:\n if not sys.stdout.isatty():\n sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)\n log.debug(\"Reopened stdout with line buffering\")\n if not sys.stderr.isatty():\n sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1)\n log.debug(\"Reopened stderr with line buffering\")\n except Exception as e:\n log.warning(\"Failed to ensure that stdout/stderr are line buffered: %s\", e)\n log.warning(\"Some stack traces may not appear because of this.\")\n\n\nscheduler = Scheduler()\n\"\"\"The global scheduler instance.\"\"\"\n\nregression_manager = None\n\nplusargs = {}\n\n# To save typing provide an alias to scheduler.add\nfork = scheduler.add\n\n# FIXME is this really required?\n_rlock = threading.RLock()\n\n\ndef mem_debug(port):\n import cocotb.memdebug\n cocotb.memdebug.start(port)\n\n\ndef _initialise_testbench(root_name):\n \"\"\"\n This function is called after the simulator has elaborated all\n entities and is ready to run the test.\n\n The test must be defined by the environment variables\n MODULE\n TESTCASE\n\n The environment variable COCOTB_HOOKS contains a comma-separated list of\n modules that should be executed before the first test.\n \"\"\"\n _rlock.acquire()\n\n memcheck_port = os.getenv('MEMCHECK')\n if memcheck_port is not None:\n mem_debug(int(memcheck_port))\n\n exec_path = os.getenv('COCOTB_PY_DIR')\n if exec_path is None:\n exec_path = 'Unknown'\n\n version = os.getenv('VERSION')\n if version is None:\n log.info(\"Unable to determine Cocotb version from %s\" % exec_path)\n else:\n log.info(\"Running tests with Cocotb v%s from %s\" %\n (version, exec_path))\n\n # Create the base handle type\n\n process_plusargs()\n\n # Seed the Python random number generator to make this repeatable\n global RANDOM_SEED\n RANDOM_SEED = os.getenv('RANDOM_SEED')\n\n if RANDOM_SEED is None:\n if 'ntb_random_seed' in plusargs:\n RANDOM_SEED = eval(plusargs['ntb_random_seed'])\n elif 'seed' in plusargs:\n RANDOM_SEED = eval(plusargs['seed'])\n else:\n RANDOM_SEED = int(time.time())\n log.info(\"Seeding Python random module with %d\" % (RANDOM_SEED))\n else:\n RANDOM_SEED = int(RANDOM_SEED)\n log.info(\"Seeding Python random module with supplied seed %d\" % (RANDOM_SEED))\n random.seed(RANDOM_SEED)\n\n module_str = os.getenv('MODULE')\n test_str = os.getenv('TESTCASE')\n hooks_str = os.getenv('COCOTB_HOOKS', '')\n\n if not module_str:\n raise ImportError(\"Environment variables defining the module(s) to \" +\n \"execute not defined. 
MODULE=\\\"%s\\\"\" % (module_str))\n\n modules = module_str.split(',')\n hooks = hooks_str.split(',') if hooks_str else []\n\n global regression_manager\n\n regression_manager = RegressionManager(root_name, modules, tests=test_str, seed=RANDOM_SEED, hooks=hooks)\n regression_manager.initialise()\n regression_manager.execute()\n\n _rlock.release()\n return True\n\n\ndef _sim_event(level, message):\n \"\"\"Function that can be called externally to signal an event\"\"\"\n SIM_INFO = 0\n SIM_TEST_FAIL = 1\n SIM_FAIL = 2\n from cocotb.result import TestFailure, SimFailure\n\n if level is SIM_TEST_FAIL:\n scheduler.log.error(\"Failing test at simulator request\")\n scheduler.finish_test(TestFailure(\"Failure from external source: %s\" %\n message))\n elif level is SIM_FAIL:\n # We simply return here as the simulator will exit\n # so no cleanup is needed\n msg = (\"Failing test at simulator request before test run completion: \"\n \"%s\" % message)\n scheduler.log.error(msg)\n scheduler.finish_scheduler(SimFailure(msg))\n else:\n scheduler.log.error(\"Unsupported sim event\")\n\n return True\n\n\ndef process_plusargs():\n\n global plusargs\n\n plusargs = {}\n\n for option in cocotb.argv:\n if option.startswith('+'):\n if option.find('=') != -1:\n (name, value) = option[1:].split('=')\n plusargs[name] = value\n else:\n plusargs[option[1:]] = True\n", "path": "cocotb/__init__.py"}]}
| 2,511 | 78 |
gh_patches_debug_26562
|
rasdani/github-patches
|
git_diff
|
LMFDB__lmfdb-1595
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The meta description is a knowl
**_1 Upvote**_ Hello,
This line:
`
<meta name="description" content="Welcome to the LMFDB, the database of L-functions, modular forms, and related objects. These pages are intended to be a modern handbook including tables, formulas, links, and references for L-functions and their underlying objects.">
`
common to all the webpages, comes from this knowl:
http://beta.lmfdb.org/knowledge/show/intro.description
see: https://github.com/LMFDB/lmfdb/blob/master/lmfdb/base.py#L203
Shouldn't this be hardcoded?
Best,
Edgar
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/base.py`
Content:
```
1 # -*- coding: utf8 -*-
2 # LMFDB - L-function and Modular Forms Database web-site - www.lmfdb.org
3 # Copyright (C) 2010-2012 by the LMFDB authors
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Library General Public
7 # License as published by the Free Software Foundation; either
8 # version 2 of the License, or (at your option) any later version.
9
10 import sys
11 import logging
12 from time import sleep
13 from flask import Flask, session, g, render_template, url_for, request, redirect
14 from pymongo.cursor import Cursor
15 from pymongo.errors import AutoReconnect
16 from sage.all import *
17 from functools import wraps
18 from werkzeug.contrib.cache import SimpleCache
19
20 # logfocus
21 logfocus = None
22
23 def set_logfocus(lf):
24 global logfocus
25 logfocus = lf
26
27
28 def get_logfocus():
29 global logfocus
30 return logfocus
31
32 # global db connection instance (will be set by the first call to
33 # getDBConnection() and should always be obtained from that)
34 _C = None
35
36 DEFAULT_DB_PORT = 37010
37 #FIXME perhaps the global dbport removed
38 dbport = DEFAULT_DB_PORT
39
40
41 def getDBConnection():
42 return _C
43
44 def makeDBConnection(port, **kwargs):
45 global _C
46 if not _C:
47 logging.info("establishing db connection at port %s ..." % port)
48 import pymongo
49 logging.info("using pymongo version %s" % pymongo.version)
50 if pymongo.version_tuple[0] >= 3 or kwargs.get("replicaset",None) is None:
51 from pymongo import MongoClient
52 _C = MongoClient(port = port, **kwargs)
53 else:
54 from pymongo import MongoReplicaSetClient
55 _C = MongoReplicaSetClient(port = port, **kwargs)
56 mongo_info = _C.server_info()
57 logging.info("mongodb version: %s" % mongo_info["version"])
58 logging.info("_C = %s", (_C,) )
59 #the reads are not necessarily from host/address
60 #those depend on the cursor, and can be checked with cursor.conn_id or cursor.address
61 if pymongo.version_tuple[0] >= 3:
62 logging.info("_C.address = %s" % (_C.address,) )
63 else:
64 logging.info("_C.host = %s" % (_C.host,) )
65
66 logging.info("_C.nodes = %s" % (_C.nodes,) )
67 logging.info("_C.read_preference = %s" % (_C.read_preference,) )
68
69
70
71 # Global to track of many auto reconnect attempts for _db_reconnect
72 AUTO_RECONNECT_ATTEMPTS = 0
73
74 def _db_reconnect(func):
75 """
76 Wrapper to automatically reconnect when mongodb throws a AutoReconnect exception.
77
78 See
79 * http://stackoverflow.com/questions/5287621/occasional-connectionerror-cannot-connect-to-the-database-to-mongo
80 * http://paste.pocoo.org/show/224441/
81 and similar workarounds
82 """
83 # maximal number of auto reconnect attempts
84 AUTO_RECONNECT_MAX = 10
85 # delay between attempts
86 AUTO_RECONNECT_DELAY = 1
87
88 def retry(*args, **kwargs):
89 global AUTO_RECONNECT_ATTEMPTS
90 while True:
91 try:
92 return func(*args, **kwargs)
93 except AutoReconnect as e:
94 AUTO_RECONNECT_ATTEMPTS += 1
95 if AUTO_RECONNECT_ATTEMPTS > AUTO_RECONNECT_MAX:
96 AUTO_RECONNECT_ATTEMPTS = 0
97 import flask
98 flask.flash("AutoReconnect failed to reconnect", "error")
99 raise
100 logging.warning(
101 'AutoReconnect #%d - %s raised [%s]' % (AUTO_RECONNECT_ATTEMPTS, func.__name__, e))
102 sleep(AUTO_RECONNECT_DELAY)
103 return retry
104
105 # disabling this reconnect thing, doesn't really help anyways
106 # Cursor._Cursor__send_message = _db_reconnect(Cursor._Cursor__send_message)
107 # Connection._send_message = _db_reconnect(Connection._send_message)
108 # Connection._send_message_with_response =
109 # _db_reconnect(Connection._send_message_with_response)
110
111
112 def _init(port, **kwargs):
113 import pymongo
114 makeDBConnection(port = port, **kwargs)
115 C = getDBConnection()
116
117 from os.path import dirname, join
118 pw_filename = join(dirname(dirname(__file__)), "password")
119 try:
120 username = "webserver"
121 password = open(pw_filename, "r").readlines()[0].strip()
122 except:
123 # file not found or any other problem
124 # this is read-only everywhere
125 logging.warning("authentication: no password -- fallback to read-only access")
126 username = "lmfdb"
127 password = "lmfdb"
128
129 try:
130 C["admin"].authenticate(username, password)
131 if username == "webserver":
132 logging.info("authentication: partial read-write access enabled")
133 except pymongo.errors.PyMongoError as err:
134 logging.error("authentication: FAILED -- aborting")
135 raise err
136 #read something from the db
137 #and check from where was it read
138 if pymongo.version_tuple[0] >= 3:
139 cursor = C.test.test.find().limit(-1)
140 list(cursor)
141 logging.info("MongoClient conection is reading from: %s" % (cursor.address,));
142 elif kwargs.get("replicaset",None) is not None:
143 cursor = C.test.test.find().limit(-1)
144 list(cursor)
145 logging.info("MongoReplicaSetClient connection is reading from: %s" % (cursor.conn_id,));
146 else:
147 logging.info("MongoClient conection is reading from: %s" % (C.host,));
148
149 app = Flask(__name__)
150
151 # If the debug toolbar is installed then use it
152 if app.debug:
153 try:
154 from flask_debugtoolbar import DebugToolbarExtension
155 app.config['SECRET_KEY'] = '''shh, it's a secret'''
156 toolbar = DebugToolbarExtension(app)
157 except ImportError:
158 pass
159
160 # tell jinja to remove linebreaks
161 app.jinja_env.trim_blocks = True
162
163 # enable break and continue in jinja loops
164 app.jinja_env.add_extension('jinja2.ext.loopcontrols')
165 app.jinja_env.add_extension('jinja2.ext.do')
166
167 # the following context processor inserts
168 # * empty info={} dict variable
169 # * body_class = ''
170 # * bread = [...] for the default bread crumb hierarch
171 # * title = 'test string'
172
173
174 def is_debug_mode():
175 from flask import current_app
176 return current_app.debug
177
178 branch = "prod"
179 if (os.getenv('BETA')=='1'):
180 branch = "beta"
181
182 @app.before_request
183 def set_beta_state():
184 g.BETA = (os.getenv('BETA')=='1') or is_debug_mode()
185
186 @app.context_processor
187 def ctx_proc_userdata():
188 # insert an empty info={} as default
189 # set the body class to some default, blueprints should
190 # overwrite it with their name, using @<blueprint_object>.context_processor
191 # see http://flask.pocoo.org/docs/api/?highlight=context_processor#flask.Blueprint.context_processor
192 vars = {'info': {}, 'body_class': ''}
193
194 # insert the default bread crumb hierarchy
195 # overwrite this variable when you want to customize it
196 vars['bread'] = None # [ ('Bread', '.'), ('Crumb', '.'), ('Hierarchy', '.')]
197
198 # default title
199 vars['title'] = r'LMFDB'
200
201 # meta_description appears in the meta tag "description"
202 import knowledge
203 vars['meta_description'] = knowledge.knowl.Knowl("intro.description").content
204 vars['shortthanks'] = r'This project is supported by <a href="%s">grants</a> from the US National Science Foundation and the UK Engineering and Physical Sciences Research Council.' % (url_for('acknowledgment') + "#sponsors")
205 # vars['feedbackpage'] = url_for('contact')
206 vars['feedbackpage'] = r"https://docs.google.com/spreadsheet/viewform?formkey=dDJXYXBleU1BMTFERFFIdjVXVmJqdlE6MQ"
207 vars['LINK_EXT'] = lambda a, b: '<a href="%s" target="_blank">%s</a>' % (b, a)
208
209 # debug mode?
210 vars['DEBUG'] = is_debug_mode()
211 vars['BETA'] = g.BETA
212
213 return vars
214
215
216 # datetime format in jinja templates
217 # you can now pass in a datetime.datetime python object and via
218 # {{ <datetimeobject>|fmtdatetime }} you can format it right inside the template
219 # if you want to do more than just the default, use it for example this way:
220 # {{ <datetimeobject>|fmtdatetime('%H:%M:%S') }}
221 @app.template_filter("fmtdatetime")
222 def fmtdatetime(value, format='%Y-%m-%d %H:%M:%S'):
223 import datetime
224 if isinstance(value, datetime.datetime):
225 return value.strftime(format)
226 else:
227 return "-"
228
229
230 @app.template_filter("nl2br")
231 def nl2br(s):
232 return s.replace('\n', '<br>\n')
233
234
235 @app.template_filter('obfuscate_email')
236 def obfuscate_email(email):
237 """
238 obfuscating the email
239 TODO: doesn't work yet
240 """
241 return u"%s…@…%s" % (email[:2], email[-2:])
242
243
244 @app.template_filter('urlencode')
245 def urlencode(kwargs):
246 import urllib
247 return urllib.urlencode(kwargs)
248
249 # start: link to google code at the bottom
250
251
252 def git_infos():
253 try:
254 from subprocess import Popen, PIPE
255 git_rev_cmd = '''git rev-parse HEAD'''
256 git_date_cmd = '''git show --format="%ci" -s HEAD'''
257 rev = Popen([git_rev_cmd], shell=True, stdout=PIPE).communicate()[0]
258 date = Popen([git_date_cmd], shell=True, stdout=PIPE).communicate()[0]
259 cmd_output = rev, date
260 except e:
261 cmd_output = '-', '-'
262 return cmd_output
263
264 git_rev, git_date = git_infos()
265 from sage.env import SAGE_VERSION
266
267 """
268 Creates link to the source code at the most recent commit.
269 """
270 _url_source = 'https://github.com/LMFDB/lmfdb/tree/'
271 _current_source = '<a href="%s%s">%s</a>' % (_url_source, git_rev, "Source")
272 """
273 Creates link to the list of revisions on the master, where the most recent commit is on top.
274 """
275 _url_changeset = 'https://github.com/LMFDB/lmfdb/commits/%s' % branch
276 _latest_changeset = '<a href="%s">%s</a>' % (_url_changeset, git_date)
277
278
279 @app.context_processor
280 def link_to_current_source():
281 return {'current_source': _current_source,
282 'latest_changeset': _latest_changeset,
283 'sage_version': 'SageMath version %s' % SAGE_VERSION}
284
285 # end: google code links
286
287
288 # for testing.py ###
289 import unittest2
290
291
292 class DoctestExampleTest(object):
293
294 """
295 This is a general purpose class with a doctest
296 """
297
298 def __init__(self, k):
299 self.k = k
300
301 def i_am_tested(self, n):
302 """
303 >>> det = DoctestExampleTest(5)
304 >>> det.i_am_tested(1)
305 47
306 """
307 return n * 42 + self.k
308
309 def __str__(self):
310 """
311 >>> det = DoctestExampleTest(42)
312 >>> print(det)
313 I am 42
314 """
315 return "I am %d" % self.k
316
317
318 class LmfdbTest(unittest2.TestCase):
319
320 def setUp(self):
321 app.config['TESTING'] = True
322 self.app = app
323 self.tc = app.test_client()
324 import lmfdb.website
325 self.C = getDBConnection()
326
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lmfdb/base.py b/lmfdb/base.py
--- a/lmfdb/base.py
+++ b/lmfdb/base.py
@@ -33,8 +33,9 @@
 # getDBConnection() and should always be obtained from that)
 _C = None
 
+#FIXME perhaps the globals dbport and DEFAULT_DB_PORT should be removed?
+#we could had it a default argument to makeDBConnection
 DEFAULT_DB_PORT = 37010
-#FIXME perhaps the global dbport removed
 dbport = DEFAULT_DB_PORT
 
 
@@ -200,7 +201,7 @@
 
     # meta_description appears in the meta tag "description"
    
 import knowledge
-    vars['meta_description'] = knowledge.knowl.Knowl("intro.description").content
+    vars['meta_description'] = r'Welcome to the LMFDB, the database of L-functions, modular forms, and related objects. These pages are intended to be a modern handbook including tables, formulas, links, and references for L-functions and their underlying objects.'
     vars['shortthanks'] = r'This project is supported by <a href="%s">grants</a> from the US National Science Foundation and the UK Engineering and Physical Sciences Research Council.' % (url_for('acknowledgment') + "#sponsors")
 # vars['feedbackpage'] = url_for('contact')
     vars['feedbackpage'] = r"https://docs.google.com/spreadsheet/viewform?formkey=dDJXYXBleU1BMTFERFFIdjVXVmJqdlE6MQ"
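The second hunk swaps a per-request database lookup for a literal: `Knowl("intro.description").content` reads the knowl collection from MongoDB inside a context processor that runs on every page render, so the site-wide `<meta name="description">` depended on (and could be broken by) an editable knowl. The sketch below shows an equivalent arrangement that keeps the hard-coded text in one place; the constant name and the trimmed-down processor body are illustrative only, not part of the committed patch, which simply inlines the string.

```python
# Hypothetical sketch only -- the committed patch inlines the string directly.
# A module-level constant keeps the context processor free of any
# knowledge/MongoDB dependency while avoiding a long inline literal.
META_DESCRIPTION = (
    "Welcome to the LMFDB, the database of L-functions, modular forms, "
    "and related objects. These pages are intended to be a modern handbook "
    "including tables, formulas, links, and references for L-functions "
    "and their underlying objects."
)

def ctx_proc_userdata():
    template_vars = {'info': {}, 'body_class': ''}
    template_vars['meta_description'] = META_DESCRIPTION
    return template_vars
```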
|
{"golden_diff": "diff --git a/lmfdb/base.py b/lmfdb/base.py\n--- a/lmfdb/base.py\n+++ b/lmfdb/base.py\n@@ -33,8 +33,9 @@\n # getDBConnection() and should always be obtained from that)\n _C = None\n \n+#FIXME perhaps the globals dbport and DEFAULT_DB_PORT should be removed?\n+#we could had it a default argument to makeDBConnection\n DEFAULT_DB_PORT = 37010\n-#FIXME perhaps the global dbport removed\n dbport = DEFAULT_DB_PORT\n \n \n@@ -200,7 +201,7 @@\n \n # meta_description appears in the meta tag \"description\"\n import knowledge\n- vars['meta_description'] = knowledge.knowl.Knowl(\"intro.description\").content\n+ vars['meta_description'] = r'Welcome to the LMFDB, the database of L-functions, modular forms, and related objects. These pages are intended to be a modern handbook including tables, formulas, links, and references for L-functions and their underlying objects.'\n vars['shortthanks'] = r'This project is supported by <a href=\"%s\">grants</a> from the US National Science Foundation and the UK Engineering and Physical Sciences Research Council.' % (url_for('acknowledgment') + \"#sponsors\")\n # vars['feedbackpage'] = url_for('contact')\n vars['feedbackpage'] = r\"https://docs.google.com/spreadsheet/viewform?formkey=dDJXYXBleU1BMTFERFFIdjVXVmJqdlE6MQ\"\n", "issue": "The meta description is a knowl\n**_1 Upvote**_ Hello,\n\nThis line:\n`\n<meta name=\"description\" content=\"Welcome to the LMFDB, the database of L-functions, modular forms, and related objects. These pages are intended to be a modern handbook including tables, formulas, links, and references for L-functions and their underlying objects.\">\n`\ncommon to all the webpages, comes from this knowl:\n\nhttp://beta.lmfdb.org/knowledge/show/intro.description\n\nsee: https://github.com/LMFDB/lmfdb/blob/master/lmfdb/base.py#L203\n\nShouldn't this be hardcoded?\n\nBest,\nEdgar\n\n", "before_files": [{"content": "# -*- coding: utf8 -*-\n# LMFDB - L-function and Modular Forms Database web-site - www.lmfdb.org\n# Copyright (C) 2010-2012 by the LMFDB authors\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Library General Public\n# License as published by the Free Software Foundation; either\n# version 2 of the License, or (at your option) any later version.\n\nimport sys\nimport logging\nfrom time import sleep\nfrom flask import Flask, session, g, render_template, url_for, request, redirect\nfrom pymongo.cursor import Cursor\nfrom pymongo.errors import AutoReconnect\nfrom sage.all import *\nfrom functools import wraps\nfrom werkzeug.contrib.cache import SimpleCache\n\n# logfocus\nlogfocus = None\n\ndef set_logfocus(lf):\n global logfocus\n logfocus = lf\n\n\ndef get_logfocus():\n global logfocus\n return logfocus\n\n# global db connection instance (will be set by the first call to\n# getDBConnection() and should always be obtained from that)\n_C = None\n\nDEFAULT_DB_PORT = 37010\n#FIXME perhaps the global dbport removed\ndbport = DEFAULT_DB_PORT\n\n\ndef getDBConnection():\n return _C\n\ndef makeDBConnection(port, **kwargs):\n global _C\n if not _C:\n logging.info(\"establishing db connection at port %s ...\" % port)\n import pymongo\n logging.info(\"using pymongo version %s\" % pymongo.version)\n if pymongo.version_tuple[0] >= 3 or kwargs.get(\"replicaset\",None) is None:\n from pymongo import MongoClient\n _C = MongoClient(port = port, **kwargs)\n else:\n from pymongo import MongoReplicaSetClient\n _C = MongoReplicaSetClient(port = port, **kwargs)\n mongo_info = 
_C.server_info()\n logging.info(\"mongodb version: %s\" % mongo_info[\"version\"])\n logging.info(\"_C = %s\", (_C,) )\n #the reads are not necessarily from host/address\n #those depend on the cursor, and can be checked with cursor.conn_id or cursor.address \n if pymongo.version_tuple[0] >= 3:\n logging.info(\"_C.address = %s\" % (_C.address,) )\n else:\n logging.info(\"_C.host = %s\" % (_C.host,) )\n\n logging.info(\"_C.nodes = %s\" % (_C.nodes,) )\n logging.info(\"_C.read_preference = %s\" % (_C.read_preference,) )\n\n\n\n# Global to track of many auto reconnect attempts for _db_reconnect\nAUTO_RECONNECT_ATTEMPTS = 0\n\ndef _db_reconnect(func):\n \"\"\"\n Wrapper to automatically reconnect when mongodb throws a AutoReconnect exception.\n\n See\n * http://stackoverflow.com/questions/5287621/occasional-connectionerror-cannot-connect-to-the-database-to-mongo\n * http://paste.pocoo.org/show/224441/\n and similar workarounds\n \"\"\"\n # maximal number of auto reconnect attempts\n AUTO_RECONNECT_MAX = 10\n # delay between attempts\n AUTO_RECONNECT_DELAY = 1\n\n def retry(*args, **kwargs):\n global AUTO_RECONNECT_ATTEMPTS\n while True:\n try:\n return func(*args, **kwargs)\n except AutoReconnect as e:\n AUTO_RECONNECT_ATTEMPTS += 1\n if AUTO_RECONNECT_ATTEMPTS > AUTO_RECONNECT_MAX:\n AUTO_RECONNECT_ATTEMPTS = 0\n import flask\n flask.flash(\"AutoReconnect failed to reconnect\", \"error\")\n raise\n logging.warning(\n 'AutoReconnect #%d - %s raised [%s]' % (AUTO_RECONNECT_ATTEMPTS, func.__name__, e))\n sleep(AUTO_RECONNECT_DELAY)\n return retry\n\n# disabling this reconnect thing, doesn't really help anyways\n# Cursor._Cursor__send_message = _db_reconnect(Cursor._Cursor__send_message)\n# Connection._send_message = _db_reconnect(Connection._send_message)\n# Connection._send_message_with_response =\n# _db_reconnect(Connection._send_message_with_response)\n\n\ndef _init(port, **kwargs):\n import pymongo\n makeDBConnection(port = port, **kwargs)\n C = getDBConnection()\n\n from os.path import dirname, join\n pw_filename = join(dirname(dirname(__file__)), \"password\")\n try:\n username = \"webserver\"\n password = open(pw_filename, \"r\").readlines()[0].strip()\n except:\n # file not found or any other problem\n # this is read-only everywhere\n logging.warning(\"authentication: no password -- fallback to read-only access\")\n username = \"lmfdb\"\n password = \"lmfdb\"\n\n try:\n C[\"admin\"].authenticate(username, password)\n if username == \"webserver\":\n logging.info(\"authentication: partial read-write access enabled\")\n except pymongo.errors.PyMongoError as err:\n logging.error(\"authentication: FAILED -- aborting\")\n raise err\n #read something from the db \n #and check from where was it read\n if pymongo.version_tuple[0] >= 3:\n cursor = C.test.test.find().limit(-1)\n list(cursor)\n logging.info(\"MongoClient conection is reading from: %s\" % (cursor.address,));\n elif kwargs.get(\"replicaset\",None) is not None:\n cursor = C.test.test.find().limit(-1)\n list(cursor)\n logging.info(\"MongoReplicaSetClient connection is reading from: %s\" % (cursor.conn_id,));\n else:\n logging.info(\"MongoClient conection is reading from: %s\" % (C.host,));\n\napp = Flask(__name__)\n\n# If the debug toolbar is installed then use it\nif app.debug:\n try:\n from flask_debugtoolbar import DebugToolbarExtension\n app.config['SECRET_KEY'] = '''shh, it's a secret'''\n toolbar = DebugToolbarExtension(app)\n except ImportError:\n pass\n\n# tell jinja to remove linebreaks\napp.jinja_env.trim_blocks = True\n\n# 
enable break and continue in jinja loops\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\napp.jinja_env.add_extension('jinja2.ext.do')\n\n# the following context processor inserts\n# * empty info={} dict variable\n# * body_class = ''\n# * bread = [...] for the default bread crumb hierarch\n# * title = 'test string'\n\n\ndef is_debug_mode():\n from flask import current_app\n return current_app.debug\n\nbranch = \"prod\"\nif (os.getenv('BETA')=='1'):\n branch = \"beta\"\n\[email protected]_request\ndef set_beta_state():\n g.BETA = (os.getenv('BETA')=='1') or is_debug_mode()\n\[email protected]_processor\ndef ctx_proc_userdata():\n # insert an empty info={} as default\n # set the body class to some default, blueprints should\n # overwrite it with their name, using @<blueprint_object>.context_processor\n # see http://flask.pocoo.org/docs/api/?highlight=context_processor#flask.Blueprint.context_processor\n vars = {'info': {}, 'body_class': ''}\n\n # insert the default bread crumb hierarchy\n # overwrite this variable when you want to customize it\n vars['bread'] = None # [ ('Bread', '.'), ('Crumb', '.'), ('Hierarchy', '.')]\n\n # default title\n vars['title'] = r'LMFDB'\n\n # meta_description appears in the meta tag \"description\"\n import knowledge\n vars['meta_description'] = knowledge.knowl.Knowl(\"intro.description\").content\n vars['shortthanks'] = r'This project is supported by <a href=\"%s\">grants</a> from the US National Science Foundation and the UK Engineering and Physical Sciences Research Council.' % (url_for('acknowledgment') + \"#sponsors\")\n# vars['feedbackpage'] = url_for('contact')\n vars['feedbackpage'] = r\"https://docs.google.com/spreadsheet/viewform?formkey=dDJXYXBleU1BMTFERFFIdjVXVmJqdlE6MQ\"\n vars['LINK_EXT'] = lambda a, b: '<a href=\"%s\" target=\"_blank\">%s</a>' % (b, a)\n\n # debug mode?\n vars['DEBUG'] = is_debug_mode()\n vars['BETA'] = g.BETA\n\n return vars\n\n\n# datetime format in jinja templates\n# you can now pass in a datetime.datetime python object and via\n# {{ <datetimeobject>|fmtdatetime }} you can format it right inside the template\n# if you want to do more than just the default, use it for example this way:\n# {{ <datetimeobject>|fmtdatetime('%H:%M:%S') }}\[email protected]_filter(\"fmtdatetime\")\ndef fmtdatetime(value, format='%Y-%m-%d %H:%M:%S'):\n import datetime\n if isinstance(value, datetime.datetime):\n return value.strftime(format)\n else:\n return \"-\"\n\n\[email protected]_filter(\"nl2br\")\ndef nl2br(s):\n return s.replace('\\n', '<br>\\n')\n\n\[email protected]_filter('obfuscate_email')\ndef obfuscate_email(email):\n \"\"\"\n obfuscating the email\n TODO: doesn't work yet\n \"\"\"\n return u\"%s\u2026@\u2026%s\" % (email[:2], email[-2:])\n\n\[email protected]_filter('urlencode')\ndef urlencode(kwargs):\n import urllib\n return urllib.urlencode(kwargs)\n\n# start: link to google code at the bottom\n\n\ndef git_infos():\n try:\n from subprocess import Popen, PIPE\n git_rev_cmd = '''git rev-parse HEAD'''\n git_date_cmd = '''git show --format=\"%ci\" -s HEAD'''\n rev = Popen([git_rev_cmd], shell=True, stdout=PIPE).communicate()[0]\n date = Popen([git_date_cmd], shell=True, stdout=PIPE).communicate()[0]\n cmd_output = rev, date\n except e:\n cmd_output = '-', '-'\n return cmd_output\n\ngit_rev, git_date = git_infos()\nfrom sage.env import SAGE_VERSION\n\n\"\"\"\nCreates link to the source code at the most recent commit.\n\"\"\"\n_url_source = 'https://github.com/LMFDB/lmfdb/tree/'\n_current_source = '<a href=\"%s%s\">%s</a>' % 
(_url_source, git_rev, \"Source\")\n\"\"\"\nCreates link to the list of revisions on the master, where the most recent commit is on top.\n\"\"\"\n_url_changeset = 'https://github.com/LMFDB/lmfdb/commits/%s' % branch\n_latest_changeset = '<a href=\"%s\">%s</a>' % (_url_changeset, git_date)\n\n\[email protected]_processor\ndef link_to_current_source():\n return {'current_source': _current_source,\n 'latest_changeset': _latest_changeset,\n 'sage_version': 'SageMath version %s' % SAGE_VERSION}\n\n# end: google code links\n\n\n# for testing.py ###\nimport unittest2\n\n\nclass DoctestExampleTest(object):\n\n \"\"\"\n This is a general purpose class with a doctest\n \"\"\"\n\n def __init__(self, k):\n self.k = k\n\n def i_am_tested(self, n):\n \"\"\"\n >>> det = DoctestExampleTest(5)\n >>> det.i_am_tested(1)\n 47\n \"\"\"\n return n * 42 + self.k\n\n def __str__(self):\n \"\"\"\n >>> det = DoctestExampleTest(42)\n >>> print(det)\n I am 42\n \"\"\"\n return \"I am %d\" % self.k\n\n\nclass LmfdbTest(unittest2.TestCase):\n\n def setUp(self):\n app.config['TESTING'] = True\n self.app = app\n self.tc = app.test_client()\n import lmfdb.website\n self.C = getDBConnection()\n", "path": "lmfdb/base.py"}], "after_files": [{"content": "# -*- coding: utf8 -*-\n# LMFDB - L-function and Modular Forms Database web-site - www.lmfdb.org\n# Copyright (C) 2010-2012 by the LMFDB authors\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Library General Public\n# License as published by the Free Software Foundation; either\n# version 2 of the License, or (at your option) any later version.\n\nimport sys\nimport logging\nfrom time import sleep\nfrom flask import Flask, session, g, render_template, url_for, request, redirect\nfrom pymongo.cursor import Cursor\nfrom pymongo.errors import AutoReconnect\nfrom sage.all import *\nfrom functools import wraps\nfrom werkzeug.contrib.cache import SimpleCache\n\n# logfocus\nlogfocus = None\n\ndef set_logfocus(lf):\n global logfocus\n logfocus = lf\n\n\ndef get_logfocus():\n global logfocus\n return logfocus\n\n# global db connection instance (will be set by the first call to\n# getDBConnection() and should always be obtained from that)\n_C = None\n\n#FIXME perhaps the globals dbport and DEFAULT_DB_PORT should be removed?\n#we could had it a default argument to makeDBConnection\nDEFAULT_DB_PORT = 37010\ndbport = DEFAULT_DB_PORT\n\n\ndef getDBConnection():\n return _C\n\ndef makeDBConnection(port, **kwargs):\n global _C\n if not _C:\n logging.info(\"establishing db connection at port %s ...\" % port)\n import pymongo\n logging.info(\"using pymongo version %s\" % pymongo.version)\n if pymongo.version_tuple[0] >= 3 or kwargs.get(\"replicaset\",None) is None:\n from pymongo import MongoClient\n _C = MongoClient(port = port, **kwargs)\n else:\n from pymongo import MongoReplicaSetClient\n _C = MongoReplicaSetClient(port = port, **kwargs)\n mongo_info = _C.server_info()\n logging.info(\"mongodb version: %s\" % mongo_info[\"version\"])\n logging.info(\"_C = %s\", (_C,) )\n #the reads are not necessarily from host/address\n #those depend on the cursor, and can be checked with cursor.conn_id or cursor.address \n if pymongo.version_tuple[0] >= 3:\n logging.info(\"_C.address = %s\" % (_C.address,) )\n else:\n logging.info(\"_C.host = %s\" % (_C.host,) )\n\n logging.info(\"_C.nodes = %s\" % (_C.nodes,) )\n logging.info(\"_C.read_preference = %s\" % (_C.read_preference,) )\n\n\n\n# Global to track of many auto reconnect attempts for 
_db_reconnect\nAUTO_RECONNECT_ATTEMPTS = 0\n\ndef _db_reconnect(func):\n \"\"\"\n Wrapper to automatically reconnect when mongodb throws a AutoReconnect exception.\n\n See\n * http://stackoverflow.com/questions/5287621/occasional-connectionerror-cannot-connect-to-the-database-to-mongo\n * http://paste.pocoo.org/show/224441/\n and similar workarounds\n \"\"\"\n # maximal number of auto reconnect attempts\n AUTO_RECONNECT_MAX = 10\n # delay between attempts\n AUTO_RECONNECT_DELAY = 1\n\n def retry(*args, **kwargs):\n global AUTO_RECONNECT_ATTEMPTS\n while True:\n try:\n return func(*args, **kwargs)\n except AutoReconnect as e:\n AUTO_RECONNECT_ATTEMPTS += 1\n if AUTO_RECONNECT_ATTEMPTS > AUTO_RECONNECT_MAX:\n AUTO_RECONNECT_ATTEMPTS = 0\n import flask\n flask.flash(\"AutoReconnect failed to reconnect\", \"error\")\n raise\n logging.warning(\n 'AutoReconnect #%d - %s raised [%s]' % (AUTO_RECONNECT_ATTEMPTS, func.__name__, e))\n sleep(AUTO_RECONNECT_DELAY)\n return retry\n\n# disabling this reconnect thing, doesn't really help anyways\n# Cursor._Cursor__send_message = _db_reconnect(Cursor._Cursor__send_message)\n# Connection._send_message = _db_reconnect(Connection._send_message)\n# Connection._send_message_with_response =\n# _db_reconnect(Connection._send_message_with_response)\n\n\ndef _init(port, **kwargs):\n import pymongo\n makeDBConnection(port = port, **kwargs)\n C = getDBConnection()\n\n from os.path import dirname, join\n pw_filename = join(dirname(dirname(__file__)), \"password\")\n try:\n username = \"webserver\"\n password = open(pw_filename, \"r\").readlines()[0].strip()\n except:\n # file not found or any other problem\n # this is read-only everywhere\n logging.warning(\"authentication: no password -- fallback to read-only access\")\n username = \"lmfdb\"\n password = \"lmfdb\"\n\n try:\n C[\"admin\"].authenticate(username, password)\n if username == \"webserver\":\n logging.info(\"authentication: partial read-write access enabled\")\n except pymongo.errors.PyMongoError as err:\n logging.error(\"authentication: FAILED -- aborting\")\n raise err\n #read something from the db \n #and check from where was it read\n if pymongo.version_tuple[0] >= 3:\n cursor = C.test.test.find().limit(-1)\n list(cursor)\n logging.info(\"MongoClient conection is reading from: %s\" % (cursor.address,));\n elif kwargs.get(\"replicaset\",None) is not None:\n cursor = C.test.test.find().limit(-1)\n list(cursor)\n logging.info(\"MongoReplicaSetClient connection is reading from: %s\" % (cursor.conn_id,));\n else:\n logging.info(\"MongoClient conection is reading from: %s\" % (C.host,));\n\napp = Flask(__name__)\n\n# If the debug toolbar is installed then use it\nif app.debug:\n try:\n from flask_debugtoolbar import DebugToolbarExtension\n app.config['SECRET_KEY'] = '''shh, it's a secret'''\n toolbar = DebugToolbarExtension(app)\n except ImportError:\n pass\n\n# tell jinja to remove linebreaks\napp.jinja_env.trim_blocks = True\n\n# enable break and continue in jinja loops\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\napp.jinja_env.add_extension('jinja2.ext.do')\n\n# the following context processor inserts\n# * empty info={} dict variable\n# * body_class = ''\n# * bread = [...] 
for the default bread crumb hierarch\n# * title = 'test string'\n\n\ndef is_debug_mode():\n from flask import current_app\n return current_app.debug\n\nbranch = \"prod\"\nif (os.getenv('BETA')=='1'):\n branch = \"beta\"\n\[email protected]_request\ndef set_beta_state():\n g.BETA = (os.getenv('BETA')=='1') or is_debug_mode()\n\[email protected]_processor\ndef ctx_proc_userdata():\n # insert an empty info={} as default\n # set the body class to some default, blueprints should\n # overwrite it with their name, using @<blueprint_object>.context_processor\n # see http://flask.pocoo.org/docs/api/?highlight=context_processor#flask.Blueprint.context_processor\n vars = {'info': {}, 'body_class': ''}\n\n # insert the default bread crumb hierarchy\n # overwrite this variable when you want to customize it\n vars['bread'] = None # [ ('Bread', '.'), ('Crumb', '.'), ('Hierarchy', '.')]\n\n # default title\n vars['title'] = r'LMFDB'\n\n # meta_description appears in the meta tag \"description\"\n import knowledge\n vars['meta_description'] = r'Welcome to the LMFDB, the database of L-functions, modular forms, and related objects. These pages are intended to be a modern handbook including tables, formulas, links, and references for L-functions and their underlying objects.'\n vars['shortthanks'] = r'This project is supported by <a href=\"%s\">grants</a> from the US National Science Foundation and the UK Engineering and Physical Sciences Research Council.' % (url_for('acknowledgment') + \"#sponsors\")\n# vars['feedbackpage'] = url_for('contact')\n vars['feedbackpage'] = r\"https://docs.google.com/spreadsheet/viewform?formkey=dDJXYXBleU1BMTFERFFIdjVXVmJqdlE6MQ\"\n vars['LINK_EXT'] = lambda a, b: '<a href=\"%s\" target=\"_blank\">%s</a>' % (b, a)\n\n # debug mode?\n vars['DEBUG'] = is_debug_mode()\n vars['BETA'] = g.BETA\n\n return vars\n\n\n# datetime format in jinja templates\n# you can now pass in a datetime.datetime python object and via\n# {{ <datetimeobject>|fmtdatetime }} you can format it right inside the template\n# if you want to do more than just the default, use it for example this way:\n# {{ <datetimeobject>|fmtdatetime('%H:%M:%S') }}\[email protected]_filter(\"fmtdatetime\")\ndef fmtdatetime(value, format='%Y-%m-%d %H:%M:%S'):\n import datetime\n if isinstance(value, datetime.datetime):\n return value.strftime(format)\n else:\n return \"-\"\n\n\[email protected]_filter(\"nl2br\")\ndef nl2br(s):\n return s.replace('\\n', '<br>\\n')\n\n\[email protected]_filter('obfuscate_email')\ndef obfuscate_email(email):\n \"\"\"\n obfuscating the email\n TODO: doesn't work yet\n \"\"\"\n return u\"%s\u2026@\u2026%s\" % (email[:2], email[-2:])\n\n\[email protected]_filter('urlencode')\ndef urlencode(kwargs):\n import urllib\n return urllib.urlencode(kwargs)\n\n# start: link to google code at the bottom\n\n\ndef git_infos():\n try:\n from subprocess import Popen, PIPE\n git_rev_cmd = '''git rev-parse HEAD'''\n git_date_cmd = '''git show --format=\"%ci\" -s HEAD'''\n rev = Popen([git_rev_cmd], shell=True, stdout=PIPE).communicate()[0]\n date = Popen([git_date_cmd], shell=True, stdout=PIPE).communicate()[0]\n cmd_output = rev, date\n except e:\n cmd_output = '-', '-'\n return cmd_output\n\ngit_rev, git_date = git_infos()\nfrom sage.env import SAGE_VERSION\n\n\"\"\"\nCreates link to the source code at the most recent commit.\n\"\"\"\n_url_source = 'https://github.com/LMFDB/lmfdb/tree/'\n_current_source = '<a href=\"%s%s\">%s</a>' % (_url_source, git_rev, \"Source\")\n\"\"\"\nCreates link to the list of revisions on 
the master, where the most recent commit is on top.\n\"\"\"\n_url_changeset = 'https://github.com/LMFDB/lmfdb/commits/%s' % branch\n_latest_changeset = '<a href=\"%s\">%s</a>' % (_url_changeset, git_date)\n\n\[email protected]_processor\ndef link_to_current_source():\n return {'current_source': _current_source,\n 'latest_changeset': _latest_changeset,\n 'sage_version': 'SageMath version %s' % SAGE_VERSION}\n\n# end: google code links\n\n\n# for testing.py ###\nimport unittest2\n\n\nclass DoctestExampleTest(object):\n\n \"\"\"\n This is a general purpose class with a doctest\n \"\"\"\n\n def __init__(self, k):\n self.k = k\n\n def i_am_tested(self, n):\n \"\"\"\n >>> det = DoctestExampleTest(5)\n >>> det.i_am_tested(1)\n 47\n \"\"\"\n return n * 42 + self.k\n\n def __str__(self):\n \"\"\"\n >>> det = DoctestExampleTest(42)\n >>> print(det)\n I am 42\n \"\"\"\n return \"I am %d\" % self.k\n\n\nclass LmfdbTest(unittest2.TestCase):\n\n def setUp(self):\n app.config['TESTING'] = True\n self.app = app\n self.tc = app.test_client()\n import lmfdb.website\n self.C = getDBConnection()\n", "path": "lmfdb/base.py"}]}
| 3,949 | 339 |
gh_patches_debug_1648
|
rasdani/github-patches
|
git_diff
|
benoitc__gunicorn-1806
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I get error in this package AttributeError: 'NoneType' object has no attribute 'add_extra_file'
hi every one ..
when i try to deploy keras model into google cloud i get this error ...
```py
File "/home/falahgs07/keras/env/lib/python3.5/site-packages/gunicorn/workers/base.py", line 126, in init_process
self.load_wsgi()
File "/home/falahgs07/keras/env/lib/python3.5/site-packages/gunicorn/workers/base.py", line 148, in load_wsgi
self.reloader.add_extra_file(exc_val.filename)
AttributeError: 'NoneType' object has no attribute 'add_extra_file'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/workers/base.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import os
8 from random import randint
9 import signal
10 from ssl import SSLError
11 import sys
12 import time
13 import traceback
14
15 from gunicorn import six
16 from gunicorn import util
17 from gunicorn.workers.workertmp import WorkerTmp
18 from gunicorn.reloader import reloader_engines
19 from gunicorn.http.errors import (
20 InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,
21 InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,
22 )
23 from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
24 from gunicorn.http.errors import InvalidSchemeHeaders
25 from gunicorn.http.wsgi import default_environ, Response
26 from gunicorn.six import MAXSIZE
27
28
29 class Worker(object):
30
31 SIGNALS = [getattr(signal, "SIG%s" % x)
32 for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]
33
34 PIPE = []
35
36 def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
37 """\
38 This is called pre-fork so it shouldn't do anything to the
39 current process. If there's a need to make process wide
40 changes you'll want to do that in ``self.init_process()``.
41 """
42 self.age = age
43 self.pid = "[booting]"
44 self.ppid = ppid
45 self.sockets = sockets
46 self.app = app
47 self.timeout = timeout
48 self.cfg = cfg
49 self.booted = False
50 self.aborted = False
51 self.reloader = None
52
53 self.nr = 0
54 jitter = randint(0, cfg.max_requests_jitter)
55 self.max_requests = cfg.max_requests + jitter or MAXSIZE
56 self.alive = True
57 self.log = log
58 self.tmp = WorkerTmp(cfg)
59
60 def __str__(self):
61 return "<Worker %s>" % self.pid
62
63 def notify(self):
64 """\
65 Your worker subclass must arrange to have this method called
66 once every ``self.timeout`` seconds. If you fail in accomplishing
67 this task, the master process will murder your workers.
68 """
69 self.tmp.notify()
70
71 def run(self):
72 """\
73 This is the mainloop of a worker process. You should override
74 this method in a subclass to provide the intended behaviour
75 for your particular evil schemes.
76 """
77 raise NotImplementedError()
78
79 def init_process(self):
80 """\
81 If you override this method in a subclass, the last statement
82 in the function should be to call this method with
83 super(MyWorkerClass, self).init_process() so that the ``run()``
84 loop is initiated.
85 """
86
87 # set environment' variables
88 if self.cfg.env:
89 for k, v in self.cfg.env.items():
90 os.environ[k] = v
91
92 util.set_owner_process(self.cfg.uid, self.cfg.gid,
93 initgroups=self.cfg.initgroups)
94
95 # Reseed the random number generator
96 util.seed()
97
98 # For waking ourselves up
99 self.PIPE = os.pipe()
100 for p in self.PIPE:
101 util.set_non_blocking(p)
102 util.close_on_exec(p)
103
104 # Prevent fd inheritance
105 for s in self.sockets:
106 util.close_on_exec(s)
107 util.close_on_exec(self.tmp.fileno())
108
109 self.wait_fds = self.sockets + [self.PIPE[0]]
110
111 self.log.close_on_exec()
112
113 self.init_signals()
114
115 # start the reloader
116 if self.cfg.reload:
117 def changed(fname):
118 self.log.info("Worker reloading: %s modified", fname)
119 self.alive = False
120 self.cfg.worker_int(self)
121 time.sleep(0.1)
122 sys.exit(0)
123
124 reloader_cls = reloader_engines[self.cfg.reload_engine]
125 self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files,
126 callback=changed)
127 self.reloader.start()
128
129 self.load_wsgi()
130 self.cfg.post_worker_init(self)
131
132 # Enter main run loop
133 self.booted = True
134 self.run()
135
136 def load_wsgi(self):
137 try:
138 self.wsgi = self.app.wsgi()
139 except SyntaxError as e:
140 if self.cfg.reload == 'off':
141 raise
142
143 self.log.exception(e)
144
145 # fix from PR #1228
146 # storing the traceback into exc_tb will create a circular reference.
147 # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,
148 # delete the traceback after use.
149 try:
150 _, exc_val, exc_tb = sys.exc_info()
151 self.reloader.add_extra_file(exc_val.filename)
152
153 tb_string = six.StringIO()
154 traceback.print_tb(exc_tb, file=tb_string)
155 self.wsgi = util.make_fail_app(tb_string.getvalue())
156 finally:
157 del exc_tb
158
159 def init_signals(self):
160 # reset signaling
161 for s in self.SIGNALS:
162 signal.signal(s, signal.SIG_DFL)
163 # init new signaling
164 signal.signal(signal.SIGQUIT, self.handle_quit)
165 signal.signal(signal.SIGTERM, self.handle_exit)
166 signal.signal(signal.SIGINT, self.handle_quit)
167 signal.signal(signal.SIGWINCH, self.handle_winch)
168 signal.signal(signal.SIGUSR1, self.handle_usr1)
169 signal.signal(signal.SIGABRT, self.handle_abort)
170
171 # Don't let SIGTERM and SIGUSR1 disturb active requests
172 # by interrupting system calls
173 if hasattr(signal, 'siginterrupt'): # python >= 2.6
174 signal.siginterrupt(signal.SIGTERM, False)
175 signal.siginterrupt(signal.SIGUSR1, False)
176
177 if hasattr(signal, 'set_wakeup_fd'):
178 signal.set_wakeup_fd(self.PIPE[1])
179
180 def handle_usr1(self, sig, frame):
181 self.log.reopen_files()
182
183 def handle_exit(self, sig, frame):
184 self.alive = False
185
186 def handle_quit(self, sig, frame):
187 self.alive = False
188 # worker_int callback
189 self.cfg.worker_int(self)
190 time.sleep(0.1)
191 sys.exit(0)
192
193 def handle_abort(self, sig, frame):
194 self.alive = False
195 self.cfg.worker_abort(self)
196 sys.exit(1)
197
198 def handle_error(self, req, client, addr, exc):
199 request_start = datetime.now()
200 addr = addr or ('', -1) # unix socket case
201 if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,
202 InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
203 LimitRequestLine, LimitRequestHeaders,
204 InvalidProxyLine, ForbiddenProxyRequest,
205 InvalidSchemeHeaders,
206 SSLError)):
207
208 status_int = 400
209 reason = "Bad Request"
210
211 if isinstance(exc, InvalidRequestLine):
212 mesg = "Invalid Request Line '%s'" % str(exc)
213 elif isinstance(exc, InvalidRequestMethod):
214 mesg = "Invalid Method '%s'" % str(exc)
215 elif isinstance(exc, InvalidHTTPVersion):
216 mesg = "Invalid HTTP Version '%s'" % str(exc)
217 elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
218 mesg = "%s" % str(exc)
219 if not req and hasattr(exc, "req"):
220 req = exc.req # for access log
221 elif isinstance(exc, LimitRequestLine):
222 mesg = "%s" % str(exc)
223 elif isinstance(exc, LimitRequestHeaders):
224 mesg = "Error parsing headers: '%s'" % str(exc)
225 elif isinstance(exc, InvalidProxyLine):
226 mesg = "'%s'" % str(exc)
227 elif isinstance(exc, ForbiddenProxyRequest):
228 reason = "Forbidden"
229 mesg = "Request forbidden"
230 status_int = 403
231 elif isinstance(exc, InvalidSchemeHeaders):
232 mesg = "%s" % str(exc)
233 elif isinstance(exc, SSLError):
234 reason = "Forbidden"
235 mesg = "'%s'" % str(exc)
236 status_int = 403
237
238 msg = "Invalid request from ip={ip}: {error}"
239 self.log.debug(msg.format(ip=addr[0], error=str(exc)))
240 else:
241 if hasattr(req, "uri"):
242 self.log.exception("Error handling request %s", req.uri)
243 status_int = 500
244 reason = "Internal Server Error"
245 mesg = ""
246
247 if req is not None:
248 request_time = datetime.now() - request_start
249 environ = default_environ(req, client, self.cfg)
250 environ['REMOTE_ADDR'] = addr[0]
251 environ['REMOTE_PORT'] = str(addr[1])
252 resp = Response(req, client, self.cfg)
253 resp.status = "%s %s" % (status_int, reason)
254 resp.response_length = len(mesg)
255 self.log.access(resp, req, environ, request_time)
256
257 try:
258 util.write_error(client, status_int, reason, mesg)
259 except:
260 self.log.debug("Failed to send error message.")
261
262 def handle_winch(self, sig, fname):
263 # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
264 self.log.debug("worker: SIGWINCH ignored.")
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -137,7 +137,7 @@
         try:
             self.wsgi = self.app.wsgi()
         except SyntaxError as e:
-            if self.cfg.reload == 'off':
+            if not self.cfg.reload:
                 raise
 
             self.log.exception(e)
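The root cause matches the traceback in the issue: `cfg.reload` is a boolean setting (it defaults to `False` and is flipped by `--reload`), so the old guard `self.cfg.reload == 'off'` could never be true. The `SyntaxError` handler therefore kept going and reached `self.reloader.add_extra_file(...)` even though `self.reloader` is only constructed inside the `if self.cfg.reload:` block of `init_process`, leaving it `None`. A small self-contained illustration of the comparison, assuming the default boolean value of the setting:

```python
# Why the old check never re-raised: the reload setting is a bool, not the string 'off'.
cfg_reload = False                 # default when gunicorn runs without --reload

print(cfg_reload == 'off')         # False -> old code fell through to self.reloader.add_extra_file
print(not cfg_reload)              # True  -> patched code re-raises the SyntaxError instead
```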
|
{"golden_diff": "diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py\n--- a/gunicorn/workers/base.py\n+++ b/gunicorn/workers/base.py\n@@ -137,7 +137,7 @@\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n- if self.cfg.reload == 'off':\n+ if not self.cfg.reload:\n raise\n \n self.log.exception(e)\n", "issue": "I get error in this package AttributeError: 'NoneType' object has no attribute 'add_extra_file'\nhi every one ..\r\nwhen i try to deploy keras model into google cloud i get this error ...\r\n\r\n```py\r\nFile \"/home/falahgs07/keras/env/lib/python3.5/site-packages/gunicorn/workers/base.py\", line 126, in init_process\r\n self.load_wsgi()\r\n File \"/home/falahgs07/keras/env/lib/python3.5/site-packages/gunicorn/workers/base.py\", line 148, in load_wsgi\r\n self.reloader.add_extra_file(exc_val.filename)\r\nAttributeError: 'NoneType' object has no attribute 'add_extra_file'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nfrom random import randint\nimport signal\nfrom ssl import SSLError\nimport sys\nimport time\nimport traceback\n\nfrom gunicorn import six\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import reloader_engines\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.errors import InvalidSchemeHeaders\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.pid = \"[booting]\"\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid,\n initgroups=self.cfg.initgroups)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n for s in self.sockets:\n util.close_on_exec(s)\n util.close_on_exec(self.tmp.fileno())\n\n self.wait_fds = self.sockets + [self.PIPE[0]]\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n self.alive = False\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n reloader_cls = reloader_engines[self.cfg.reload_engine]\n self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files,\n callback=changed)\n self.reloader.start()\n\n self.load_wsgi()\n self.cfg.post_worker_init(self)\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if self.cfg.reload == 'off':\n raise\n\n self.log.exception(e)\n\n # fix from PR #1228\n # storing the traceback into exc_tb will create a circular reference.\n # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,\n # delete the traceback after use.\n try:\n _, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = six.StringIO()\n traceback.print_tb(exc_tb, file=tb_string)\n self.wsgi = util.make_fail_app(tb_string.getvalue())\n finally:\n del exc_tb\n\n def init_signals(self):\n # reset signaling\n for s in self.SIGNALS:\n signal.signal(s, signal.SIG_DFL)\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n if hasattr(signal, 'set_wakeup_fd'):\n signal.set_wakeup_fd(self.PIPE[1])\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, 
ForbiddenProxyRequest,\n InvalidSchemeHeaders,\n SSLError)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n elif isinstance(exc, InvalidSchemeHeaders):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, SSLError):\n reason = \"Forbidden\"\n mesg = \"'%s'\" % str(exc)\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n if hasattr(req, \"uri\"):\n self.log.exception(\"Error handling request %s\", req.uri)\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n self.log.debug(\"worker: SIGWINCH ignored.\")\n", "path": "gunicorn/workers/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nfrom random import randint\nimport signal\nfrom ssl import SSLError\nimport sys\nimport time\nimport traceback\n\nfrom gunicorn import six\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import reloader_engines\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.errors import InvalidSchemeHeaders\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. 
If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.pid = \"[booting]\"\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid,\n initgroups=self.cfg.initgroups)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n for s in self.sockets:\n util.close_on_exec(s)\n util.close_on_exec(self.tmp.fileno())\n\n self.wait_fds = self.sockets + [self.PIPE[0]]\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n self.alive = False\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n reloader_cls = reloader_engines[self.cfg.reload_engine]\n self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files,\n callback=changed)\n self.reloader.start()\n\n self.load_wsgi()\n self.cfg.post_worker_init(self)\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n # fix from PR #1228\n # storing the traceback into exc_tb will create a circular reference.\n # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,\n # delete the traceback after use.\n try:\n _, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = six.StringIO()\n traceback.print_tb(exc_tb, file=tb_string)\n self.wsgi = util.make_fail_app(tb_string.getvalue())\n finally:\n del exc_tb\n\n def init_signals(self):\n # reset signaling\n for s in self.SIGNALS:\n signal.signal(s, signal.SIG_DFL)\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 
'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n if hasattr(signal, 'set_wakeup_fd'):\n signal.set_wakeup_fd(self.PIPE[1])\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest,\n InvalidSchemeHeaders,\n SSLError)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n elif isinstance(exc, InvalidSchemeHeaders):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, SSLError):\n reason = \"Forbidden\"\n mesg = \"'%s'\" % str(exc)\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n if hasattr(req, \"uri\"):\n self.log.exception(\"Error handling request %s\", req.uri)\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n self.log.debug(\"worker: SIGWINCH ignored.\")\n", "path": "gunicorn/workers/base.py"}]}
| 3,131 | 97 |
gh_patches_debug_19410
|
rasdani/github-patches
|
git_diff
|
pyload__pyload-1418
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Downloading from Oboom.com without premium ERROR
It can't download the file. I get the Error "recaptcha html not found".
Everything is up2date...:(
--- END ISSUE ---
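A rough, self-contained illustration of the failure mode — the accepted patch shown further below makes the plugin load the page HTML before captcha handling; without that, a ReCaptcha-style key search has nothing to scan. The pattern and helper here are invented for the sketch and are not pyLoad code:

```python
import re

# Invented helper, not pyLoad code: a ReCaptcha-style key lookup only works if
# the plugin has actually fetched the page HTML first.
RECAPTCHA_KEY_PATTERN = r'recaptcha.*?\?k=([\w-]+)'  # pattern assumed for illustration

def find_recaptcha_key(html):
    match = re.search(RECAPTCHA_KEY_PATTERN, html or "")
    return match.group(1) if match else None

print(find_recaptcha_key(None))                                  # None -> "recaptcha html not found"
print(find_recaptcha_key("recaptcha/api/challenge?k=6LdqpO0S"))  # "6LdqpO0S"
```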
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hoster/OboomCom.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Test links:
4 # https://www.oboom.com/B7CYZIEB/10Mio.dat
5
6 import re
7
8 from module.common.json_layer import json_loads
9 from module.plugins.Hoster import Hoster
10 from module.plugins.internal.CaptchaService import ReCaptcha
11
12
13 class OboomCom(Hoster):
14 __name__ = "OboomCom"
15 __type__ = "hoster"
16 __version__ = "0.31"
17
18 __pattern__ = r'https?://(?:www\.)?oboom\.com/(#(id=|/)?)?(?P<ID>\w{8})'
19
20 __description__ = """oboom.com hoster plugin"""
21 __license__ = "GPLv3"
22 __authors__ = [("stanley", "[email protected]")]
23
24
25 RECAPTCHA_KEY = "6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX"
26
27
28 def setup(self):
29 self.chunkLimit = 1
30 self.multiDL = self.resumeDownload = self.premium
31
32
33 def process(self, pyfile):
34 self.pyfile.url.replace(".com/#id=", ".com/#")
35 self.pyfile.url.replace(".com/#/", ".com/#")
36 self.getFileId(self.pyfile.url)
37 self.getSessionToken()
38 self.getFileInfo(self.sessionToken, self.fileId)
39 self.pyfile.name = self.fileName
40 self.pyfile.size = self.fileSize
41 if not self.premium:
42 self.solveCaptcha()
43 self.getDownloadTicket()
44 self.download("https://%s/1.0/dlh" % self.downloadDomain, get={"ticket": self.downloadTicket, "http_errors": 0})
45
46
47 def loadUrl(self, url, get=None):
48 if get is None:
49 get = dict()
50 return json_loads(self.load(url, get, decode=True))
51
52
53 def getFileId(self, url):
54 self.fileId = re.match(OboomCom.__pattern__, url).group('ID')
55
56
57 def getSessionToken(self):
58 if self.premium:
59 accountInfo = self.account.getAccountInfo(self.user, True)
60 if "session" in accountInfo:
61 self.sessionToken = accountInfo['session']
62 else:
63 self.fail(_("Could not retrieve premium session"))
64 else:
65 apiUrl = "https://www.oboom.com/1.0/guestsession"
66 result = self.loadUrl(apiUrl)
67 if result[0] == 200:
68 self.sessionToken = result[1]
69 else:
70 self.fail(_("Could not retrieve token for guest session. Error code: %s") % result[0])
71
72
73 def solveCaptcha(self):
74 recaptcha = ReCaptcha(self)
75
76 for _i in xrange(5):
77 response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY)
78 apiUrl = "https://www.oboom.com/1.0/download/ticket"
79 params = {"recaptcha_challenge_field": challenge,
80 "recaptcha_response_field": response,
81 "download_id": self.fileId,
82 "token": self.sessionToken}
83 result = self.loadUrl(apiUrl, params)
84
85 if result[0] == 200:
86 self.downloadToken = result[1]
87 self.downloadAuth = result[2]
88 self.correctCaptcha()
89 self.setWait(30)
90 self.wait()
91 break
92
93 elif result[0] == 400:
94 if result[1] == "incorrect-captcha-sol":
95 self.invalidCaptcha()
96 elif result[1] == "captcha-timeout":
97 self.invalidCaptcha()
98 elif result[1] == "forbidden":
99 self.retry(5, 15 * 60, _("Service unavailable"))
100
101 elif result[0] == 403:
102 if result[1] == -1: # another download is running
103 self.setWait(15 * 60)
104 else:
105 self.setWait(result[1], True)
106 self.wait()
107 self.retry(5)
108 else:
109 self.invalidCaptcha()
110 self.fail(_("Received invalid captcha 5 times"))
111
112
113 def getFileInfo(self, token, fileId):
114 apiUrl = "https://api.oboom.com/1.0/info"
115 params = {"token": token, "items": fileId, "http_errors": 0}
116
117 result = self.loadUrl(apiUrl, params)
118 if result[0] == 200:
119 item = result[1][0]
120 if item['state'] == "online":
121 self.fileSize = item['size']
122 self.fileName = item['name']
123 else:
124 self.offline()
125 else:
126 self.fail(_("Could not retrieve file info. Error code %s: %s") % (result[0], result[1]))
127
128
129 def getDownloadTicket(self):
130 apiUrl = "https://api.oboom.com/1/dl"
131 params = {"item": self.fileId, "http_errors": 0}
132 if self.premium:
133 params['token'] = self.sessionToken
134 else:
135 params['token'] = self.downloadToken
136 params['auth'] = self.downloadAuth
137
138 result = self.loadUrl(apiUrl, params)
139 if result[0] == 200:
140 self.downloadDomain = result[1]
141 self.downloadTicket = result[2]
142 elif result[0] == 421:
143 self.retry(wait_time=result[2] + 60, reason=_("Connection limit exceeded"))
144 else:
145 self.fail(_("Could not retrieve download ticket. Error code: %s") % result[0])
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/module/plugins/hoster/OboomCom.py b/module/plugins/hoster/OboomCom.py
--- a/module/plugins/hoster/OboomCom.py
+++ b/module/plugins/hoster/OboomCom.py
@@ -13,9 +13,9 @@
class OboomCom(Hoster):
__name__ = "OboomCom"
__type__ = "hoster"
- __version__ = "0.31"
+ __version__ = "0.32"
- __pattern__ = r'https?://(?:www\.)?oboom\.com/(#(id=|/)?)?(?P<ID>\w{8})'
+ __pattern__ = r'https?://(?:www\.)?oboom\.com/(?:#(?:id=|/)?)?(?P<ID>\w{8})'
__description__ = """oboom.com hoster plugin"""
__license__ = "GPLv3"
@@ -33,6 +33,7 @@
def process(self, pyfile):
self.pyfile.url.replace(".com/#id=", ".com/#")
self.pyfile.url.replace(".com/#/", ".com/#")
+ self.html = self.load(pyfile.url)
self.getFileId(self.pyfile.url)
self.getSessionToken()
self.getFileInfo(self.sessionToken, self.fileId)
|
{"golden_diff": "diff --git a/module/plugins/hoster/OboomCom.py b/module/plugins/hoster/OboomCom.py\n--- a/module/plugins/hoster/OboomCom.py\n+++ b/module/plugins/hoster/OboomCom.py\n@@ -13,9 +13,9 @@\n class OboomCom(Hoster):\n __name__ = \"OboomCom\"\n __type__ = \"hoster\"\n- __version__ = \"0.31\"\n+ __version__ = \"0.32\"\n \n- __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(#(id=|/)?)?(?P<ID>\\w{8})'\n+ __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(?:#(?:id=|/)?)?(?P<ID>\\w{8})'\n \n __description__ = \"\"\"oboom.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n@@ -33,6 +33,7 @@\n def process(self, pyfile):\n self.pyfile.url.replace(\".com/#id=\", \".com/#\")\n self.pyfile.url.replace(\".com/#/\", \".com/#\")\n+ self.html = self.load(pyfile.url)\n self.getFileId(self.pyfile.url)\n self.getSessionToken()\n self.getFileInfo(self.sessionToken, self.fileId)\n", "issue": "Downloading from Oboom.com without premium ERROR\nIt can't download the file. I get the Error \"recaptcha html not found\".\nEverything is up2date...:(\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Test links:\n# https://www.oboom.com/B7CYZIEB/10Mio.dat\n\nimport re\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.Hoster import Hoster\nfrom module.plugins.internal.CaptchaService import ReCaptcha\n\n\nclass OboomCom(Hoster):\n __name__ = \"OboomCom\"\n __type__ = \"hoster\"\n __version__ = \"0.31\"\n\n __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(#(id=|/)?)?(?P<ID>\\w{8})'\n\n __description__ = \"\"\"oboom.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"stanley\", \"[email protected]\")]\n\n\n RECAPTCHA_KEY = \"6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX\"\n\n\n def setup(self):\n self.chunkLimit = 1\n self.multiDL = self.resumeDownload = self.premium\n\n\n def process(self, pyfile):\n self.pyfile.url.replace(\".com/#id=\", \".com/#\")\n self.pyfile.url.replace(\".com/#/\", \".com/#\")\n self.getFileId(self.pyfile.url)\n self.getSessionToken()\n self.getFileInfo(self.sessionToken, self.fileId)\n self.pyfile.name = self.fileName\n self.pyfile.size = self.fileSize\n if not self.premium:\n self.solveCaptcha()\n self.getDownloadTicket()\n self.download(\"https://%s/1.0/dlh\" % self.downloadDomain, get={\"ticket\": self.downloadTicket, \"http_errors\": 0})\n\n\n def loadUrl(self, url, get=None):\n if get is None:\n get = dict()\n return json_loads(self.load(url, get, decode=True))\n\n\n def getFileId(self, url):\n self.fileId = re.match(OboomCom.__pattern__, url).group('ID')\n\n\n def getSessionToken(self):\n if self.premium:\n accountInfo = self.account.getAccountInfo(self.user, True)\n if \"session\" in accountInfo:\n self.sessionToken = accountInfo['session']\n else:\n self.fail(_(\"Could not retrieve premium session\"))\n else:\n apiUrl = \"https://www.oboom.com/1.0/guestsession\"\n result = self.loadUrl(apiUrl)\n if result[0] == 200:\n self.sessionToken = result[1]\n else:\n self.fail(_(\"Could not retrieve token for guest session. 
Error code: %s\") % result[0])\n\n\n def solveCaptcha(self):\n recaptcha = ReCaptcha(self)\n\n for _i in xrange(5):\n response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY)\n apiUrl = \"https://www.oboom.com/1.0/download/ticket\"\n params = {\"recaptcha_challenge_field\": challenge,\n \"recaptcha_response_field\": response,\n \"download_id\": self.fileId,\n \"token\": self.sessionToken}\n result = self.loadUrl(apiUrl, params)\n\n if result[0] == 200:\n self.downloadToken = result[1]\n self.downloadAuth = result[2]\n self.correctCaptcha()\n self.setWait(30)\n self.wait()\n break\n\n elif result[0] == 400:\n if result[1] == \"incorrect-captcha-sol\":\n self.invalidCaptcha()\n elif result[1] == \"captcha-timeout\":\n self.invalidCaptcha()\n elif result[1] == \"forbidden\":\n self.retry(5, 15 * 60, _(\"Service unavailable\"))\n\n elif result[0] == 403:\n if result[1] == -1: # another download is running\n self.setWait(15 * 60)\n else:\n self.setWait(result[1], True)\n self.wait()\n self.retry(5)\n else:\n self.invalidCaptcha()\n self.fail(_(\"Received invalid captcha 5 times\"))\n\n\n def getFileInfo(self, token, fileId):\n apiUrl = \"https://api.oboom.com/1.0/info\"\n params = {\"token\": token, \"items\": fileId, \"http_errors\": 0}\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n item = result[1][0]\n if item['state'] == \"online\":\n self.fileSize = item['size']\n self.fileName = item['name']\n else:\n self.offline()\n else:\n self.fail(_(\"Could not retrieve file info. Error code %s: %s\") % (result[0], result[1]))\n\n\n def getDownloadTicket(self):\n apiUrl = \"https://api.oboom.com/1/dl\"\n params = {\"item\": self.fileId, \"http_errors\": 0}\n if self.premium:\n params['token'] = self.sessionToken\n else:\n params['token'] = self.downloadToken\n params['auth'] = self.downloadAuth\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n self.downloadDomain = result[1]\n self.downloadTicket = result[2]\n elif result[0] == 421:\n self.retry(wait_time=result[2] + 60, reason=_(\"Connection limit exceeded\"))\n else:\n self.fail(_(\"Could not retrieve download ticket. 
Error code: %s\") % result[0])\n", "path": "module/plugins/hoster/OboomCom.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Test links:\n# https://www.oboom.com/B7CYZIEB/10Mio.dat\n\nimport re\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.Hoster import Hoster\nfrom module.plugins.internal.CaptchaService import ReCaptcha\n\n\nclass OboomCom(Hoster):\n __name__ = \"OboomCom\"\n __type__ = \"hoster\"\n __version__ = \"0.32\"\n\n __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(?:#(?:id=|/)?)?(?P<ID>\\w{8})'\n\n __description__ = \"\"\"oboom.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"stanley\", \"[email protected]\")]\n\n\n RECAPTCHA_KEY = \"6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX\"\n\n\n def setup(self):\n self.chunkLimit = 1\n self.multiDL = self.resumeDownload = self.premium\n\n\n def process(self, pyfile):\n self.pyfile.url.replace(\".com/#id=\", \".com/#\")\n self.pyfile.url.replace(\".com/#/\", \".com/#\")\n self.html = self.load(pyfile.url)\n self.getFileId(self.pyfile.url)\n self.getSessionToken()\n self.getFileInfo(self.sessionToken, self.fileId)\n self.pyfile.name = self.fileName\n self.pyfile.size = self.fileSize\n if not self.premium:\n self.solveCaptcha()\n self.getDownloadTicket()\n self.download(\"https://%s/1.0/dlh\" % self.downloadDomain, get={\"ticket\": self.downloadTicket, \"http_errors\": 0})\n\n\n def loadUrl(self, url, get=None):\n if get is None:\n get = dict()\n return json_loads(self.load(url, get, decode=True))\n\n\n def getFileId(self, url):\n self.fileId = re.match(OboomCom.__pattern__, url).group('ID')\n\n\n def getSessionToken(self):\n if self.premium:\n accountInfo = self.account.getAccountInfo(self.user, True)\n if \"session\" in accountInfo:\n self.sessionToken = accountInfo['session']\n else:\n self.fail(_(\"Could not retrieve premium session\"))\n else:\n apiUrl = \"https://www.oboom.com/1.0/guestsession\"\n result = self.loadUrl(apiUrl)\n if result[0] == 200:\n self.sessionToken = result[1]\n else:\n self.fail(_(\"Could not retrieve token for guest session. 
Error code: %s\") % result[0])\n\n\n def solveCaptcha(self):\n recaptcha = ReCaptcha(self)\n\n for _i in xrange(5):\n response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY)\n apiUrl = \"https://www.oboom.com/1.0/download/ticket\"\n params = {\"recaptcha_challenge_field\": challenge,\n \"recaptcha_response_field\": response,\n \"download_id\": self.fileId,\n \"token\": self.sessionToken}\n result = self.loadUrl(apiUrl, params)\n\n if result[0] == 200:\n self.downloadToken = result[1]\n self.downloadAuth = result[2]\n self.correctCaptcha()\n self.setWait(30)\n self.wait()\n break\n\n elif result[0] == 400:\n if result[1] == \"incorrect-captcha-sol\":\n self.invalidCaptcha()\n elif result[1] == \"captcha-timeout\":\n self.invalidCaptcha()\n elif result[1] == \"forbidden\":\n self.retry(5, 15 * 60, _(\"Service unavailable\"))\n\n elif result[0] == 403:\n if result[1] == -1: # another download is running\n self.setWait(15 * 60)\n else:\n self.setWait(result[1], True)\n self.wait()\n self.retry(5)\n else:\n self.invalidCaptcha()\n self.fail(_(\"Received invalid captcha 5 times\"))\n\n\n def getFileInfo(self, token, fileId):\n apiUrl = \"https://api.oboom.com/1.0/info\"\n params = {\"token\": token, \"items\": fileId, \"http_errors\": 0}\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n item = result[1][0]\n if item['state'] == \"online\":\n self.fileSize = item['size']\n self.fileName = item['name']\n else:\n self.offline()\n else:\n self.fail(_(\"Could not retrieve file info. Error code %s: %s\") % (result[0], result[1]))\n\n\n def getDownloadTicket(self):\n apiUrl = \"https://api.oboom.com/1/dl\"\n params = {\"item\": self.fileId, \"http_errors\": 0}\n if self.premium:\n params['token'] = self.sessionToken\n else:\n params['token'] = self.downloadToken\n params['auth'] = self.downloadAuth\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n self.downloadDomain = result[1]\n self.downloadTicket = result[2]\n elif result[0] == 421:\n self.retry(wait_time=result[2] + 60, reason=_(\"Connection limit exceeded\"))\n else:\n self.fail(_(\"Could not retrieve download ticket. Error code: %s\") % result[0])\n", "path": "module/plugins/hoster/OboomCom.py"}]}
| 1,876 | 296 |
gh_patches_debug_7405
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-823
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
urllib instrumentation fails for local file access
When reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.
https://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217
urllib instrumentation fails for local file access
When reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.
https://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217
--- END ISSUE ---
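A minimal sketch of the kind of type guard the patch in this record introduces; `StatusCode` is stubbed here so the snippet runs standalone:

```python
from enum import Enum

class StatusCode(Enum):  # stand-in for opentelemetry.trace.StatusCode
    UNSET = 0
    ERROR = 2

def http_status_to_status_code(status, allow_redirect=True, server_span=False):
    # file:// responses report a status of None; `None < 100` raises TypeError on
    # Python 3, so treat any non-integer status as UNSET before the comparisons.
    if not isinstance(status, int):
        return StatusCode.UNSET
    if status < 100:
        return StatusCode.ERROR
    if status <= 299 or (status <= 399 and allow_redirect) or (status <= 499 and server_span):
        return StatusCode.UNSET
    return StatusCode.ERROR

print(http_status_to_status_code(None))  # StatusCode.UNSET instead of a TypeError
print(http_status_to_status_code(404))   # StatusCode.ERROR
```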
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Dict, Sequence
16
17 from wrapt import ObjectProxy
18
19 from opentelemetry import context, trace
20
21 # pylint: disable=unused-import
22 # pylint: disable=E0611
23 from opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401
24 from opentelemetry.propagate import extract
25 from opentelemetry.trace import StatusCode
26
27
28 def extract_attributes_from_object(
29 obj: any, attributes: Sequence[str], existing: Dict[str, str] = None
30 ) -> Dict[str, str]:
31 extracted = {}
32 if existing:
33 extracted.update(existing)
34 for attr in attributes:
35 value = getattr(obj, attr, None)
36 if value is not None:
37 extracted[attr] = str(value)
38 return extracted
39
40
41 def http_status_to_status_code(
42 status: int,
43 allow_redirect: bool = True,
44 server_span: bool = False,
45 ) -> StatusCode:
46 """Converts an HTTP status code to an OpenTelemetry canonical status code
47
48 Args:
49 status (int): HTTP status code
50 """
51 # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status
52 if status < 100:
53 return StatusCode.ERROR
54 if status <= 299:
55 return StatusCode.UNSET
56 if status <= 399 and allow_redirect:
57 return StatusCode.UNSET
58 if status <= 499 and server_span:
59 return StatusCode.UNSET
60 return StatusCode.ERROR
61
62
63 def unwrap(obj, attr: str):
64 """Given a function that was wrapped by wrapt.wrap_function_wrapper, unwrap it
65
66 Args:
67 obj: Object that holds a reference to the wrapped function
68 attr (str): Name of the wrapped function
69 """
70 func = getattr(obj, attr, None)
71 if func and isinstance(func, ObjectProxy) and hasattr(func, "__wrapped__"):
72 setattr(obj, attr, func.__wrapped__)
73
74
75 def _start_internal_or_server_span(
76 tracer, span_name, start_time, context_carrier, context_getter
77 ):
78 """Returns internal or server span along with the token which can be used by caller to reset context
79
80
81 Args:
82 tracer : tracer in use by given instrumentation library
83 name (string): name of the span
84 start_time : start time of the span
85 context_carrier : object which contains values that are
86 used to construct a Context. This object
87 must be paired with an appropriate getter
88 which understands how to extract a value from it.
89 context_getter : an object which contains a get function that can retrieve zero
90 or more values from the carrier and a keys function that can get all the keys
91 from carrier.
92 """
93
94 token = ctx = span_kind = None
95 if trace.get_current_span() is trace.INVALID_SPAN:
96 ctx = extract(context_carrier, getter=context_getter)
97 token = context.attach(ctx)
98 span_kind = trace.SpanKind.SERVER
99 else:
100 ctx = context.get_current()
101 span_kind = trace.SpanKind.INTERNAL
102 span = tracer.start_span(
103 name=span_name,
104 context=ctx,
105 kind=span_kind,
106 start_time=start_time,
107 )
108 return span, token
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
@@ -49,6 +49,9 @@
status (int): HTTP status code
"""
# See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status
+ if not isinstance(status, int):
+ return StatusCode.UNSET
+
if status < 100:
return StatusCode.ERROR
if status <= 299:
|
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n@@ -49,6 +49,9 @@\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n+ if not isinstance(status, int):\n+ return StatusCode.UNSET\n+\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n", "issue": "urllib instrumentation fails for local file access\nWhen reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217\nurllib instrumentation fails for local file access\nWhen reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Sequence\n\nfrom wrapt import ObjectProxy\n\nfrom opentelemetry import context, trace\n\n# pylint: disable=unused-import\n# pylint: disable=E0611\nfrom opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.trace import StatusCode\n\n\ndef extract_attributes_from_object(\n obj: any, attributes: Sequence[str], existing: Dict[str, str] = None\n) -> Dict[str, str]:\n extracted = {}\n if existing:\n extracted.update(existing)\n for attr in attributes:\n value = getattr(obj, attr, None)\n if value is not None:\n extracted[attr] = str(value)\n return extracted\n\n\ndef http_status_to_status_code(\n status: int,\n allow_redirect: bool = True,\n server_span: bool = False,\n) -> StatusCode:\n \"\"\"Converts an HTTP status code to an OpenTelemetry canonical status code\n\n Args:\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n return StatusCode.UNSET\n if status <= 399 and allow_redirect:\n return StatusCode.UNSET\n if status <= 499 and server_span:\n return StatusCode.UNSET\n return StatusCode.ERROR\n\n\ndef unwrap(obj, attr: str):\n \"\"\"Given a function that was wrapped by 
wrapt.wrap_function_wrapper, unwrap it\n\n Args:\n obj: Object that holds a reference to the wrapped function\n attr (str): Name of the wrapped function\n \"\"\"\n func = getattr(obj, attr, None)\n if func and isinstance(func, ObjectProxy) and hasattr(func, \"__wrapped__\"):\n setattr(obj, attr, func.__wrapped__)\n\n\ndef _start_internal_or_server_span(\n tracer, span_name, start_time, context_carrier, context_getter\n):\n \"\"\"Returns internal or server span along with the token which can be used by caller to reset context\n\n\n Args:\n tracer : tracer in use by given instrumentation library\n name (string): name of the span\n start_time : start time of the span\n context_carrier : object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate getter\n which understands how to extract a value from it.\n context_getter : an object which contains a get function that can retrieve zero\n or more values from the carrier and a keys function that can get all the keys\n from carrier.\n \"\"\"\n\n token = ctx = span_kind = None\n if trace.get_current_span() is trace.INVALID_SPAN:\n ctx = extract(context_carrier, getter=context_getter)\n token = context.attach(ctx)\n span_kind = trace.SpanKind.SERVER\n else:\n ctx = context.get_current()\n span_kind = trace.SpanKind.INTERNAL\n span = tracer.start_span(\n name=span_name,\n context=ctx,\n kind=span_kind,\n start_time=start_time,\n )\n return span, token\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Sequence\n\nfrom wrapt import ObjectProxy\n\nfrom opentelemetry import context, trace\n\n# pylint: disable=unused-import\n# pylint: disable=E0611\nfrom opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.trace import StatusCode\n\n\ndef extract_attributes_from_object(\n obj: any, attributes: Sequence[str], existing: Dict[str, str] = None\n) -> Dict[str, str]:\n extracted = {}\n if existing:\n extracted.update(existing)\n for attr in attributes:\n value = getattr(obj, attr, None)\n if value is not None:\n extracted[attr] = str(value)\n return extracted\n\n\ndef http_status_to_status_code(\n status: int,\n allow_redirect: bool = True,\n server_span: bool = False,\n) -> StatusCode:\n \"\"\"Converts an HTTP status code to an OpenTelemetry canonical status code\n\n Args:\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n if not isinstance(status, int):\n return StatusCode.UNSET\n\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n return StatusCode.UNSET\n if status <= 399 and allow_redirect:\n return StatusCode.UNSET\n if status <= 499 and server_span:\n return StatusCode.UNSET\n return 
StatusCode.ERROR\n\n\ndef unwrap(obj, attr: str):\n \"\"\"Given a function that was wrapped by wrapt.wrap_function_wrapper, unwrap it\n\n Args:\n obj: Object that holds a reference to the wrapped function\n attr (str): Name of the wrapped function\n \"\"\"\n func = getattr(obj, attr, None)\n if func and isinstance(func, ObjectProxy) and hasattr(func, \"__wrapped__\"):\n setattr(obj, attr, func.__wrapped__)\n\n\ndef _start_internal_or_server_span(\n tracer, span_name, start_time, context_carrier, context_getter\n):\n \"\"\"Returns internal or server span along with the token which can be used by caller to reset context\n\n\n Args:\n tracer : tracer in use by given instrumentation library\n name (string): name of the span\n start_time : start time of the span\n context_carrier : object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate getter\n which understands how to extract a value from it.\n context_getter : an object which contains a get function that can retrieve zero\n or more values from the carrier and a keys function that can get all the keys\n from carrier.\n \"\"\"\n\n token = ctx = span_kind = None\n if trace.get_current_span() is trace.INVALID_SPAN:\n ctx = extract(context_carrier, getter=context_getter)\n token = context.attach(ctx)\n span_kind = trace.SpanKind.SERVER\n else:\n ctx = context.get_current()\n span_kind = trace.SpanKind.INTERNAL\n span = tracer.start_span(\n name=span_name,\n context=ctx,\n kind=span_kind,\n start_time=start_time,\n )\n return span, token\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py"}]}
| 1,567 | 166 |
gh_patches_debug_8569
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-1091
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CI: Problem with `gen_cli.py`
see: https://app.circleci.com/pipelines/github/mne-tools/mne-bids/4785/workflows/21ad6804-1cc2-42dd-9133-f24de2ea3db5/jobs/6923
```
Traceback (most recent call last):
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py", line 94, in emit
results.append(listener.handler(self.app, *args))
File "/home/circleci/project/doc/sphinxext/gen_cli.py", line 84, in generate_cli_rst
output[0], output[2] = output[2], output[0]
IndexError: list index out of range
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/cmd/build.py", line 276, in build_main
app = Sphinx(args.sourcedir, args.confdir, args.outputdir,
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py", line 262, in __init__
self._init_builder()
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py", line 335, in _init_builder
self.events.emit('builder-inited')
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py", line 105, in emit
raise ExtensionError(__("Handler %r for event %r threw an exception") %
sphinx.errors.ExtensionError: Handler <function generate_cli_rst at 0x7fe9bf90c160> for event 'builder-inited' threw an exception (exception: list index out of range)
```
https://github.com/mne-tools/mne-bids/blob/46b0a5300ed5c17ca93b8bbf1d9542069597ef62/doc/sphinxext/gen_cli.py#L1
--- END ISSUE ---
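The traceback boils down to `output` having fewer than three lines, most likely because the captured `--help` text came back empty (the patch below stops overriding `run_subprocess`'s own stdout/stderr handling). A toy reproduction of the IndexError itself, not mne-bids code:

```python
# Toy reproduction: an empty captured help text splits into an empty list,
# so swapping lines 0 and 2 raises the IndexError seen in CI.
output = "".splitlines()   # [] -- what an empty capture produces
try:
    output[0], output[2] = output[2], output[0]
except IndexError as err:
    print("IndexError:", err)   # list index out of range
```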
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/sphinxext/gen_cli.py`
Content:
```
1 """Custom sphinx extension to generate docs for the command line interface.
2
3 Inspired by MNE-Python's `gen_commands.py`
4 see: github.com/mne-tools/mne-python/blob/main/doc/sphinxext/gen_commands.py
5 """
6 # Authors: Eric Larson <[email protected]>
7 # Alexandre Gramfort <[email protected]>
8 # Stefan Appelhoff <[email protected]>
9 #
10 # License: BSD-3-Clause
11 import os
12 import glob
13 from os import path as op
14 import subprocess
15 import sys
16
17 import sphinx.util
18 from mne.utils import run_subprocess, _replace_md5
19
20
21 def setup(app):
22 """Set up the app."""
23 app.connect('builder-inited', generate_cli_rst)
24
25
26 # Header markings go:
27 # 1. =/= : Page title
28 # 2. = : Command name
29 # 3. -/- : Command description
30 # 4. - : Command sections (Examples, Notes)
31
32 header = """\
33 :orphan:
34
35 .. _python_cli:
36
37 =====================================
38 MNE-BIDS Command Line Interface (CLI)
39 =====================================
40
41 Here we list the MNE-BIDS tools that you can use from the command line.
42
43 """
44
45 command_rst = """
46
47 .. _gen_%s:
48
49 %s
50 %s
51
52 .. rst-class:: callout
53
54 %s
55
56 """
57
58
59 def generate_cli_rst(app=None):
60 """Generate the command line interface docs."""
61 out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated'))
62 if not op.isdir(out_dir):
63 os.mkdir(out_dir)
64 out_fname = op.join(out_dir, 'cli.rst.new')
65
66 cli_path = op.abspath(
67 op.join(os.path.dirname(__file__), '..', '..', 'mne_bids', 'commands'))
68 fnames = sorted([
69 op.basename(fname)
70 for fname in glob.glob(op.join(cli_path, 'mne_bids*.py'))])
71 iterator = sphinx.util.status_iterator(
72 fnames, 'generating MNE-BIDS cli help ... ', length=len(fnames))
73 with open(out_fname, 'w', encoding='utf-8') as f:
74 f.write(header)
75 for fname in iterator:
76 cmd_name = fname[:-3]
77 run_name = op.join(cli_path, fname)
78 output, _ = run_subprocess([sys.executable, run_name, '--help'],
79 stdout=subprocess.PIPE,
80 stderr=subprocess.PIPE, verbose=False)
81 output = output.splitlines()
82
83 # Swap usage and title lines
84 output[0], output[2] = output[2], output[0]
85
86 # Add header marking
87 for idx in (1, 0):
88 output.insert(idx, '-' * len(output[0]))
89
90 # Add code styling for the "Usage: " line
91 for li, line in enumerate(output):
92 if line.startswith('Usage: mne_bids '):
93 output[li] = 'Usage: ``%s``' % line[7:]
94 break
95
96 # Turn "Options:" into field list
97 if 'Options:' in output:
98 ii = output.index('Options:')
99 output[ii] = 'Options'
100 output.insert(ii + 1, '-------')
101 output.insert(ii + 2, '')
102 output.insert(ii + 3, '.. rst-class:: field-list cmd-list')
103 output.insert(ii + 4, '')
104 output = '\n'.join(output)
105 f.write(command_rst % (cmd_name,
106 cmd_name.replace('mne_bids_', 'mne_bids '),
107 '=' * len(cmd_name),
108 output))
109 _replace_md5(out_fname)
110 print('[Done]')
111
112
113 # This is useful for testing/iterating to see what the result looks like
114 if __name__ == '__main__':
115 generate_cli_rst()
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/sphinxext/gen_cli.py b/doc/sphinxext/gen_cli.py
--- a/doc/sphinxext/gen_cli.py
+++ b/doc/sphinxext/gen_cli.py
@@ -76,8 +76,7 @@
cmd_name = fname[:-3]
run_name = op.join(cli_path, fname)
output, _ = run_subprocess([sys.executable, run_name, '--help'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, verbose=False)
+ verbose=False)
output = output.splitlines()
# Swap usage and title lines
|
{"golden_diff": "diff --git a/doc/sphinxext/gen_cli.py b/doc/sphinxext/gen_cli.py\n--- a/doc/sphinxext/gen_cli.py\n+++ b/doc/sphinxext/gen_cli.py\n@@ -76,8 +76,7 @@\n cmd_name = fname[:-3]\n run_name = op.join(cli_path, fname)\n output, _ = run_subprocess([sys.executable, run_name, '--help'],\n- stdout=subprocess.PIPE,\n- stderr=subprocess.PIPE, verbose=False)\n+ verbose=False)\n output = output.splitlines()\n \n # Swap usage and title lines\n", "issue": "CI: Problem with `gen_cli.py`\nsee: https://app.circleci.com/pipelines/github/mne-tools/mne-bids/4785/workflows/21ad6804-1cc2-42dd-9133-f24de2ea3db5/jobs/6923\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py\", line 94, in emit\r\n results.append(listener.handler(self.app, *args))\r\n File \"/home/circleci/project/doc/sphinxext/gen_cli.py\", line 84, in generate_cli_rst\r\n output[0], output[2] = output[2], output[0]\r\nIndexError: list index out of range\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/cmd/build.py\", line 276, in build_main\r\n app = Sphinx(args.sourcedir, args.confdir, args.outputdir,\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py\", line 262, in __init__\r\n self._init_builder()\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py\", line 335, in _init_builder\r\n self.events.emit('builder-inited')\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py\", line 105, in emit\r\n raise ExtensionError(__(\"Handler %r for event %r threw an exception\") %\r\nsphinx.errors.ExtensionError: Handler <function generate_cli_rst at 0x7fe9bf90c160> for event 'builder-inited' threw an exception (exception: list index out of range)\r\n```\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/46b0a5300ed5c17ca93b8bbf1d9542069597ef62/doc/sphinxext/gen_cli.py#L1\n", "before_files": [{"content": "\"\"\"Custom sphinx extension to generate docs for the command line interface.\n\nInspired by MNE-Python's `gen_commands.py`\nsee: github.com/mne-tools/mne-python/blob/main/doc/sphinxext/gen_commands.py\n\"\"\"\n# Authors: Eric Larson <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD-3-Clause\nimport os\nimport glob\nfrom os import path as op\nimport subprocess\nimport sys\n\nimport sphinx.util\nfrom mne.utils import run_subprocess, _replace_md5\n\n\ndef setup(app):\n \"\"\"Set up the app.\"\"\"\n app.connect('builder-inited', generate_cli_rst)\n\n\n# Header markings go:\n# 1. =/= : Page title\n# 2. = : Command name\n# 3. -/- : Command description\n# 4. - : Command sections (Examples, Notes)\n\nheader = \"\"\"\\\n:orphan:\n\n.. _python_cli:\n\n=====================================\nMNE-BIDS Command Line Interface (CLI)\n=====================================\n\nHere we list the MNE-BIDS tools that you can use from the command line.\n\n\"\"\"\n\ncommand_rst = \"\"\"\n\n.. _gen_%s:\n\n%s\n%s\n\n.. 
rst-class:: callout\n\n%s\n\n\"\"\"\n\n\ndef generate_cli_rst(app=None):\n \"\"\"Generate the command line interface docs.\"\"\"\n out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated'))\n if not op.isdir(out_dir):\n os.mkdir(out_dir)\n out_fname = op.join(out_dir, 'cli.rst.new')\n\n cli_path = op.abspath(\n op.join(os.path.dirname(__file__), '..', '..', 'mne_bids', 'commands'))\n fnames = sorted([\n op.basename(fname)\n for fname in glob.glob(op.join(cli_path, 'mne_bids*.py'))])\n iterator = sphinx.util.status_iterator(\n fnames, 'generating MNE-BIDS cli help ... ', length=len(fnames))\n with open(out_fname, 'w', encoding='utf-8') as f:\n f.write(header)\n for fname in iterator:\n cmd_name = fname[:-3]\n run_name = op.join(cli_path, fname)\n output, _ = run_subprocess([sys.executable, run_name, '--help'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, verbose=False)\n output = output.splitlines()\n\n # Swap usage and title lines\n output[0], output[2] = output[2], output[0]\n\n # Add header marking\n for idx in (1, 0):\n output.insert(idx, '-' * len(output[0]))\n\n # Add code styling for the \"Usage: \" line\n for li, line in enumerate(output):\n if line.startswith('Usage: mne_bids '):\n output[li] = 'Usage: ``%s``' % line[7:]\n break\n\n # Turn \"Options:\" into field list\n if 'Options:' in output:\n ii = output.index('Options:')\n output[ii] = 'Options'\n output.insert(ii + 1, '-------')\n output.insert(ii + 2, '')\n output.insert(ii + 3, '.. rst-class:: field-list cmd-list')\n output.insert(ii + 4, '')\n output = '\\n'.join(output)\n f.write(command_rst % (cmd_name,\n cmd_name.replace('mne_bids_', 'mne_bids '),\n '=' * len(cmd_name),\n output))\n _replace_md5(out_fname)\n print('[Done]')\n\n\n# This is useful for testing/iterating to see what the result looks like\nif __name__ == '__main__':\n generate_cli_rst()\n", "path": "doc/sphinxext/gen_cli.py"}], "after_files": [{"content": "\"\"\"Custom sphinx extension to generate docs for the command line interface.\n\nInspired by MNE-Python's `gen_commands.py`\nsee: github.com/mne-tools/mne-python/blob/main/doc/sphinxext/gen_commands.py\n\"\"\"\n# Authors: Eric Larson <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD-3-Clause\nimport os\nimport glob\nfrom os import path as op\nimport subprocess\nimport sys\n\nimport sphinx.util\nfrom mne.utils import run_subprocess, _replace_md5\n\n\ndef setup(app):\n \"\"\"Set up the app.\"\"\"\n app.connect('builder-inited', generate_cli_rst)\n\n\n# Header markings go:\n# 1. =/= : Page title\n# 2. = : Command name\n# 3. -/- : Command description\n# 4. - : Command sections (Examples, Notes)\n\nheader = \"\"\"\\\n:orphan:\n\n.. _python_cli:\n\n=====================================\nMNE-BIDS Command Line Interface (CLI)\n=====================================\n\nHere we list the MNE-BIDS tools that you can use from the command line.\n\n\"\"\"\n\ncommand_rst = \"\"\"\n\n.. _gen_%s:\n\n%s\n%s\n\n.. 
rst-class:: callout\n\n%s\n\n\"\"\"\n\n\ndef generate_cli_rst(app=None):\n \"\"\"Generate the command line interface docs.\"\"\"\n out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated'))\n if not op.isdir(out_dir):\n os.mkdir(out_dir)\n out_fname = op.join(out_dir, 'cli.rst.new')\n\n cli_path = op.abspath(\n op.join(os.path.dirname(__file__), '..', '..', 'mne_bids', 'commands'))\n fnames = sorted([\n op.basename(fname)\n for fname in glob.glob(op.join(cli_path, 'mne_bids*.py'))])\n iterator = sphinx.util.status_iterator(\n fnames, 'generating MNE-BIDS cli help ... ', length=len(fnames))\n with open(out_fname, 'w', encoding='utf-8') as f:\n f.write(header)\n for fname in iterator:\n cmd_name = fname[:-3]\n run_name = op.join(cli_path, fname)\n output, _ = run_subprocess([sys.executable, run_name, '--help'],\n verbose=False)\n output = output.splitlines()\n\n # Swap usage and title lines\n output[0], output[2] = output[2], output[0]\n\n # Add header marking\n for idx in (1, 0):\n output.insert(idx, '-' * len(output[0]))\n\n # Add code styling for the \"Usage: \" line\n for li, line in enumerate(output):\n if line.startswith('Usage: mne_bids '):\n output[li] = 'Usage: ``%s``' % line[7:]\n break\n\n # Turn \"Options:\" into field list\n if 'Options:' in output:\n ii = output.index('Options:')\n output[ii] = 'Options'\n output.insert(ii + 1, '-------')\n output.insert(ii + 2, '')\n output.insert(ii + 3, '.. rst-class:: field-list cmd-list')\n output.insert(ii + 4, '')\n output = '\\n'.join(output)\n f.write(command_rst % (cmd_name,\n cmd_name.replace('mne_bids_', 'mne_bids '),\n '=' * len(cmd_name),\n output))\n _replace_md5(out_fname)\n print('[Done]')\n\n\n# This is useful for testing/iterating to see what the result looks like\nif __name__ == '__main__':\n generate_cli_rst()\n", "path": "doc/sphinxext/gen_cli.py"}]}
| 1,833 | 126 |
gh_patches_debug_4621
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-379
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnicodeDecodeError in compiler.py
Reported here: https://stackoverflow.com/questions/45473903/unicodedecodeerror-when-i-use-cuda-to-train-dataset
Versions:
- chainer (2.0.2)
- cupy (1.0.2)
It seems `nvcc` generated non-UTF8 output.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/cuda/compiler.py`
Content:
```
1 import hashlib
2 import os
3 import re
4 import shutil
5 import subprocess
6 import sys
7 import tempfile
8
9 import six
10
11 from cupy.cuda import device
12 from cupy.cuda import function
13
14
15 _nvcc_version = None
16
17
18 def _get_nvcc_version():
19 global _nvcc_version
20 if _nvcc_version is None:
21 cmd = ['nvcc', '--version']
22 _nvcc_version = _run_nvcc(cmd, '.')
23
24 return _nvcc_version
25
26
27 def _get_arch():
28 cc = device.Device().compute_capability
29 return 'sm_%s' % cc
30
31
32 class TemporaryDirectory(object):
33
34 def __enter__(self):
35 self.path = tempfile.mkdtemp()
36 return self.path
37
38 def __exit__(self, exc_type, exc_value, traceback):
39 if exc_value is not None:
40 return
41
42 for name in os.listdir(self.path):
43 os.unlink(os.path.join(self.path, name))
44 os.rmdir(self.path)
45
46
47 def _run_nvcc(cmd, cwd):
48 try:
49 return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
50 except subprocess.CalledProcessError as e:
51 msg = ('`nvcc` command returns non-zero exit status. \n'
52 'command: {0}\n'
53 'return-code: {1}\n'
54 'stdout/stderr: \n'
55 '{2}'.format(e.cmd, e.returncode, e.output))
56 raise RuntimeError(msg)
57 except OSError as e:
58 msg = 'Failed to run `nvcc` command. ' \
59 'Check PATH environment variable: ' \
60 + str(e)
61 raise OSError(msg)
62
63
64 def nvcc(source, options=(), arch=None):
65 if not arch:
66 arch = _get_arch()
67 cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)
68
69 with TemporaryDirectory() as root_dir:
70 path = os.path.join(root_dir, 'kern')
71 cu_path = '%s.cu' % path
72 cubin_path = '%s.cubin' % path
73
74 with open(cu_path, 'w') as cu_file:
75 cu_file.write(source)
76
77 cmd.append(cu_path)
78 _run_nvcc(cmd, root_dir)
79
80 with open(cubin_path, 'rb') as bin_file:
81 return bin_file.read()
82
83
84 def preprocess(source, options=()):
85 cmd = ['nvcc', '--preprocess'] + list(options)
86 with TemporaryDirectory() as root_dir:
87 path = os.path.join(root_dir, 'kern')
88 cu_path = '%s.cu' % path
89
90 with open(cu_path, 'w') as cu_file:
91 cu_file.write(source)
92
93 cmd.append(cu_path)
94 pp_src = _run_nvcc(cmd, root_dir)
95
96 if isinstance(pp_src, six.binary_type):
97 pp_src = pp_src.decode('utf-8')
98 return re.sub('(?m)^#.*$', '', pp_src)
99
100
101 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
102
103
104 def get_cache_dir():
105 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
106
107
108 _empty_file_preprocess_cache = {}
109
110
111 def compile_with_cache(source, options=(), arch=None, cache_dir=None):
112 global _empty_file_preprocess_cache
113 if cache_dir is None:
114 cache_dir = get_cache_dir()
115 if arch is None:
116 arch = _get_arch()
117
118 if 'win32' == sys.platform:
119 options += ('-Xcompiler', '/wd 4819')
120 if sys.maxsize == 9223372036854775807:
121 options += '-m64',
122 elif sys.maxsize == 2147483647:
123 options += '-m32',
124
125 env = (arch, options, _get_nvcc_version())
126 if '#include' in source:
127 pp_src = '%s %s' % (env, preprocess(source, options))
128 else:
129 base = _empty_file_preprocess_cache.get(env, None)
130 if base is None:
131 base = _empty_file_preprocess_cache[env] = preprocess('', options)
132 pp_src = '%s %s %s' % (env, base, source)
133
134 if isinstance(pp_src, six.text_type):
135 pp_src = pp_src.encode('utf-8')
136 name = '%s_2.cubin' % hashlib.md5(pp_src).hexdigest()
137
138 if not os.path.isdir(cache_dir):
139 try:
140 os.makedirs(cache_dir)
141 except OSError:
142 if not os.path.isdir(cache_dir):
143 raise
144
145 mod = function.Module()
146 # To handle conflicts in concurrent situation, we adopt lock-free method
147 # to avoid performance degradation.
148 path = os.path.join(cache_dir, name)
149 if os.path.exists(path):
150 with open(path, 'rb') as file:
151 data = file.read()
152 if len(data) >= 32:
153 hash = data[:32]
154 cubin = data[32:]
155 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())
156 if hash == cubin_hash:
157 mod.load(cubin)
158 return mod
159
160 cubin = nvcc(source, options, arch)
161 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())
162
163 # shutil.move is not atomic operation, so it could result in a corrupted
164 # file. We detect it by appending md5 hash at the beginning of each cache
165 # file. If the file is corrupted, it will be ignored next time it is read.
166 with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
167 tf.write(cubin_hash)
168 tf.write(cubin)
169 temp_path = tf.name
170 shutil.move(temp_path, path)
171
172 mod.load(cubin)
173 return mod
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -93,9 +93,8 @@
cmd.append(cu_path)
pp_src = _run_nvcc(cmd, root_dir)
- if isinstance(pp_src, six.binary_type):
- pp_src = pp_src.decode('utf-8')
- return re.sub('(?m)^#.*$', '', pp_src)
+ assert isinstance(pp_src, six.binary_type)
+ return re.sub(b'(?m)^#.*$', b'', pp_src)
_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
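For context on why the patched `preprocess` stays at the bytes level: a minimal, self-contained sketch (not CuPy code — the sample byte string is fabricated) showing that a bytes regex sidesteps the UTF-8 decode that crashed on non-UTF-8 `nvcc` output.
```python
import re

# Stand-in for `nvcc --preprocess` output containing bytes that are not valid
# UTF-8 (e.g. a localized warning emitted under a non-UTF-8 codepage).
pp_src = b'# 1 "kern.cu"\n__global__ void k() {}\n\xff\xfe\n'

# pp_src.decode('utf-8') would raise UnicodeDecodeError, reproducing the
# reported crash; operating on bytes never decodes at all.
cleaned = re.sub(b'(?m)^#.*$', b'', pp_src)
print(cleaned)
```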
|
{"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -93,9 +93,8 @@\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n \n- if isinstance(pp_src, six.binary_type):\n- pp_src = pp_src.decode('utf-8')\n- return re.sub('(?m)^#.*$', '', pp_src)\n+ assert isinstance(pp_src, six.binary_type)\n+ return re.sub(b'(?m)^#.*$', b'', pp_src)\n \n \n _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n", "issue": "UnicodeDecodeError in compiler.py\nReported here: https://stackoverflow.com/questions/45473903/unicodedecodeerror-when-i-use-cuda-to-train-dataset\r\n\r\nVersions:\r\n- chainer (2.0.2)\r\n- cupy (1.0.2)\r\n\r\nIt seems `nvcc` generated non-UTF8 output.\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\n_nvcc_version = None\n\n\ndef _get_nvcc_version():\n global _nvcc_version\n if _nvcc_version is None:\n cmd = ['nvcc', '--version']\n _nvcc_version = _run_nvcc(cmd, '.')\n\n return _nvcc_version\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n msg = ('`nvcc` command returns non-zero exit status. \\n'\n 'command: {0}\\n'\n 'return-code: {1}\\n'\n 'stdout/stderr: \\n'\n '{2}'.format(e.cmd, e.returncode, e.output))\n raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. 
' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options, _get_nvcc_version())\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s_2.cubin' % hashlib.md5(pp_src).hexdigest()\n\n if not os.path.isdir(cache_dir):\n try:\n os.makedirs(cache_dir)\n except OSError:\n if not os.path.isdir(cache_dir):\n raise\n\n mod = function.Module()\n # To handle conflicts in concurrent situation, we adopt lock-free method\n # to avoid performance degradation.\n path = os.path.join(cache_dir, name)\n if os.path.exists(path):\n with open(path, 'rb') as file:\n data = file.read()\n if len(data) >= 32:\n hash = data[:32]\n cubin = data[32:]\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n if hash == cubin_hash:\n mod.load(cubin)\n return mod\n\n cubin = nvcc(source, options, arch)\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n\n # shutil.move is not atomic operation, so it could result in a corrupted\n # file. We detect it by appending md5 hash at the beginning of each cache\n # file. 
If the file is corrupted, it will be ignored next time it is read.\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\n tf.write(cubin_hash)\n tf.write(cubin)\n temp_path = tf.name\n shutil.move(temp_path, path)\n\n mod.load(cubin)\n return mod\n", "path": "cupy/cuda/compiler.py"}], "after_files": [{"content": "import hashlib\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\n_nvcc_version = None\n\n\ndef _get_nvcc_version():\n global _nvcc_version\n if _nvcc_version is None:\n cmd = ['nvcc', '--version']\n _nvcc_version = _run_nvcc(cmd, '.')\n\n return _nvcc_version\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n msg = ('`nvcc` command returns non-zero exit status. \\n'\n 'command: {0}\\n'\n 'return-code: {1}\\n'\n 'stdout/stderr: \\n'\n '{2}'.format(e.cmd, e.returncode, e.output))\n raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. ' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n assert isinstance(pp_src, six.binary_type)\n return re.sub(b'(?m)^#.*$', b'', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options, _get_nvcc_version())\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s_2.cubin' % hashlib.md5(pp_src).hexdigest()\n\n if not 
os.path.isdir(cache_dir):\n try:\n os.makedirs(cache_dir)\n except OSError:\n if not os.path.isdir(cache_dir):\n raise\n\n mod = function.Module()\n # To handle conflicts in concurrent situation, we adopt lock-free method\n # to avoid performance degradation.\n path = os.path.join(cache_dir, name)\n if os.path.exists(path):\n with open(path, 'rb') as file:\n data = file.read()\n if len(data) >= 32:\n hash = data[:32]\n cubin = data[32:]\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n if hash == cubin_hash:\n mod.load(cubin)\n return mod\n\n cubin = nvcc(source, options, arch)\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n\n # shutil.move is not atomic operation, so it could result in a corrupted\n # file. We detect it by appending md5 hash at the beginning of each cache\n # file. If the file is corrupted, it will be ignored next time it is read.\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\n tf.write(cubin_hash)\n tf.write(cubin)\n temp_path = tf.name\n shutil.move(temp_path, path)\n\n mod.load(cubin)\n return mod\n", "path": "cupy/cuda/compiler.py"}]}
| 2,056 | 155 |
gh_patches_debug_19589
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-839
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.
[AWS::ElasticBeanstalk::ConfigurationTemplate](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-beanstalk-configurationtemplate.html) and [AWS::ElasticBeanstalk::Environment](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html)
Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/elasticbeanstalk.py`
Content:
```
1 # Copyright (c) 2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7
8
9 WebServer = "WebServer"
10 Worker = "Worker"
11 WebServerType = "Standard"
12 WorkerType = "SQS/HTTP"
13
14
15 class SourceBundle(AWSProperty):
16 props = {
17 'S3Bucket': (basestring, True),
18 'S3Key': (basestring, True),
19 }
20
21
22 class SourceConfiguration(AWSProperty):
23 props = {
24 'ApplicationName': (basestring, True),
25 'TemplateName': (basestring, True),
26 }
27
28
29 class OptionSettings(AWSProperty):
30 props = {
31 'Namespace': (basestring, True),
32 'OptionName': (basestring, True),
33 'Value': (basestring, True),
34 }
35
36
37 class Application(AWSObject):
38 resource_type = "AWS::ElasticBeanstalk::Application"
39
40 props = {
41 'ApplicationName': (basestring, False),
42 'Description': (basestring, False),
43 }
44
45
46 class ApplicationVersion(AWSObject):
47 resource_type = "AWS::ElasticBeanstalk::ApplicationVersion"
48
49 props = {
50 'ApplicationName': (basestring, True),
51 'Description': (basestring, False),
52 'SourceBundle': (SourceBundle, False),
53 }
54
55
56 class ConfigurationTemplate(AWSObject):
57 resource_type = "AWS::ElasticBeanstalk::ConfigurationTemplate"
58
59 props = {
60 'ApplicationName': (basestring, True),
61 'Description': (basestring, False),
62 'EnvironmentId': (basestring, False),
63 'OptionSettings': ([OptionSettings], False),
64 'SolutionStackName': (basestring, False),
65 'SourceConfiguration': (SourceConfiguration, False),
66 }
67
68
69 def validate_tier_name(name):
70 valid_names = [WebServer, Worker]
71 if name not in valid_names:
72 raise ValueError('Tier name needs to be one of %r' % valid_names)
73 return name
74
75
76 def validate_tier_type(tier_type):
77 valid_types = [WebServerType, WorkerType]
78 if tier_type not in valid_types:
79 raise ValueError('Tier type needs to be one of %r' % valid_types)
80 return tier_type
81
82
83 class Tier(AWSProperty):
84 props = {
85 'Name': (validate_tier_name, False),
86 'Type': (validate_tier_type, False),
87 'Version': (basestring, False),
88 }
89
90
91 class Environment(AWSObject):
92 resource_type = "AWS::ElasticBeanstalk::Environment"
93
94 props = {
95 'ApplicationName': (basestring, True),
96 'CNAMEPrefix': (basestring, False),
97 'Description': (basestring, False),
98 'EnvironmentName': (basestring, False),
99 'OptionSettings': ([OptionSettings], False),
100 'SolutionStackName': (basestring, False),
101 'Tags': (Tags, False),
102 'TemplateName': (basestring, False),
103 'Tier': (Tier, False),
104 'VersionLabel': (basestring, False),
105 }
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py
--- a/troposphere/elasticbeanstalk.py
+++ b/troposphere/elasticbeanstalk.py
@@ -61,6 +61,7 @@
'Description': (basestring, False),
'EnvironmentId': (basestring, False),
'OptionSettings': ([OptionSettings], False),
+ 'PlatformArn': (basestring, False),
'SolutionStackName': (basestring, False),
'SourceConfiguration': (SourceConfiguration, False),
}
@@ -97,6 +98,7 @@
'Description': (basestring, False),
'EnvironmentName': (basestring, False),
'OptionSettings': ([OptionSettings], False),
+ 'PlatformArn': (basestring, False),
'SolutionStackName': (basestring, False),
'Tags': (Tags, False),
'TemplateName': (basestring, False),
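A hedged usage sketch of how the added property would be consumed from troposphere once the patch lands — the ARN, resource titles, and application name below are placeholders, not values taken from the issue.
```python
from troposphere import Template
from troposphere.elasticbeanstalk import Application, Environment

# Placeholder ARN: real custom-platform ARNs come from
# `aws elasticbeanstalk list-platform-versions` and are account/region specific.
PLATFORM_ARN = "arn:aws:elasticbeanstalk:eu-west-1::platform/ExamplePlatform/1.0.0"

t = Template()
t.add_resource(Application("App", ApplicationName="my-app"))
t.add_resource(Environment(
    "Env",
    ApplicationName="my-app",
    PlatformArn=PLATFORM_ARN,  # accepted once 'PlatformArn' is in props
))
print(t.to_json())
```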
|
{"golden_diff": "diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py\n--- a/troposphere/elasticbeanstalk.py\n+++ b/troposphere/elasticbeanstalk.py\n@@ -61,6 +61,7 @@\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n+ 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n@@ -97,6 +98,7 @@\n 'Description': (basestring, False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n+ 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n", "issue": "Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.\n[AWS::ElasticBeanstalk::ConfigurationTemplate](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-beanstalk-configurationtemplate.html) and [AWS::ElasticBeanstalk::Environment](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html)\r\nUse the PlatformArn property to specify a custom platform for Elastic Beanstalk.\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\n\n\nWebServer = \"WebServer\"\nWorker = \"Worker\"\nWebServerType = \"Standard\"\nWorkerType = \"SQS/HTTP\"\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass SourceConfiguration(AWSProperty):\n props = {\n 'ApplicationName': (basestring, True),\n 'TemplateName': (basestring, True),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Application(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Application\"\n\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass ApplicationVersion(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ApplicationVersion\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n }\n\n\nclass ConfigurationTemplate(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n\n\ndef validate_tier_name(name):\n valid_names = [WebServer, Worker]\n if name not in valid_names:\n raise ValueError('Tier name needs to be one of %r' % valid_names)\n return name\n\n\ndef validate_tier_type(tier_type):\n valid_types = [WebServerType, WorkerType]\n if tier_type not in valid_types:\n raise ValueError('Tier type needs to be one of %r' % valid_types)\n return tier_type\n\n\nclass Tier(AWSProperty):\n props = {\n 'Name': (validate_tier_name, False),\n 'Type': (validate_tier_type, False),\n 'Version': (basestring, False),\n }\n\n\nclass Environment(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Environment\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, 
False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n 'Tier': (Tier, False),\n 'VersionLabel': (basestring, False),\n }\n", "path": "troposphere/elasticbeanstalk.py"}], "after_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\n\n\nWebServer = \"WebServer\"\nWorker = \"Worker\"\nWebServerType = \"Standard\"\nWorkerType = \"SQS/HTTP\"\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass SourceConfiguration(AWSProperty):\n props = {\n 'ApplicationName': (basestring, True),\n 'TemplateName': (basestring, True),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Application(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Application\"\n\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass ApplicationVersion(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ApplicationVersion\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n }\n\n\nclass ConfigurationTemplate(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n\n\ndef validate_tier_name(name):\n valid_names = [WebServer, Worker]\n if name not in valid_names:\n raise ValueError('Tier name needs to be one of %r' % valid_names)\n return name\n\n\ndef validate_tier_type(tier_type):\n valid_types = [WebServerType, WorkerType]\n if tier_type not in valid_types:\n raise ValueError('Tier type needs to be one of %r' % valid_types)\n return tier_type\n\n\nclass Tier(AWSProperty):\n props = {\n 'Name': (validate_tier_name, False),\n 'Type': (validate_tier_type, False),\n 'Version': (basestring, False),\n }\n\n\nclass Environment(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Environment\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n 'Tier': (Tier, False),\n 'VersionLabel': (basestring, False),\n }\n", "path": "troposphere/elasticbeanstalk.py"}]}
| 1,262 | 212 |
gh_patches_debug_26636
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-456
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter archived and draft projects from the wagtail frontpage selection element
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/cms/models.py`
Content:
```
1 from django.db import models
2 from django.forms import widgets
3 from modelcluster.fields import ParentalKey
4 from modelcluster.models import ClusterableModel
5 from wagtail.wagtailadmin import edit_handlers
6 from wagtail.wagtailcore import blocks
7 from wagtail.wagtailcore import fields
8 from wagtail.wagtailcore.models import Orderable
9 from wagtail.wagtailcore.models import Page
10 from wagtail.wagtailforms.models import AbstractEmailForm
11 from wagtail.wagtailforms.models import AbstractFormField
12 from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
13 from wagtail.wagtailsnippets.models import register_snippet
14
15 from adhocracy4.projects.models import Project
16
17 from . import emails
18
19
20 class SimplePage(Page):
21 body = fields.RichTextField(blank=True)
22
23 content_panels = [
24 edit_handlers.FieldPanel('title'),
25 edit_handlers.FieldPanel('body'),
26 ]
27
28 subpage_types = []
29
30
31 class ProjectSelectionBlock(blocks.ChooserBlock):
32 target_model = Project
33 widget = widgets.Select
34
35 def value_for_form(self, value):
36 if isinstance(value, Project):
37 return value.pk
38 return value
39
40
41 class ProjectsWrapperBlock(blocks.StructBlock):
42 title = blocks.CharBlock(max_length=80)
43 projects = blocks.ListBlock(
44 ProjectSelectionBlock(label='Project'),
45 )
46
47 class Meta:
48 template = 'meinberlin_cms/blocks/projects_block.html'
49
50
51 class CallToActionBlock(blocks.StructBlock):
52 body = blocks.RichTextBlock()
53 link = blocks.CharBlock()
54 link_text = blocks.CharBlock(max_length=50, label='Link Text')
55
56 class Meta:
57 template = 'meinberlin_cms/blocks/cta_block.html'
58
59
60 class ColumnsBlock(blocks.StructBlock):
61 columns_count = blocks.ChoiceBlock(choices=[
62 (2, 'Two columns'),
63 (3, 'Three columns'),
64 (4, 'Four columns'),
65 ], default=2)
66
67 columns = blocks.ListBlock(
68 blocks.RichTextBlock(label='Column body'),
69 )
70
71 class Meta:
72 template = 'meinberlin_cms/blocks/columns_block.html'
73
74
75 class HomePage(Page):
76 body = fields.StreamField([
77 ('paragraph', blocks.RichTextBlock(
78 template='meinberlin_cms/blocks/richtext_block.html'
79 )),
80 ('call_to_action', CallToActionBlock()),
81 ('columns_text', ColumnsBlock()),
82 ('projects', ProjectsWrapperBlock()),
83 ])
84
85 subtitle = models.CharField(max_length=120)
86
87 header_image = models.ForeignKey(
88 'wagtailimages.Image',
89 null=True,
90 blank=False,
91 on_delete=models.SET_NULL,
92 related_name='+'
93 )
94
95 content_panels = Page.content_panels + [
96 edit_handlers.FieldPanel('subtitle'),
97 ImageChooserPanel('header_image'),
98 edit_handlers.StreamFieldPanel('body'),
99 ]
100
101
102 class MenuItem(models.Model):
103 title = models.CharField(max_length=255)
104 link_page = models.ForeignKey('wagtailcore.Page')
105
106 @property
107 def url(self):
108 return self.link_page.url
109
110 def __str__(self):
111 return self.title
112
113 panels = [
114 edit_handlers.FieldPanel('title'),
115 edit_handlers.PageChooserPanel('link_page')
116 ]
117
118
119 @register_snippet
120 class NavigationMenu(ClusterableModel):
121 title = models.CharField(max_length=255, null=False, blank=False)
122
123 def __str__(self):
124 return self.title
125
126 panels = [
127 edit_handlers.FieldPanel('title'),
128 edit_handlers.InlinePanel('items')
129 ]
130
131
132 class NavigationMenuItem(Orderable, MenuItem):
133 parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')
134
135
136 class EmailFormField(AbstractFormField):
137 page = ParentalKey('EmailFormPage', related_name='form_fields')
138
139
140 class EmailFormPage(AbstractEmailForm):
141 intro = fields.RichTextField(
142 help_text='Introduction text shown above the form'
143 )
144 thank_you = fields.RichTextField(
145 help_text='Text shown after form submission',
146 )
147 email_content = models.CharField(
148 max_length=200,
149 help_text='Email content message',
150 )
151 attach_as = models.CharField(
152 max_length=3,
153 choices=(
154 ('csv', 'CSV Document'),
155 ('txt', 'Text'),
156 ),
157 default='csv',
158 help_text='Form results are send in this document format',
159 )
160
161 content_panels = AbstractEmailForm.content_panels + [
162 edit_handlers.MultiFieldPanel([
163 edit_handlers.FieldPanel('intro', classname='full'),
164 edit_handlers.FieldPanel('thank_you', classname='full'),
165 ], 'Page'),
166 edit_handlers.MultiFieldPanel([
167 edit_handlers.FieldPanel('to_address'),
168 edit_handlers.FieldPanel('subject'),
169 edit_handlers.FieldPanel('email_content', classname='full'),
170 edit_handlers.FieldPanel('attach_as'),
171 ], 'Email'),
172 edit_handlers.InlinePanel('form_fields', label='Form fields'),
173 ]
174
175 def send_mail(self, form):
176 self.form = form
177 if self.attach_as == 'csv':
178 emails.CsvFormEmail.send(self)
179 elif self.attach_as == 'txt':
180 emails.TextFormEmail.send(self)
181
182 @property
183 def field_values(self):
184 fields = {}
185 for field in self.form:
186 value = field.value()
187 if isinstance(value, list):
188 value = ', '.join(value)
189 fields[field.label] = value
190 return fields
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/cms/models.py b/apps/cms/models.py
--- a/apps/cms/models.py
+++ b/apps/cms/models.py
@@ -1,5 +1,6 @@
+from django import forms
from django.db import models
-from django.forms import widgets
+from django.utils.functional import cached_property
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin import edit_handlers
@@ -30,13 +31,30 @@
class ProjectSelectionBlock(blocks.ChooserBlock):
target_model = Project
- widget = widgets.Select
+ widget = forms.widgets.Select
+
+ @cached_property
+ def field(self):
+ return forms.ModelChoiceField(
+ queryset=self.target_model.objects.filter(
+ is_draft=False,
+ is_archived=False,
+ is_public=True),
+ widget=self.widget,
+ required=self._required,
+ help_text=self._help_text)
def value_for_form(self, value):
if isinstance(value, Project):
return value.pk
return value
+ def value_from_form(self, value):
+ # if project became unavailable (unpublished), selection will become an
+ # empty string and cause a server error on save, so we give a fallback
+ value = value or None
+ return super().value_from_form(value)
+
class ProjectsWrapperBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=80)
|
{"golden_diff": "diff --git a/apps/cms/models.py b/apps/cms/models.py\n--- a/apps/cms/models.py\n+++ b/apps/cms/models.py\n@@ -1,5 +1,6 @@\n+from django import forms\n from django.db import models\n-from django.forms import widgets\n+from django.utils.functional import cached_property\n from modelcluster.fields import ParentalKey\n from modelcluster.models import ClusterableModel\n from wagtail.wagtailadmin import edit_handlers\n@@ -30,13 +31,30 @@\n \n class ProjectSelectionBlock(blocks.ChooserBlock):\n target_model = Project\n- widget = widgets.Select\n+ widget = forms.widgets.Select\n+\n+ @cached_property\n+ def field(self):\n+ return forms.ModelChoiceField(\n+ queryset=self.target_model.objects.filter(\n+ is_draft=False,\n+ is_archived=False,\n+ is_public=True),\n+ widget=self.widget,\n+ required=self._required,\n+ help_text=self._help_text)\n \n def value_for_form(self, value):\n if isinstance(value, Project):\n return value.pk\n return value\n \n+ def value_from_form(self, value):\n+ # if project became unavailable (unpublished), selection will become an\n+ # empty string and cause a server error on save, so we give a fallback\n+ value = value or None\n+ return super().value_from_form(value)\n+\n \n class ProjectsWrapperBlock(blocks.StructBlock):\n title = blocks.CharBlock(max_length=80)\n", "issue": "Filter archived and draft projects from the wagtail frontpage selection element\n\n", "before_files": [{"content": "from django.db import models\nfrom django.forms import widgets\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.wagtailadmin import edit_handlers\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore import fields\nfrom wagtail.wagtailcore.models import Orderable\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailforms.models import AbstractEmailForm\nfrom wagtail.wagtailforms.models import AbstractFormField\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom adhocracy4.projects.models import Project\n\nfrom . 
import emails\n\n\nclass SimplePage(Page):\n body = fields.RichTextField(blank=True)\n\n content_panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.FieldPanel('body'),\n ]\n\n subpage_types = []\n\n\nclass ProjectSelectionBlock(blocks.ChooserBlock):\n target_model = Project\n widget = widgets.Select\n\n def value_for_form(self, value):\n if isinstance(value, Project):\n return value.pk\n return value\n\n\nclass ProjectsWrapperBlock(blocks.StructBlock):\n title = blocks.CharBlock(max_length=80)\n projects = blocks.ListBlock(\n ProjectSelectionBlock(label='Project'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/projects_block.html'\n\n\nclass CallToActionBlock(blocks.StructBlock):\n body = blocks.RichTextBlock()\n link = blocks.CharBlock()\n link_text = blocks.CharBlock(max_length=50, label='Link Text')\n\n class Meta:\n template = 'meinberlin_cms/blocks/cta_block.html'\n\n\nclass ColumnsBlock(blocks.StructBlock):\n columns_count = blocks.ChoiceBlock(choices=[\n (2, 'Two columns'),\n (3, 'Three columns'),\n (4, 'Four columns'),\n ], default=2)\n\n columns = blocks.ListBlock(\n blocks.RichTextBlock(label='Column body'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/columns_block.html'\n\n\nclass HomePage(Page):\n body = fields.StreamField([\n ('paragraph', blocks.RichTextBlock(\n template='meinberlin_cms/blocks/richtext_block.html'\n )),\n ('call_to_action', CallToActionBlock()),\n ('columns_text', ColumnsBlock()),\n ('projects', ProjectsWrapperBlock()),\n ])\n\n subtitle = models.CharField(max_length=120)\n\n header_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n content_panels = Page.content_panels + [\n edit_handlers.FieldPanel('subtitle'),\n ImageChooserPanel('header_image'),\n edit_handlers.StreamFieldPanel('body'),\n ]\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items')\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n\n\nclass EmailFormField(AbstractFormField):\n page = ParentalKey('EmailFormPage', related_name='form_fields')\n\n\nclass EmailFormPage(AbstractEmailForm):\n intro = fields.RichTextField(\n help_text='Introduction text shown above the form'\n )\n thank_you = fields.RichTextField(\n help_text='Text shown after form submission',\n )\n email_content = models.CharField(\n max_length=200,\n help_text='Email content message',\n )\n attach_as = models.CharField(\n max_length=3,\n choices=(\n ('csv', 'CSV Document'),\n ('txt', 'Text'),\n ),\n default='csv',\n help_text='Form results are send in this document format',\n )\n\n content_panels = AbstractEmailForm.content_panels + [\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('intro', classname='full'),\n edit_handlers.FieldPanel('thank_you', classname='full'),\n ], 'Page'),\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('to_address'),\n 
edit_handlers.FieldPanel('subject'),\n edit_handlers.FieldPanel('email_content', classname='full'),\n edit_handlers.FieldPanel('attach_as'),\n ], 'Email'),\n edit_handlers.InlinePanel('form_fields', label='Form fields'),\n ]\n\n def send_mail(self, form):\n self.form = form\n if self.attach_as == 'csv':\n emails.CsvFormEmail.send(self)\n elif self.attach_as == 'txt':\n emails.TextFormEmail.send(self)\n\n @property\n def field_values(self):\n fields = {}\n for field in self.form:\n value = field.value()\n if isinstance(value, list):\n value = ', '.join(value)\n fields[field.label] = value\n return fields\n", "path": "apps/cms/models.py"}], "after_files": [{"content": "from django import forms\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.wagtailadmin import edit_handlers\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore import fields\nfrom wagtail.wagtailcore.models import Orderable\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailforms.models import AbstractEmailForm\nfrom wagtail.wagtailforms.models import AbstractFormField\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\n\nclass SimplePage(Page):\n body = fields.RichTextField(blank=True)\n\n content_panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.FieldPanel('body'),\n ]\n\n subpage_types = []\n\n\nclass ProjectSelectionBlock(blocks.ChooserBlock):\n target_model = Project\n widget = forms.widgets.Select\n\n @cached_property\n def field(self):\n return forms.ModelChoiceField(\n queryset=self.target_model.objects.filter(\n is_draft=False,\n is_archived=False,\n is_public=True),\n widget=self.widget,\n required=self._required,\n help_text=self._help_text)\n\n def value_for_form(self, value):\n if isinstance(value, Project):\n return value.pk\n return value\n\n def value_from_form(self, value):\n # if project became unavailable (unpublished), selection will become an\n # empty string and cause a server error on save, so we give a fallback\n value = value or None\n return super().value_from_form(value)\n\n\nclass ProjectsWrapperBlock(blocks.StructBlock):\n title = blocks.CharBlock(max_length=80)\n projects = blocks.ListBlock(\n ProjectSelectionBlock(label='Project'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/projects_block.html'\n\n\nclass CallToActionBlock(blocks.StructBlock):\n body = blocks.RichTextBlock()\n link = blocks.CharBlock()\n link_text = blocks.CharBlock(max_length=50, label='Link Text')\n\n class Meta:\n template = 'meinberlin_cms/blocks/cta_block.html'\n\n\nclass ColumnsBlock(blocks.StructBlock):\n columns_count = blocks.ChoiceBlock(choices=[\n (2, 'Two columns'),\n (3, 'Three columns'),\n (4, 'Four columns'),\n ], default=2)\n\n columns = blocks.ListBlock(\n blocks.RichTextBlock(label='Column body'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/columns_block.html'\n\n\nclass HomePage(Page):\n body = fields.StreamField([\n ('paragraph', blocks.RichTextBlock(\n template='meinberlin_cms/blocks/richtext_block.html'\n )),\n ('call_to_action', CallToActionBlock()),\n ('columns_text', ColumnsBlock()),\n ('projects', ProjectsWrapperBlock()),\n ])\n\n subtitle = models.CharField(max_length=120)\n\n header_image = models.ForeignKey(\n 'wagtailimages.Image',\n 
null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n content_panels = Page.content_panels + [\n edit_handlers.FieldPanel('subtitle'),\n ImageChooserPanel('header_image'),\n edit_handlers.StreamFieldPanel('body'),\n ]\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items')\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n\n\nclass EmailFormField(AbstractFormField):\n page = ParentalKey('EmailFormPage', related_name='form_fields')\n\n\nclass EmailFormPage(AbstractEmailForm):\n intro = fields.RichTextField(\n help_text='Introduction text shown above the form'\n )\n thank_you = fields.RichTextField(\n help_text='Text shown after form submission',\n )\n email_content = models.CharField(\n max_length=200,\n help_text='Email content message',\n )\n attach_as = models.CharField(\n max_length=3,\n choices=(\n ('csv', 'CSV Document'),\n ('txt', 'Text'),\n ),\n default='csv',\n help_text='Form results are send in this document format',\n )\n\n content_panels = AbstractEmailForm.content_panels + [\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('intro', classname='full'),\n edit_handlers.FieldPanel('thank_you', classname='full'),\n ], 'Page'),\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('to_address'),\n edit_handlers.FieldPanel('subject'),\n edit_handlers.FieldPanel('email_content', classname='full'),\n edit_handlers.FieldPanel('attach_as'),\n ], 'Email'),\n edit_handlers.InlinePanel('form_fields', label='Form fields'),\n ]\n\n def send_mail(self, form):\n self.form = form\n if self.attach_as == 'csv':\n emails.CsvFormEmail.send(self)\n elif self.attach_as == 'txt':\n emails.TextFormEmail.send(self)\n\n @property\n def field_values(self):\n fields = {}\n for field in self.form:\n value = field.value()\n if isinstance(value, list):\n value = ', '.join(value)\n fields[field.label] = value\n return fields\n", "path": "apps/cms/models.py"}]}
| 1,936 | 321 |
gh_patches_debug_8055
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-1497
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
setup.py lacks appropriate metadata for differing python versions
Please see this issue for the full context: https://github.com/NixOS/nixpkgs/issues/46318
Basically, it appears the METADATA in the resulting installation differs depending on the installation method.
I've done some minimal patching to `setup.py` to include the same `python_version` constraints that appear in `setup.cfg` and it appears to fix the issues with regards to METADATA.
However, I'm not very experienced in python packaging and am surprised that no one else has run into this issue before me.
Can anyone confirm that there is a mismatch here and that adding additional constraints to `setup.py` would be appropriate? I'll go ahead and get a PR together in the meantime.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup
4
5 import os
6 import re
7 import codecs
8
9 base_path = os.path.dirname(__file__)
10
11 # Get the version (borrowed from SQLAlchemy)
12 with open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:
13 VERSION = re.compile(r".*__version__ = '(.*?)'",
14 re.S).match(fp.read()).group(1)
15
16 with codecs.open('README.rst', encoding='utf-8') as fp:
17 readme = fp.read()
18 with codecs.open('CHANGES.rst', encoding='utf-8') as fp:
19 changes = fp.read()
20 version = VERSION
21
22 setup(name='urllib3',
23 version=version,
24 description="HTTP library with thread-safe connection pooling, file post, and more.",
25 long_description=u'\n\n'.join([readme, changes]),
26 classifiers=[
27 'Environment :: Web Environment',
28 'Intended Audience :: Developers',
29 'License :: OSI Approved :: MIT License',
30 'Operating System :: OS Independent',
31 'Programming Language :: Python',
32 'Programming Language :: Python :: 2',
33 'Programming Language :: Python :: 2.7',
34 'Programming Language :: Python :: 3',
35 'Programming Language :: Python :: 3.4',
36 'Programming Language :: Python :: 3.5',
37 'Programming Language :: Python :: 3.6',
38 'Programming Language :: Python :: 3.7',
39 'Programming Language :: Python :: 3.8',
40 'Programming Language :: Python :: Implementation :: CPython',
41 'Programming Language :: Python :: Implementation :: PyPy',
42 'Topic :: Internet :: WWW/HTTP',
43 'Topic :: Software Development :: Libraries',
44 ],
45 keywords='urllib httplib threadsafe filepost http https ssl pooling',
46 author='Andrey Petrov',
47 author_email='[email protected]',
48 url='https://urllib3.readthedocs.io/',
49 license='MIT',
50 packages=['urllib3',
51 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',
52 'urllib3.packages.backports', 'urllib3.packages.rfc3986',
53 'urllib3.contrib', 'urllib3.contrib._securetransport',
54 'urllib3.util'],
55 package_dir={'': 'src'},
56 requires=[],
57 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
58 tests_require=[
59 # These are a less-specific subset of dev-requirements.txt, for the
60 # convenience of distro package maintainers.
61 'pytest',
62 'mock',
63 'tornado',
64 ],
65 test_suite='test',
66 extras_require={
67 'secure': [
68 'pyOpenSSL >= 0.14',
69 'cryptography>=1.3.4',
70 'idna>=2.0.0',
71 'certifi',
72 "ipaddress",
73 ],
74 'socks': [
75 'PySocks>=1.5.6,<2.0,!=1.5.7',
76 ]
77 },
78 )
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,11 +65,11 @@
test_suite='test',
extras_require={
'secure': [
- 'pyOpenSSL >= 0.14',
+ 'pyOpenSSL>=0.14',
'cryptography>=1.3.4',
'idna>=2.0.0',
'certifi',
- "ipaddress",
+ "ipaddress; python_version=='2.7'",
],
'socks': [
'PySocks>=1.5.6,<2.0,!=1.5.7',
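The key change is the PEP 508 environment marker, which keeps `ipaddress` out of the metadata on Python 3. A minimal illustration for a made-up package (names are placeholders, not urllib3's full configuration):
```python
from setuptools import setup

setup(
    name="example-pkg",
    version="0.1",
    extras_require={
        "secure": [
            "cryptography>=1.3.4",
            # Only required on Python 2.7, so Python 3 installs no longer
            # advertise an unconditional 'ipaddress' dependency in METADATA.
            "ipaddress; python_version=='2.7'",
        ],
    },
)
```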
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,11 +65,11 @@\n test_suite='test',\n extras_require={\n 'secure': [\n- 'pyOpenSSL >= 0.14',\n+ 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n- \"ipaddress\",\n+ \"ipaddress; python_version=='2.7'\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n", "issue": "setup.py lacks appropriate metadata for differing python versions\nPlease see this issue for the full context: https://github.com/NixOS/nixpkgs/issues/46318\r\n\r\nBasically, it appears the METADATA in the resulting installation differs depending on the installation method.\r\n\r\nI've done some minimal patching to `setup.py` to include the same `python_version` constraints that appear in `setup.cfg` and it appears to fix the issues with regards to METADATA.\r\n\r\nHowever, I'm not very experienced in python packaging and am surprised that no one else has run into this issue before me.\r\n\r\nCan anyone confirm that there is a mismatch here and that adding additional constraints to `setup.py` would be appropriate? I'll go ahead and get a PR together in the meantime.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.packages.rfc3986',\n 'urllib3.contrib', 'urllib3.contrib._securetransport',\n 'urllib3.util'],\n package_dir={'': 'src'},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'pytest',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL >= 0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n \"ipaddress\",\n ],\n 'socks': [\n 
'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.packages.rfc3986',\n 'urllib3.contrib', 'urllib3.contrib._securetransport',\n 'urllib3.util'],\n package_dir={'': 'src'},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'pytest',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n \"ipaddress; python_version=='2.7'\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py"}]}
| 1,249 | 151 |
gh_patches_debug_3178 | rasdani/github-patches | git_diff | e-valuation__EvaP-1810 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace reward point redemption dropdown with number input field
If a user selects an option, a new line is added and the selection spans two rows. This looks wrong.
A user can insert custom options. If the user inputs something invalid like "abcdef" or an empty string, only part of the "Please select" placeholder is visible. This looks wrong as well.
Replace reward point redemption dropdown with number input field
If a user selects an option, a new line is added and the selection spans two rows. This looks wrong.
A user can insert custom options. If the user inputs something invalid like "abcdef" or an empty string, only part of the "Please select" placeholder is visible. This looks wrong as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/rewards/views.py`
Content:
```
1 from datetime import datetime
2
3 from django.contrib import messages
4 from django.core.exceptions import BadRequest, SuspiciousOperation
5 from django.http import HttpResponse
6 from django.shortcuts import get_object_or_404, redirect, render
7 from django.utils.translation import get_language
8 from django.utils.translation import gettext as _
9 from django.views.decorators.http import require_POST
10
11 from evap.evaluation.auth import manager_required, reward_user_required
12 from evap.evaluation.models import Semester
13 from evap.evaluation.tools import AttachmentResponse, get_object_from_dict_pk_entry_or_logged_40x
14 from evap.rewards.exporters import RewardsExporter
15 from evap.rewards.forms import RewardPointRedemptionEventForm
16 from evap.rewards.models import (
17 NoPointsSelected,
18 NotEnoughPoints,
19 RedemptionEventExpired,
20 RewardPointGranting,
21 RewardPointRedemption,
22 RewardPointRedemptionEvent,
23 SemesterActivation,
24 )
25 from evap.rewards.tools import grant_eligible_reward_points_for_semester, reward_points_of_user, save_redemptions
26 from evap.staff.views import semester_view
27
28
29 @reward_user_required
30 def index(request):
31 if request.method == "POST":
32 redemptions = {}
33 try:
34 for key, value in request.POST.items():
35 if key.startswith("points-"):
36 event_id = int(key.rpartition("-")[2])
37 redemptions[event_id] = int(value)
38 except ValueError as e:
39 raise BadRequest from e
40
41 try:
42 save_redemptions(request, redemptions)
43 messages.success(request, _("You successfully redeemed your points."))
44 except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error:
45 messages.warning(request, error)
46
47 total_points_available = reward_points_of_user(request.user)
48 reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)
49 reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)
50 events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by("date")
51
52 reward_point_actions = []
53 for granting in reward_point_grantings:
54 reward_point_actions.append(
55 (granting.granting_time, _("Reward for") + " " + granting.semester.name, granting.value, "")
56 )
57 for redemption in reward_point_redemptions:
58 reward_point_actions.append((redemption.redemption_time, redemption.event.name, "", redemption.value))
59
60 reward_point_actions.sort(key=lambda action: action[0], reverse=True)
61
62 template_data = dict(
63 reward_point_actions=reward_point_actions,
64 total_points_available=total_points_available,
65 events=events,
66 point_selection=range(0, total_points_available + 1),
67 )
68 return render(request, "rewards_index.html", template_data)
69
70
71 @manager_required
72 def reward_point_redemption_events(request):
73 upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by("date")
74 past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by("-date")
75 template_data = dict(upcoming_events=upcoming_events, past_events=past_events)
76 return render(request, "rewards_reward_point_redemption_events.html", template_data)
77
78
79 @manager_required
80 def reward_point_redemption_event_create(request):
81 event = RewardPointRedemptionEvent()
82 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
83
84 if form.is_valid():
85 form.save()
86 messages.success(request, _("Successfully created event."))
87 return redirect("rewards:reward_point_redemption_events")
88
89 return render(request, "rewards_reward_point_redemption_event_form.html", dict(form=form))
90
91
92 @manager_required
93 def reward_point_redemption_event_edit(request, event_id):
94 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
95 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
96
97 if form.is_valid():
98 event = form.save()
99
100 messages.success(request, _("Successfully updated event."))
101 return redirect("rewards:reward_point_redemption_events")
102
103 return render(request, "rewards_reward_point_redemption_event_form.html", dict(event=event, form=form))
104
105
106 @require_POST
107 @manager_required
108 def reward_point_redemption_event_delete(request):
109 event = get_object_from_dict_pk_entry_or_logged_40x(RewardPointRedemptionEvent, request.POST, "event_id")
110
111 if not event.can_delete:
112 raise SuspiciousOperation("Deleting redemption event not allowed")
113 event.delete()
114 return HttpResponse() # 200 OK
115
116
117 @manager_required
118 def reward_point_redemption_event_export(request, event_id):
119 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
120
121 filename = _("RewardPoints") + f"-{event.date}-{event.name}-{get_language()}.xls"
122 response = AttachmentResponse(filename, content_type="application/vnd.ms-excel")
123
124 RewardsExporter().export(response, event.redemptions_by_user())
125
126 return response
127
128
129 @manager_required
130 def semester_activation(request, semester_id, active):
131 semester = get_object_or_404(Semester, id=semester_id)
132 active = active == "on"
133
134 SemesterActivation.objects.update_or_create(semester=semester, defaults={"is_active": active})
135 if active:
136 grant_eligible_reward_points_for_semester(request, semester)
137
138 return semester_view(request=request, semester_id=semester_id)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/rewards/views.py b/evap/rewards/views.py
--- a/evap/rewards/views.py
+++ b/evap/rewards/views.py
@@ -63,7 +63,6 @@
reward_point_actions=reward_point_actions,
total_points_available=total_points_available,
events=events,
- point_selection=range(0, total_points_available + 1),
)
return render(request, "rewards_index.html", template_data)
|
{"golden_diff": "diff --git a/evap/rewards/views.py b/evap/rewards/views.py\n--- a/evap/rewards/views.py\n+++ b/evap/rewards/views.py\n@@ -63,7 +63,6 @@\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n- point_selection=range(0, total_points_available + 1),\n )\n return render(request, \"rewards_index.html\", template_data)\n", "issue": "Replace reward point redemption dropdown with number input field\nIf a user selects an option, a new line is added and the selection spans two rows. This looks wrong.\r\n\r\nA user can insert custom options. If the user inputs something invalid like \"abcdef\" or an empty string, only parts of \"Please select\"-placeholder is visible. This looks wrong as well.\nReplace reward point redemption dropdown with number input field\nIf a user selects an option, a new line is added and the selection spans two rows. This looks wrong.\r\n\r\nA user can insert custom options. If the user inputs something invalid like \"abcdef\" or an empty string, only parts of \"Please select\"-placeholder is visible. This looks wrong as well.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import BadRequest, SuspiciousOperation\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import get_language\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom evap.evaluation.auth import manager_required, reward_user_required\nfrom evap.evaluation.models import Semester\nfrom evap.evaluation.tools import AttachmentResponse, get_object_from_dict_pk_entry_or_logged_40x\nfrom evap.rewards.exporters import RewardsExporter\nfrom evap.rewards.forms import RewardPointRedemptionEventForm\nfrom evap.rewards.models import (\n NoPointsSelected,\n NotEnoughPoints,\n RedemptionEventExpired,\n RewardPointGranting,\n RewardPointRedemption,\n RewardPointRedemptionEvent,\n SemesterActivation,\n)\nfrom evap.rewards.tools import grant_eligible_reward_points_for_semester, reward_points_of_user, save_redemptions\nfrom evap.staff.views import semester_view\n\n\n@reward_user_required\ndef index(request):\n if request.method == \"POST\":\n redemptions = {}\n try:\n for key, value in request.POST.items():\n if key.startswith(\"points-\"):\n event_id = int(key.rpartition(\"-\")[2])\n redemptions[event_id] = int(value)\n except ValueError as e:\n raise BadRequest from e\n\n try:\n save_redemptions(request, redemptions)\n messages.success(request, _(\"You successfully redeemed your points.\"))\n except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error:\n messages.warning(request, error)\n\n total_points_available = reward_points_of_user(request.user)\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)\n reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)\n events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n\n reward_point_actions = []\n for granting in reward_point_grantings:\n reward_point_actions.append(\n (granting.granting_time, _(\"Reward for\") + \" \" + granting.semester.name, granting.value, \"\")\n )\n for redemption in reward_point_redemptions:\n reward_point_actions.append((redemption.redemption_time, redemption.event.name, \"\", redemption.value))\n\n 
reward_point_actions.sort(key=lambda action: action[0], reverse=True)\n\n template_data = dict(\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n point_selection=range(0, total_points_available + 1),\n )\n return render(request, \"rewards_index.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_events(request):\n upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by(\"-date\")\n template_data = dict(upcoming_events=upcoming_events, past_events=past_events)\n return render(request, \"rewards_reward_point_redemption_events.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_event_create(request):\n event = RewardPointRedemptionEvent()\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Successfully created event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(form=form))\n\n\n@manager_required\ndef reward_point_redemption_event_edit(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n event = form.save()\n\n messages.success(request, _(\"Successfully updated event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(event=event, form=form))\n\n\n@require_POST\n@manager_required\ndef reward_point_redemption_event_delete(request):\n event = get_object_from_dict_pk_entry_or_logged_40x(RewardPointRedemptionEvent, request.POST, \"event_id\")\n\n if not event.can_delete:\n raise SuspiciousOperation(\"Deleting redemption event not allowed\")\n event.delete()\n return HttpResponse() # 200 OK\n\n\n@manager_required\ndef reward_point_redemption_event_export(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n filename = _(\"RewardPoints\") + f\"-{event.date}-{event.name}-{get_language()}.xls\"\n response = AttachmentResponse(filename, content_type=\"application/vnd.ms-excel\")\n\n RewardsExporter().export(response, event.redemptions_by_user())\n\n return response\n\n\n@manager_required\ndef semester_activation(request, semester_id, active):\n semester = get_object_or_404(Semester, id=semester_id)\n active = active == \"on\"\n\n SemesterActivation.objects.update_or_create(semester=semester, defaults={\"is_active\": active})\n if active:\n grant_eligible_reward_points_for_semester(request, semester)\n\n return semester_view(request=request, semester_id=semester_id)\n", "path": "evap/rewards/views.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import BadRequest, SuspiciousOperation\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import get_language\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom evap.evaluation.auth import manager_required, reward_user_required\nfrom evap.evaluation.models import Semester\nfrom 
evap.evaluation.tools import AttachmentResponse, get_object_from_dict_pk_entry_or_logged_40x\nfrom evap.rewards.exporters import RewardsExporter\nfrom evap.rewards.forms import RewardPointRedemptionEventForm\nfrom evap.rewards.models import (\n NoPointsSelected,\n NotEnoughPoints,\n RedemptionEventExpired,\n RewardPointGranting,\n RewardPointRedemption,\n RewardPointRedemptionEvent,\n SemesterActivation,\n)\nfrom evap.rewards.tools import grant_eligible_reward_points_for_semester, reward_points_of_user, save_redemptions\nfrom evap.staff.views import semester_view\n\n\n@reward_user_required\ndef index(request):\n if request.method == \"POST\":\n redemptions = {}\n try:\n for key, value in request.POST.items():\n if key.startswith(\"points-\"):\n event_id = int(key.rpartition(\"-\")[2])\n redemptions[event_id] = int(value)\n except ValueError as e:\n raise BadRequest from e\n\n try:\n save_redemptions(request, redemptions)\n messages.success(request, _(\"You successfully redeemed your points.\"))\n except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error:\n messages.warning(request, error)\n\n total_points_available = reward_points_of_user(request.user)\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)\n reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)\n events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n\n reward_point_actions = []\n for granting in reward_point_grantings:\n reward_point_actions.append(\n (granting.granting_time, _(\"Reward for\") + \" \" + granting.semester.name, granting.value, \"\")\n )\n for redemption in reward_point_redemptions:\n reward_point_actions.append((redemption.redemption_time, redemption.event.name, \"\", redemption.value))\n\n reward_point_actions.sort(key=lambda action: action[0], reverse=True)\n\n template_data = dict(\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n )\n return render(request, \"rewards_index.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_events(request):\n upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by(\"-date\")\n template_data = dict(upcoming_events=upcoming_events, past_events=past_events)\n return render(request, \"rewards_reward_point_redemption_events.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_event_create(request):\n event = RewardPointRedemptionEvent()\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Successfully created event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(form=form))\n\n\n@manager_required\ndef reward_point_redemption_event_edit(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n event = form.save()\n\n messages.success(request, _(\"Successfully updated event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(event=event, 
form=form))\n\n\n@require_POST\n@manager_required\ndef reward_point_redemption_event_delete(request):\n event = get_object_from_dict_pk_entry_or_logged_40x(RewardPointRedemptionEvent, request.POST, \"event_id\")\n\n if not event.can_delete:\n raise SuspiciousOperation(\"Deleting redemption event not allowed\")\n event.delete()\n return HttpResponse() # 200 OK\n\n\n@manager_required\ndef reward_point_redemption_event_export(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n filename = _(\"RewardPoints\") + f\"-{event.date}-{event.name}-{get_language()}.xls\"\n response = AttachmentResponse(filename, content_type=\"application/vnd.ms-excel\")\n\n RewardsExporter().export(response, event.redemptions_by_user())\n\n return response\n\n\n@manager_required\ndef semester_activation(request, semester_id, active):\n semester = get_object_or_404(Semester, id=semester_id)\n active = active == \"on\"\n\n SemesterActivation.objects.update_or_create(semester=semester, defaults={\"is_active\": active})\n if active:\n grant_eligible_reward_points_for_semester(request, semester)\n\n return semester_view(request=request, semester_id=semester_id)\n", "path": "evap/rewards/views.py"}]}
| 1,901 | 107 |
gh_patches_debug_9175 | rasdani/github-patches | git_diff | scrapy__scrapy-5692 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarning: ANTIALIAS is deprecated
> scrapy/pipelines/images.py:163: DeprecationWarning: ANTIALIAS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead.
> image.thumbnail(size, self._Image.ANTIALIAS)
This is deprecated since Pillow 9.1.0, released in April. We should check if `Resampling.LANCZOS` is already available in the earliest version we support (7.1.0 as far as I can see), and use it if it's available there. If it was added later, I think we need to decide how to proceed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/pipelines/images.py`
Content:
```
1 """
2 Images Pipeline
3
4 See documentation in topics/media-pipeline.rst
5 """
6 import functools
7 import hashlib
8 from contextlib import suppress
9 from io import BytesIO
10
11 from itemadapter import ItemAdapter
12
13 from scrapy.exceptions import DropItem, NotConfigured
14 from scrapy.http import Request
15 from scrapy.pipelines.files import FileException, FilesPipeline
16 # TODO: from scrapy.pipelines.media import MediaPipeline
17 from scrapy.settings import Settings
18 from scrapy.utils.misc import md5sum
19 from scrapy.utils.python import to_bytes
20
21
22 class NoimagesDrop(DropItem):
23 """Product with no images exception"""
24
25
26 class ImageException(FileException):
27 """General image error exception"""
28
29
30 class ImagesPipeline(FilesPipeline):
31 """Abstract pipeline that implement the image thumbnail generation logic
32
33 """
34
35 MEDIA_NAME = 'image'
36
37 # Uppercase attributes kept for backward compatibility with code that subclasses
38 # ImagesPipeline. They may be overridden by settings.
39 MIN_WIDTH = 0
40 MIN_HEIGHT = 0
41 EXPIRES = 90
42 THUMBS = {}
43 DEFAULT_IMAGES_URLS_FIELD = 'image_urls'
44 DEFAULT_IMAGES_RESULT_FIELD = 'images'
45
46 def __init__(self, store_uri, download_func=None, settings=None):
47 try:
48 from PIL import Image
49 self._Image = Image
50 except ImportError:
51 raise NotConfigured(
52 'ImagesPipeline requires installing Pillow 4.0.0 or later'
53 )
54
55 super().__init__(store_uri, settings=settings, download_func=download_func)
56
57 if isinstance(settings, dict) or settings is None:
58 settings = Settings(settings)
59
60 resolve = functools.partial(self._key_for_pipe,
61 base_class_name="ImagesPipeline",
62 settings=settings)
63 self.expires = settings.getint(
64 resolve("IMAGES_EXPIRES"), self.EXPIRES
65 )
66
67 if not hasattr(self, "IMAGES_RESULT_FIELD"):
68 self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD
69 if not hasattr(self, "IMAGES_URLS_FIELD"):
70 self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD
71
72 self.images_urls_field = settings.get(
73 resolve('IMAGES_URLS_FIELD'),
74 self.IMAGES_URLS_FIELD
75 )
76 self.images_result_field = settings.get(
77 resolve('IMAGES_RESULT_FIELD'),
78 self.IMAGES_RESULT_FIELD
79 )
80 self.min_width = settings.getint(
81 resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH
82 )
83 self.min_height = settings.getint(
84 resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT
85 )
86 self.thumbs = settings.get(
87 resolve('IMAGES_THUMBS'), self.THUMBS
88 )
89
90 @classmethod
91 def from_settings(cls, settings):
92 s3store = cls.STORE_SCHEMES['s3']
93 s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
94 s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
95 s3store.AWS_SESSION_TOKEN = settings['AWS_SESSION_TOKEN']
96 s3store.AWS_ENDPOINT_URL = settings['AWS_ENDPOINT_URL']
97 s3store.AWS_REGION_NAME = settings['AWS_REGION_NAME']
98 s3store.AWS_USE_SSL = settings['AWS_USE_SSL']
99 s3store.AWS_VERIFY = settings['AWS_VERIFY']
100 s3store.POLICY = settings['IMAGES_STORE_S3_ACL']
101
102 gcs_store = cls.STORE_SCHEMES['gs']
103 gcs_store.GCS_PROJECT_ID = settings['GCS_PROJECT_ID']
104 gcs_store.POLICY = settings['IMAGES_STORE_GCS_ACL'] or None
105
106 ftp_store = cls.STORE_SCHEMES['ftp']
107 ftp_store.FTP_USERNAME = settings['FTP_USER']
108 ftp_store.FTP_PASSWORD = settings['FTP_PASSWORD']
109 ftp_store.USE_ACTIVE_MODE = settings.getbool('FEED_STORAGE_FTP_ACTIVE')
110
111 store_uri = settings['IMAGES_STORE']
112 return cls(store_uri, settings=settings)
113
114 def file_downloaded(self, response, request, info, *, item=None):
115 return self.image_downloaded(response, request, info, item=item)
116
117 def image_downloaded(self, response, request, info, *, item=None):
118 checksum = None
119 for path, image, buf in self.get_images(response, request, info, item=item):
120 if checksum is None:
121 buf.seek(0)
122 checksum = md5sum(buf)
123 width, height = image.size
124 self.store.persist_file(
125 path, buf, info,
126 meta={'width': width, 'height': height},
127 headers={'Content-Type': 'image/jpeg'})
128 return checksum
129
130 def get_images(self, response, request, info, *, item=None):
131 path = self.file_path(request, response=response, info=info, item=item)
132 orig_image = self._Image.open(BytesIO(response.body))
133
134 width, height = orig_image.size
135 if width < self.min_width or height < self.min_height:
136 raise ImageException("Image too small "
137 f"({width}x{height} < "
138 f"{self.min_width}x{self.min_height})")
139
140 image, buf = self.convert_image(orig_image)
141 yield path, image, buf
142
143 for thumb_id, size in self.thumbs.items():
144 thumb_path = self.thumb_path(request, thumb_id, response=response, info=info, item=item)
145 thumb_image, thumb_buf = self.convert_image(image, size)
146 yield thumb_path, thumb_image, thumb_buf
147
148 def convert_image(self, image, size=None):
149 if image.format == 'PNG' and image.mode == 'RGBA':
150 background = self._Image.new('RGBA', image.size, (255, 255, 255))
151 background.paste(image, image)
152 image = background.convert('RGB')
153 elif image.mode == 'P':
154 image = image.convert("RGBA")
155 background = self._Image.new('RGBA', image.size, (255, 255, 255))
156 background.paste(image, image)
157 image = background.convert('RGB')
158 elif image.mode != 'RGB':
159 image = image.convert('RGB')
160
161 if size:
162 image = image.copy()
163 image.thumbnail(size, self._Image.ANTIALIAS)
164
165 buf = BytesIO()
166 image.save(buf, 'JPEG')
167 return image, buf
168
169 def get_media_requests(self, item, info):
170 urls = ItemAdapter(item).get(self.images_urls_field, [])
171 return [Request(u) for u in urls]
172
173 def item_completed(self, results, item, info):
174 with suppress(KeyError):
175 ItemAdapter(item)[self.images_result_field] = [x for ok, x in results if ok]
176 return item
177
178 def file_path(self, request, response=None, info=None, *, item=None):
179 image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
180 return f'full/{image_guid}.jpg'
181
182 def thumb_path(self, request, thumb_id, response=None, info=None, *, item=None):
183 thumb_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
184 return f'thumbs/{thumb_id}/{thumb_guid}.jpg'
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/pipelines/images.py b/scrapy/pipelines/images.py
--- a/scrapy/pipelines/images.py
+++ b/scrapy/pipelines/images.py
@@ -160,7 +160,14 @@
if size:
image = image.copy()
- image.thumbnail(size, self._Image.ANTIALIAS)
+ try:
+ # Image.Resampling.LANCZOS was added in Pillow 9.1.0
+ # remove this try except block,
+ # when updating the minimum requirements for Pillow.
+ resampling_filter = self._Image.Resampling.LANCZOS
+ except AttributeError:
+ resampling_filter = self._Image.ANTIALIAS
+ image.thumbnail(size, resampling_filter)
buf = BytesIO()
image.save(buf, 'JPEG')
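The try/except fallback used in the patch can be exercised on its own; a minimal sketch, with the helper name `_lanczos_filter` being illustrative rather than part of Scrapy:
```python
from PIL import Image

def _lanczos_filter():
    # Pillow >= 9.1.0 exposes Image.Resampling.LANCZOS; older releases
    # only provide the (now deprecated) Image.ANTIALIAS constant.
    try:
        return Image.Resampling.LANCZOS
    except AttributeError:
        return Image.ANTIALIAS

# Thumbnail an in-memory image without triggering the deprecation warning
# on new Pillow while still working on the oldest supported release.
img = Image.new('RGB', (256, 256))
img.thumbnail((64, 64), _lanczos_filter())
```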
|
{"golden_diff": "diff --git a/scrapy/pipelines/images.py b/scrapy/pipelines/images.py\n--- a/scrapy/pipelines/images.py\n+++ b/scrapy/pipelines/images.py\n@@ -160,7 +160,14 @@\n \n if size:\n image = image.copy()\n- image.thumbnail(size, self._Image.ANTIALIAS)\n+ try:\n+ # Image.Resampling.LANCZOS was added in Pillow 9.1.0\n+ # remove this try except block,\n+ # when updating the minimum requirements for Pillow.\n+ resampling_filter = self._Image.Resampling.LANCZOS\n+ except AttributeError:\n+ resampling_filter = self._Image.ANTIALIAS\n+ image.thumbnail(size, resampling_filter)\n \n buf = BytesIO()\n image.save(buf, 'JPEG')\n", "issue": "DeprecationWarning: ANTIALIAS is deprecated\n> scrapy/pipelines/images.py:163: DeprecationWarning: ANTIALIAS is deprecated and will be removed in Pillow 10 (2023-07-01). Use Resampling.LANCZOS instead.\r\n> image.thumbnail(size, self._Image.ANTIALIAS)\r\n\r\nThis is deprecated since Pillow 9.1.0, released in April. We should check if `Resampling.LANCZOS` is already available in the earliest version we support (7.1.0 as far as I can see), and use it if it's available there. If it was added later, I think we need to decide how to proceed.\n", "before_files": [{"content": "\"\"\"\nImages Pipeline\n\nSee documentation in topics/media-pipeline.rst\n\"\"\"\nimport functools\nimport hashlib\nfrom contextlib import suppress\nfrom io import BytesIO\n\nfrom itemadapter import ItemAdapter\n\nfrom scrapy.exceptions import DropItem, NotConfigured\nfrom scrapy.http import Request\nfrom scrapy.pipelines.files import FileException, FilesPipeline\n# TODO: from scrapy.pipelines.media import MediaPipeline\nfrom scrapy.settings import Settings\nfrom scrapy.utils.misc import md5sum\nfrom scrapy.utils.python import to_bytes\n\n\nclass NoimagesDrop(DropItem):\n \"\"\"Product with no images exception\"\"\"\n\n\nclass ImageException(FileException):\n \"\"\"General image error exception\"\"\"\n\n\nclass ImagesPipeline(FilesPipeline):\n \"\"\"Abstract pipeline that implement the image thumbnail generation logic\n\n \"\"\"\n\n MEDIA_NAME = 'image'\n\n # Uppercase attributes kept for backward compatibility with code that subclasses\n # ImagesPipeline. 
They may be overridden by settings.\n MIN_WIDTH = 0\n MIN_HEIGHT = 0\n EXPIRES = 90\n THUMBS = {}\n DEFAULT_IMAGES_URLS_FIELD = 'image_urls'\n DEFAULT_IMAGES_RESULT_FIELD = 'images'\n\n def __init__(self, store_uri, download_func=None, settings=None):\n try:\n from PIL import Image\n self._Image = Image\n except ImportError:\n raise NotConfigured(\n 'ImagesPipeline requires installing Pillow 4.0.0 or later'\n )\n\n super().__init__(store_uri, settings=settings, download_func=download_func)\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n\n resolve = functools.partial(self._key_for_pipe,\n base_class_name=\"ImagesPipeline\",\n settings=settings)\n self.expires = settings.getint(\n resolve(\"IMAGES_EXPIRES\"), self.EXPIRES\n )\n\n if not hasattr(self, \"IMAGES_RESULT_FIELD\"):\n self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD\n if not hasattr(self, \"IMAGES_URLS_FIELD\"):\n self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD\n\n self.images_urls_field = settings.get(\n resolve('IMAGES_URLS_FIELD'),\n self.IMAGES_URLS_FIELD\n )\n self.images_result_field = settings.get(\n resolve('IMAGES_RESULT_FIELD'),\n self.IMAGES_RESULT_FIELD\n )\n self.min_width = settings.getint(\n resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH\n )\n self.min_height = settings.getint(\n resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT\n )\n self.thumbs = settings.get(\n resolve('IMAGES_THUMBS'), self.THUMBS\n )\n\n @classmethod\n def from_settings(cls, settings):\n s3store = cls.STORE_SCHEMES['s3']\n s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']\n s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']\n s3store.AWS_SESSION_TOKEN = settings['AWS_SESSION_TOKEN']\n s3store.AWS_ENDPOINT_URL = settings['AWS_ENDPOINT_URL']\n s3store.AWS_REGION_NAME = settings['AWS_REGION_NAME']\n s3store.AWS_USE_SSL = settings['AWS_USE_SSL']\n s3store.AWS_VERIFY = settings['AWS_VERIFY']\n s3store.POLICY = settings['IMAGES_STORE_S3_ACL']\n\n gcs_store = cls.STORE_SCHEMES['gs']\n gcs_store.GCS_PROJECT_ID = settings['GCS_PROJECT_ID']\n gcs_store.POLICY = settings['IMAGES_STORE_GCS_ACL'] or None\n\n ftp_store = cls.STORE_SCHEMES['ftp']\n ftp_store.FTP_USERNAME = settings['FTP_USER']\n ftp_store.FTP_PASSWORD = settings['FTP_PASSWORD']\n ftp_store.USE_ACTIVE_MODE = settings.getbool('FEED_STORAGE_FTP_ACTIVE')\n\n store_uri = settings['IMAGES_STORE']\n return cls(store_uri, settings=settings)\n\n def file_downloaded(self, response, request, info, *, item=None):\n return self.image_downloaded(response, request, info, item=item)\n\n def image_downloaded(self, response, request, info, *, item=None):\n checksum = None\n for path, image, buf in self.get_images(response, request, info, item=item):\n if checksum is None:\n buf.seek(0)\n checksum = md5sum(buf)\n width, height = image.size\n self.store.persist_file(\n path, buf, info,\n meta={'width': width, 'height': height},\n headers={'Content-Type': 'image/jpeg'})\n return checksum\n\n def get_images(self, response, request, info, *, item=None):\n path = self.file_path(request, response=response, info=info, item=item)\n orig_image = self._Image.open(BytesIO(response.body))\n\n width, height = orig_image.size\n if width < self.min_width or height < self.min_height:\n raise ImageException(\"Image too small \"\n f\"({width}x{height} < \"\n f\"{self.min_width}x{self.min_height})\")\n\n image, buf = self.convert_image(orig_image)\n yield path, image, buf\n\n for thumb_id, size in self.thumbs.items():\n thumb_path = 
self.thumb_path(request, thumb_id, response=response, info=info, item=item)\n thumb_image, thumb_buf = self.convert_image(image, size)\n yield thumb_path, thumb_image, thumb_buf\n\n def convert_image(self, image, size=None):\n if image.format == 'PNG' and image.mode == 'RGBA':\n background = self._Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode == 'P':\n image = image.convert(\"RGBA\")\n background = self._Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode != 'RGB':\n image = image.convert('RGB')\n\n if size:\n image = image.copy()\n image.thumbnail(size, self._Image.ANTIALIAS)\n\n buf = BytesIO()\n image.save(buf, 'JPEG')\n return image, buf\n\n def get_media_requests(self, item, info):\n urls = ItemAdapter(item).get(self.images_urls_field, [])\n return [Request(u) for u in urls]\n\n def item_completed(self, results, item, info):\n with suppress(KeyError):\n ItemAdapter(item)[self.images_result_field] = [x for ok, x in results if ok]\n return item\n\n def file_path(self, request, response=None, info=None, *, item=None):\n image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()\n return f'full/{image_guid}.jpg'\n\n def thumb_path(self, request, thumb_id, response=None, info=None, *, item=None):\n thumb_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()\n return f'thumbs/{thumb_id}/{thumb_guid}.jpg'\n", "path": "scrapy/pipelines/images.py"}], "after_files": [{"content": "\"\"\"\nImages Pipeline\n\nSee documentation in topics/media-pipeline.rst\n\"\"\"\nimport functools\nimport hashlib\nfrom contextlib import suppress\nfrom io import BytesIO\n\nfrom itemadapter import ItemAdapter\n\nfrom scrapy.exceptions import DropItem, NotConfigured\nfrom scrapy.http import Request\nfrom scrapy.pipelines.files import FileException, FilesPipeline\n# TODO: from scrapy.pipelines.media import MediaPipeline\nfrom scrapy.settings import Settings\nfrom scrapy.utils.misc import md5sum\nfrom scrapy.utils.python import to_bytes\n\n\nclass NoimagesDrop(DropItem):\n \"\"\"Product with no images exception\"\"\"\n\n\nclass ImageException(FileException):\n \"\"\"General image error exception\"\"\"\n\n\nclass ImagesPipeline(FilesPipeline):\n \"\"\"Abstract pipeline that implement the image thumbnail generation logic\n\n \"\"\"\n\n MEDIA_NAME = 'image'\n\n # Uppercase attributes kept for backward compatibility with code that subclasses\n # ImagesPipeline. 
They may be overridden by settings.\n MIN_WIDTH = 0\n MIN_HEIGHT = 0\n EXPIRES = 90\n THUMBS = {}\n DEFAULT_IMAGES_URLS_FIELD = 'image_urls'\n DEFAULT_IMAGES_RESULT_FIELD = 'images'\n\n def __init__(self, store_uri, download_func=None, settings=None):\n try:\n from PIL import Image\n self._Image = Image\n except ImportError:\n raise NotConfigured(\n 'ImagesPipeline requires installing Pillow 4.0.0 or later'\n )\n\n super().__init__(store_uri, settings=settings, download_func=download_func)\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n\n resolve = functools.partial(self._key_for_pipe,\n base_class_name=\"ImagesPipeline\",\n settings=settings)\n self.expires = settings.getint(\n resolve(\"IMAGES_EXPIRES\"), self.EXPIRES\n )\n\n if not hasattr(self, \"IMAGES_RESULT_FIELD\"):\n self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD\n if not hasattr(self, \"IMAGES_URLS_FIELD\"):\n self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD\n\n self.images_urls_field = settings.get(\n resolve('IMAGES_URLS_FIELD'),\n self.IMAGES_URLS_FIELD\n )\n self.images_result_field = settings.get(\n resolve('IMAGES_RESULT_FIELD'),\n self.IMAGES_RESULT_FIELD\n )\n self.min_width = settings.getint(\n resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH\n )\n self.min_height = settings.getint(\n resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT\n )\n self.thumbs = settings.get(\n resolve('IMAGES_THUMBS'), self.THUMBS\n )\n\n @classmethod\n def from_settings(cls, settings):\n s3store = cls.STORE_SCHEMES['s3']\n s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']\n s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']\n s3store.AWS_SESSION_TOKEN = settings['AWS_SESSION_TOKEN']\n s3store.AWS_ENDPOINT_URL = settings['AWS_ENDPOINT_URL']\n s3store.AWS_REGION_NAME = settings['AWS_REGION_NAME']\n s3store.AWS_USE_SSL = settings['AWS_USE_SSL']\n s3store.AWS_VERIFY = settings['AWS_VERIFY']\n s3store.POLICY = settings['IMAGES_STORE_S3_ACL']\n\n gcs_store = cls.STORE_SCHEMES['gs']\n gcs_store.GCS_PROJECT_ID = settings['GCS_PROJECT_ID']\n gcs_store.POLICY = settings['IMAGES_STORE_GCS_ACL'] or None\n\n ftp_store = cls.STORE_SCHEMES['ftp']\n ftp_store.FTP_USERNAME = settings['FTP_USER']\n ftp_store.FTP_PASSWORD = settings['FTP_PASSWORD']\n ftp_store.USE_ACTIVE_MODE = settings.getbool('FEED_STORAGE_FTP_ACTIVE')\n\n store_uri = settings['IMAGES_STORE']\n return cls(store_uri, settings=settings)\n\n def file_downloaded(self, response, request, info, *, item=None):\n return self.image_downloaded(response, request, info, item=item)\n\n def image_downloaded(self, response, request, info, *, item=None):\n checksum = None\n for path, image, buf in self.get_images(response, request, info, item=item):\n if checksum is None:\n buf.seek(0)\n checksum = md5sum(buf)\n width, height = image.size\n self.store.persist_file(\n path, buf, info,\n meta={'width': width, 'height': height},\n headers={'Content-Type': 'image/jpeg'})\n return checksum\n\n def get_images(self, response, request, info, *, item=None):\n path = self.file_path(request, response=response, info=info, item=item)\n orig_image = self._Image.open(BytesIO(response.body))\n\n width, height = orig_image.size\n if width < self.min_width or height < self.min_height:\n raise ImageException(\"Image too small \"\n f\"({width}x{height} < \"\n f\"{self.min_width}x{self.min_height})\")\n\n image, buf = self.convert_image(orig_image)\n yield path, image, buf\n\n for thumb_id, size in self.thumbs.items():\n thumb_path = 
self.thumb_path(request, thumb_id, response=response, info=info, item=item)\n thumb_image, thumb_buf = self.convert_image(image, size)\n yield thumb_path, thumb_image, thumb_buf\n\n def convert_image(self, image, size=None):\n if image.format == 'PNG' and image.mode == 'RGBA':\n background = self._Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode == 'P':\n image = image.convert(\"RGBA\")\n background = self._Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode != 'RGB':\n image = image.convert('RGB')\n\n if size:\n image = image.copy()\n try:\n # Image.Resampling.LANCZOS was added in Pillow 9.1.0\n # remove this try except block,\n # when updating the minimum requirements for Pillow.\n resampling_filter = self._Image.Resampling.LANCZOS\n except AttributeError:\n resampling_filter = self._Image.ANTIALIAS\n image.thumbnail(size, resampling_filter)\n\n buf = BytesIO()\n image.save(buf, 'JPEG')\n return image, buf\n\n def get_media_requests(self, item, info):\n urls = ItemAdapter(item).get(self.images_urls_field, [])\n return [Request(u) for u in urls]\n\n def item_completed(self, results, item, info):\n with suppress(KeyError):\n ItemAdapter(item)[self.images_result_field] = [x for ok, x in results if ok]\n return item\n\n def file_path(self, request, response=None, info=None, *, item=None):\n image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()\n return f'full/{image_guid}.jpg'\n\n def thumb_path(self, request, thumb_id, response=None, info=None, *, item=None):\n thumb_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()\n return f'thumbs/{thumb_id}/{thumb_guid}.jpg'\n", "path": "scrapy/pipelines/images.py"}]}
| 2,405 | 181 |
gh_patches_debug_40636 | rasdani/github-patches | git_diff | rasterio__rasterio-158 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
shapes from data types other than uint8
The companion to #136.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/features.py`
Content:
```
1 """Functions for working with features in a raster dataset."""
2
3 import json
4 import logging
5 import time
6 import warnings
7
8 import numpy as np
9
10 import rasterio
11 from rasterio._features import _shapes, _sieve, _rasterize
12 from rasterio.transform import IDENTITY, guard_transform
13 from rasterio.dtypes import get_minimum_int_dtype
14
15
16 log = logging.getLogger('rasterio')
17 class NullHandler(logging.Handler):
18 def emit(self, record):
19 pass
20 log.addHandler(NullHandler())
21
22
23 def shapes(image, mask=None, connectivity=4, transform=IDENTITY):
24 """Yields a (shape, image_value) pair for each feature in the image.
25
26 The shapes are GeoJSON-like dicts and the image values are ints.
27
28 Features are found using a connected-component labeling algorithm.
29
30 The image must be of unsigned 8-bit integer (rasterio.byte or
31 numpy.uint8) data type. If a mask is provided, pixels for which the
32 mask is `False` will be excluded from feature generation.
33 """
34 if np.dtype(image.dtype) != np.dtype(rasterio.ubyte):
35 raise ValueError("Image must be dtype uint8/ubyte")
36
37 if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):
38 raise ValueError("Mask must be dtype rasterio.bool_")
39
40 if connectivity not in (4, 8):
41 raise ValueError("Connectivity Option must be 4 or 8")
42
43 transform = guard_transform(transform)
44
45 with rasterio.drivers():
46 for s, v in _shapes(image, mask, connectivity, transform.to_gdal()):
47 yield s, v
48
49
50 def sieve(image, size, connectivity=4, output=None):
51 """Returns a copy of the image, but with smaller features removed.
52
53 Features smaller than the specified size have their pixel value
54 replaced by that of the largest neighboring features.
55
56 The image must be of unsigned 8-bit integer (rasterio.byte or
57 numpy.uint8) data type.
58 """
59 if np.dtype(image.dtype) != np.dtype(rasterio.ubyte):
60 raise ValueError("Image must be dtype uint8/ubyte")
61
62 if output is not None and (
63 np.dtype(output.dtype) != np.dtype(rasterio.ubyte)):
64 raise ValueError("Output must be dtype uint8/ubyte")
65
66 with rasterio.drivers():
67 return _sieve(image, size, connectivity)
68
69
70 def rasterize(
71 shapes,
72 out_shape=None,
73 fill=0,
74 output=None,
75 transform=IDENTITY,
76 all_touched=False,
77 default_value=1,
78 dtype=None):
79 """Returns an image array with points, lines, or polygons burned in.
80
81 A different value may be specified for each shape. The shapes may
82 be georeferenced or may have image coordinates. An existing image
83 array may be provided, or one may be created. By default, the center
84 of image elements determines whether they are updated, but all
85 touched elements may be optionally updated.
86
87 Valid data types are: int16, int32, uint8, uint16, uint32, float32, float64
88
89 :param shapes: an iterator over Fiona style geometry objects (with a default
90 value of default_value) or an iterator over (geometry, value) pairs.
91
92 :param transform: GDAL style geotransform to be applied to the
93 image.
94
95 :param out_shape: shape of created image array
96 :param fill: fill value for created image array
97 :param output: alternatively, an existing image array
98
99 :param all_touched: if True, will rasterize all pixels touched,
100 otherwise will use GDAL default method.
101 :param default_value: value burned in for shapes if not provided as part
102 of shapes.
103 """
104
105 valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32',
106 'float64')
107
108 def get_valid_dtype(values):
109 values_dtype = values.dtype
110 if values_dtype.kind == 'i':
111 values_dtype = np.dtype(get_minimum_int_dtype(values))
112 if values_dtype.name in valid_dtypes:
113 return values_dtype
114 return None
115
116 def can_cast_dtype(values, dtype):
117 if values.dtype.name == np.dtype(dtype).name:
118 return True
119 elif values.dtype.kind == 'f':
120 return np.allclose(values, values.astype(dtype))
121 else:
122 return np.array_equal(values, values.astype(dtype))
123
124 if fill != 0:
125 fill_array = np.array([fill])
126 if get_valid_dtype(fill_array) is None:
127 raise ValueError('fill must be one of these types: %s'
128 % (', '.join(valid_dtypes)))
129 elif dtype is not None and not can_cast_dtype(fill_array, dtype):
130 raise ValueError('fill value cannot be cast to specified dtype')
131
132
133 if default_value != 1:
134 default_value_array = np.array([default_value])
135 if get_valid_dtype(default_value_array) is None:
136 raise ValueError('default_value must be one of these types: %s'
137 % (', '.join(valid_dtypes)))
138 elif dtype is not None and not can_cast_dtype(default_value_array,
139 dtype):
140 raise ValueError('default_value cannot be cast to specified dtype')
141
142 valid_shapes = []
143 shape_values = []
144 for index, item in enumerate(shapes):
145 try:
146 if isinstance(item, (tuple, list)):
147 geom, value = item
148 else:
149 geom = item
150 value = default_value
151 geom = getattr(geom, '__geo_interface__', None) or geom
152 if (not isinstance(geom, dict) or
153 'type' not in geom or 'coordinates' not in geom):
154 raise ValueError(
155 'Object %r at index %d is not a geometry object' %
156 (geom, index))
157 valid_shapes.append((geom, value))
158 shape_values.append(value)
159 except Exception:
160 log.exception('Exception caught, skipping shape %d', index)
161
162 if not valid_shapes:
163 raise ValueError('No valid shapes found for rasterize. Shapes must be '
164 'valid geometry objects')
165
166 shape_values = np.array(shape_values)
167 values_dtype = get_valid_dtype(shape_values)
168 if values_dtype is None:
169 raise ValueError('shape values must be one of these dtypes: %s' %
170 (', '.join(valid_dtypes)))
171
172 if dtype is None:
173 dtype = values_dtype
174 elif np.dtype(dtype).name not in valid_dtypes:
175 raise ValueError('dtype must be one of: %s' % (', '.join(valid_dtypes)))
176 elif not can_cast_dtype(shape_values, dtype):
177 raise ValueError('shape values could not be cast to specified dtype')
178
179 if output is not None:
180 if np.dtype(output.dtype).name not in valid_dtypes:
181 raise ValueError('Output image dtype must be one of: %s'
182 % (', '.join(valid_dtypes)))
183 if not can_cast_dtype(shape_values, output.dtype):
184 raise ValueError('shape values cannot be cast to dtype of output '
185 'image')
186
187 elif out_shape is not None:
188 output = np.empty(out_shape, dtype=dtype)
189 output.fill(fill)
190 else:
191 raise ValueError('Either an output shape or image must be provided')
192
193 transform = guard_transform(transform)
194
195 with rasterio.drivers():
196 _rasterize(valid_shapes, output, transform.to_gdal(), all_touched)
197
198 return output
199
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/features.py b/rasterio/features.py
--- a/rasterio/features.py
+++ b/rasterio/features.py
@@ -22,17 +22,26 @@
def shapes(image, mask=None, connectivity=4, transform=IDENTITY):
"""Yields a (shape, image_value) pair for each feature in the image.
-
- The shapes are GeoJSON-like dicts and the image values are ints.
-
+
+ The shapes are GeoJSON-like dicts and the image values are ints or floats
+ depending on the data type of the image.
+
Features are found using a connected-component labeling algorithm.
- The image must be of unsigned 8-bit integer (rasterio.byte or
- numpy.uint8) data type. If a mask is provided, pixels for which the
- mask is `False` will be excluded from feature generation.
+ The image must be one of int16, int32, uint8, uint16, float32 data types.
+ Note: due to floating point precision issues, the floating point values
+ returned from a floating point image may not exactly match the original
+ values.
+
+ If a mask is provided, pixels for which the mask is `False` will be
+ excluded from feature generation.
"""
- if np.dtype(image.dtype) != np.dtype(rasterio.ubyte):
- raise ValueError("Image must be dtype uint8/ubyte")
+
+ valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'float32')
+
+ if np.dtype(image.dtype).name not in valid_dtypes:
+ raise ValueError('image dtype must be one of: %s'
+ % (', '.join(valid_dtypes)))
if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):
raise ValueError("Mask must be dtype rasterio.bool_")
@@ -52,7 +61,7 @@
Features smaller than the specified size have their pixel value
replaced by that of the largest neighboring features.
-
+
The image must be of unsigned 8-bit integer (rasterio.byte or
numpy.uint8) data type.
"""
@@ -68,7 +77,7 @@
def rasterize(
- shapes,
+ shapes,
out_shape=None,
fill=0,
output=None,
@@ -96,7 +105,7 @@
:param fill: fill value for created image array
:param output: alternatively, an existing image array
- :param all_touched: if True, will rasterize all pixels touched,
+ :param all_touched: if True, will rasterize all pixels touched,
otherwise will use GDAL default method.
:param default_value: value burned in for shapes if not provided as part
of shapes.
@@ -178,7 +187,7 @@
if output is not None:
if np.dtype(output.dtype).name not in valid_dtypes:
- raise ValueError('Output image dtype must be one of: %s'
+ raise ValueError('Output image dtype must be one of: %s'
% (', '.join(valid_dtypes)))
if not can_cast_dtype(shape_values, output.dtype):
raise ValueError('shape values cannot be cast to dtype of output '
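A usage sketch of the widened dtype check, assuming the Cython `_shapes` backend accepts the additional dtypes as the patch implies (array contents are illustrative):
```python
import numpy as np
from rasterio.features import shapes

# int16 now passes validation; uint8 keeps working as before.
image = np.zeros((10, 10), dtype='int16')
image[2:5, 2:5] = 7

for geom, value in shapes(image):
    # geom is a GeoJSON-like dict, value is the pixel value of the feature
    print(geom['type'], value)
```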
|
{"golden_diff": "diff --git a/rasterio/features.py b/rasterio/features.py\n--- a/rasterio/features.py\n+++ b/rasterio/features.py\n@@ -22,17 +22,26 @@\n \n def shapes(image, mask=None, connectivity=4, transform=IDENTITY):\n \"\"\"Yields a (shape, image_value) pair for each feature in the image.\n- \n- The shapes are GeoJSON-like dicts and the image values are ints.\n- \n+\n+ The shapes are GeoJSON-like dicts and the image values are ints or floats\n+ depending on the data type of the image.\n+\n Features are found using a connected-component labeling algorithm.\n \n- The image must be of unsigned 8-bit integer (rasterio.byte or\n- numpy.uint8) data type. If a mask is provided, pixels for which the\n- mask is `False` will be excluded from feature generation.\n+ The image must be one of int16, int32, uint8, uint16, float32 data types.\n+ Note: due to floating point precision issues, the floating point values\n+ returned from a floating point image may not exactly match the original\n+ values.\n+\n+ If a mask is provided, pixels for which the mask is `False` will be\n+ excluded from feature generation.\n \"\"\"\n- if np.dtype(image.dtype) != np.dtype(rasterio.ubyte):\n- raise ValueError(\"Image must be dtype uint8/ubyte\")\n+\n+ valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'float32')\n+\n+ if np.dtype(image.dtype).name not in valid_dtypes:\n+ raise ValueError('image dtype must be one of: %s'\n+ % (', '.join(valid_dtypes)))\n \n if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):\n raise ValueError(\"Mask must be dtype rasterio.bool_\")\n@@ -52,7 +61,7 @@\n \n Features smaller than the specified size have their pixel value\n replaced by that of the largest neighboring features.\n- \n+\n The image must be of unsigned 8-bit integer (rasterio.byte or\n numpy.uint8) data type.\n \"\"\"\n@@ -68,7 +77,7 @@\n \n \n def rasterize(\n- shapes, \n+ shapes,\n out_shape=None,\n fill=0,\n output=None,\n@@ -96,7 +105,7 @@\n :param fill: fill value for created image array\n :param output: alternatively, an existing image array\n \n- :param all_touched: if True, will rasterize all pixels touched, \n+ :param all_touched: if True, will rasterize all pixels touched,\n otherwise will use GDAL default method.\n :param default_value: value burned in for shapes if not provided as part\n of shapes.\n@@ -178,7 +187,7 @@\n \n if output is not None:\n if np.dtype(output.dtype).name not in valid_dtypes:\n- raise ValueError('Output image dtype must be one of: %s' \n+ raise ValueError('Output image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n if not can_cast_dtype(shape_values, output.dtype):\n raise ValueError('shape values cannot be cast to dtype of output '\n", "issue": "shapes from data types other than uint8\nThe companion to #136.\n\n", "before_files": [{"content": "\"\"\"Functions for working with features in a raster dataset.\"\"\"\n\nimport json\nimport logging\nimport time\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio._features import _shapes, _sieve, _rasterize\nfrom rasterio.transform import IDENTITY, guard_transform\nfrom rasterio.dtypes import get_minimum_int_dtype\n\n\nlog = logging.getLogger('rasterio')\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nlog.addHandler(NullHandler())\n\n\ndef shapes(image, mask=None, connectivity=4, transform=IDENTITY):\n \"\"\"Yields a (shape, image_value) pair for each feature in the image.\n \n The shapes are GeoJSON-like dicts and the image values are ints.\n \n Features are found 
using a connected-component labeling algorithm.\n\n The image must be of unsigned 8-bit integer (rasterio.byte or\n numpy.uint8) data type. If a mask is provided, pixels for which the\n mask is `False` will be excluded from feature generation.\n \"\"\"\n if np.dtype(image.dtype) != np.dtype(rasterio.ubyte):\n raise ValueError(\"Image must be dtype uint8/ubyte\")\n\n if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):\n raise ValueError(\"Mask must be dtype rasterio.bool_\")\n\n if connectivity not in (4, 8):\n raise ValueError(\"Connectivity Option must be 4 or 8\")\n\n transform = guard_transform(transform)\n\n with rasterio.drivers():\n for s, v in _shapes(image, mask, connectivity, transform.to_gdal()):\n yield s, v\n\n\ndef sieve(image, size, connectivity=4, output=None):\n \"\"\"Returns a copy of the image, but with smaller features removed.\n\n Features smaller than the specified size have their pixel value\n replaced by that of the largest neighboring features.\n \n The image must be of unsigned 8-bit integer (rasterio.byte or\n numpy.uint8) data type.\n \"\"\"\n if np.dtype(image.dtype) != np.dtype(rasterio.ubyte):\n raise ValueError(\"Image must be dtype uint8/ubyte\")\n\n if output is not None and (\n np.dtype(output.dtype) != np.dtype(rasterio.ubyte)):\n raise ValueError(\"Output must be dtype uint8/ubyte\")\n\n with rasterio.drivers():\n return _sieve(image, size, connectivity)\n\n\ndef rasterize(\n shapes, \n out_shape=None,\n fill=0,\n output=None,\n transform=IDENTITY,\n all_touched=False,\n default_value=1,\n dtype=None):\n \"\"\"Returns an image array with points, lines, or polygons burned in.\n\n A different value may be specified for each shape. The shapes may\n be georeferenced or may have image coordinates. An existing image\n array may be provided, or one may be created. 
By default, the center\n of image elements determines whether they are updated, but all\n touched elements may be optionally updated.\n\n Valid data types are: int16, int32, uint8, uint16, uint32, float32, float64\n\n :param shapes: an iterator over Fiona style geometry objects (with a default\n value of default_value) or an iterator over (geometry, value) pairs.\n\n :param transform: GDAL style geotransform to be applied to the\n image.\n\n :param out_shape: shape of created image array\n :param fill: fill value for created image array\n :param output: alternatively, an existing image array\n\n :param all_touched: if True, will rasterize all pixels touched, \n otherwise will use GDAL default method.\n :param default_value: value burned in for shapes if not provided as part\n of shapes.\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32',\n 'float64')\n\n def get_valid_dtype(values):\n values_dtype = values.dtype\n if values_dtype.kind == 'i':\n values_dtype = np.dtype(get_minimum_int_dtype(values))\n if values_dtype.name in valid_dtypes:\n return values_dtype\n return None\n\n def can_cast_dtype(values, dtype):\n if values.dtype.name == np.dtype(dtype).name:\n return True\n elif values.dtype.kind == 'f':\n return np.allclose(values, values.astype(dtype))\n else:\n return np.array_equal(values, values.astype(dtype))\n\n if fill != 0:\n fill_array = np.array([fill])\n if get_valid_dtype(fill_array) is None:\n raise ValueError('fill must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(fill_array, dtype):\n raise ValueError('fill value cannot be cast to specified dtype')\n\n\n if default_value != 1:\n default_value_array = np.array([default_value])\n if get_valid_dtype(default_value_array) is None:\n raise ValueError('default_value must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(default_value_array,\n dtype):\n raise ValueError('default_value cannot be cast to specified dtype')\n\n valid_shapes = []\n shape_values = []\n for index, item in enumerate(shapes):\n try:\n if isinstance(item, (tuple, list)):\n geom, value = item\n else:\n geom = item\n value = default_value\n geom = getattr(geom, '__geo_interface__', None) or geom\n if (not isinstance(geom, dict) or\n 'type' not in geom or 'coordinates' not in geom):\n raise ValueError(\n 'Object %r at index %d is not a geometry object' %\n (geom, index))\n valid_shapes.append((geom, value))\n shape_values.append(value)\n except Exception:\n log.exception('Exception caught, skipping shape %d', index)\n\n if not valid_shapes:\n raise ValueError('No valid shapes found for rasterize. 
Shapes must be '\n 'valid geometry objects')\n\n shape_values = np.array(shape_values)\n values_dtype = get_valid_dtype(shape_values)\n if values_dtype is None:\n raise ValueError('shape values must be one of these dtypes: %s' %\n (', '.join(valid_dtypes)))\n\n if dtype is None:\n dtype = values_dtype\n elif np.dtype(dtype).name not in valid_dtypes:\n raise ValueError('dtype must be one of: %s' % (', '.join(valid_dtypes)))\n elif not can_cast_dtype(shape_values, dtype):\n raise ValueError('shape values could not be cast to specified dtype')\n\n if output is not None:\n if np.dtype(output.dtype).name not in valid_dtypes:\n raise ValueError('Output image dtype must be one of: %s' \n % (', '.join(valid_dtypes)))\n if not can_cast_dtype(shape_values, output.dtype):\n raise ValueError('shape values cannot be cast to dtype of output '\n 'image')\n\n elif out_shape is not None:\n output = np.empty(out_shape, dtype=dtype)\n output.fill(fill)\n else:\n raise ValueError('Either an output shape or image must be provided')\n \n transform = guard_transform(transform)\n\n with rasterio.drivers():\n _rasterize(valid_shapes, output, transform.to_gdal(), all_touched)\n \n return output\n\n", "path": "rasterio/features.py"}], "after_files": [{"content": "\"\"\"Functions for working with features in a raster dataset.\"\"\"\n\nimport json\nimport logging\nimport time\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio._features import _shapes, _sieve, _rasterize\nfrom rasterio.transform import IDENTITY, guard_transform\nfrom rasterio.dtypes import get_minimum_int_dtype\n\n\nlog = logging.getLogger('rasterio')\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nlog.addHandler(NullHandler())\n\n\ndef shapes(image, mask=None, connectivity=4, transform=IDENTITY):\n \"\"\"Yields a (shape, image_value) pair for each feature in the image.\n\n The shapes are GeoJSON-like dicts and the image values are ints or floats\n depending on the data type of the image.\n\n Features are found using a connected-component labeling algorithm.\n\n The image must be one of int16, int32, uint8, uint16, float32 data types.\n Note: due to floating point precision issues, the floating point values\n returned from a floating point image may not exactly match the original\n values.\n\n If a mask is provided, pixels for which the mask is `False` will be\n excluded from feature generation.\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'float32')\n\n if np.dtype(image.dtype).name not in valid_dtypes:\n raise ValueError('image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n\n if mask is not None and np.dtype(mask.dtype) != np.dtype(rasterio.bool_):\n raise ValueError(\"Mask must be dtype rasterio.bool_\")\n\n if connectivity not in (4, 8):\n raise ValueError(\"Connectivity Option must be 4 or 8\")\n\n transform = guard_transform(transform)\n\n with rasterio.drivers():\n for s, v in _shapes(image, mask, connectivity, transform.to_gdal()):\n yield s, v\n\n\ndef sieve(image, size, connectivity=4, output=None):\n \"\"\"Returns a copy of the image, but with smaller features removed.\n\n Features smaller than the specified size have their pixel value\n replaced by that of the largest neighboring features.\n\n The image must be of unsigned 8-bit integer (rasterio.byte or\n numpy.uint8) data type.\n \"\"\"\n if np.dtype(image.dtype) != np.dtype(rasterio.ubyte):\n raise ValueError(\"Image must be dtype uint8/ubyte\")\n\n if output is not None and (\n np.dtype(output.dtype) != 
np.dtype(rasterio.ubyte)):\n raise ValueError(\"Output must be dtype uint8/ubyte\")\n\n with rasterio.drivers():\n return _sieve(image, size, connectivity)\n\n\ndef rasterize(\n shapes,\n out_shape=None,\n fill=0,\n output=None,\n transform=IDENTITY,\n all_touched=False,\n default_value=1,\n dtype=None):\n \"\"\"Returns an image array with points, lines, or polygons burned in.\n\n A different value may be specified for each shape. The shapes may\n be georeferenced or may have image coordinates. An existing image\n array may be provided, or one may be created. By default, the center\n of image elements determines whether they are updated, but all\n touched elements may be optionally updated.\n\n Valid data types are: int16, int32, uint8, uint16, uint32, float32, float64\n\n :param shapes: an iterator over Fiona style geometry objects (with a default\n value of default_value) or an iterator over (geometry, value) pairs.\n\n :param transform: GDAL style geotransform to be applied to the\n image.\n\n :param out_shape: shape of created image array\n :param fill: fill value for created image array\n :param output: alternatively, an existing image array\n\n :param all_touched: if True, will rasterize all pixels touched,\n otherwise will use GDAL default method.\n :param default_value: value burned in for shapes if not provided as part\n of shapes.\n \"\"\"\n\n valid_dtypes = ('int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32',\n 'float64')\n\n def get_valid_dtype(values):\n values_dtype = values.dtype\n if values_dtype.kind == 'i':\n values_dtype = np.dtype(get_minimum_int_dtype(values))\n if values_dtype.name in valid_dtypes:\n return values_dtype\n return None\n\n def can_cast_dtype(values, dtype):\n if values.dtype.name == np.dtype(dtype).name:\n return True\n elif values.dtype.kind == 'f':\n return np.allclose(values, values.astype(dtype))\n else:\n return np.array_equal(values, values.astype(dtype))\n\n if fill != 0:\n fill_array = np.array([fill])\n if get_valid_dtype(fill_array) is None:\n raise ValueError('fill must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(fill_array, dtype):\n raise ValueError('fill value cannot be cast to specified dtype')\n\n\n if default_value != 1:\n default_value_array = np.array([default_value])\n if get_valid_dtype(default_value_array) is None:\n raise ValueError('default_value must be one of these types: %s'\n % (', '.join(valid_dtypes)))\n elif dtype is not None and not can_cast_dtype(default_value_array,\n dtype):\n raise ValueError('default_value cannot be cast to specified dtype')\n\n valid_shapes = []\n shape_values = []\n for index, item in enumerate(shapes):\n try:\n if isinstance(item, (tuple, list)):\n geom, value = item\n else:\n geom = item\n value = default_value\n geom = getattr(geom, '__geo_interface__', None) or geom\n if (not isinstance(geom, dict) or\n 'type' not in geom or 'coordinates' not in geom):\n raise ValueError(\n 'Object %r at index %d is not a geometry object' %\n (geom, index))\n valid_shapes.append((geom, value))\n shape_values.append(value)\n except Exception:\n log.exception('Exception caught, skipping shape %d', index)\n\n if not valid_shapes:\n raise ValueError('No valid shapes found for rasterize. 
Shapes must be '\n 'valid geometry objects')\n\n shape_values = np.array(shape_values)\n values_dtype = get_valid_dtype(shape_values)\n if values_dtype is None:\n raise ValueError('shape values must be one of these dtypes: %s' %\n (', '.join(valid_dtypes)))\n\n if dtype is None:\n dtype = values_dtype\n elif np.dtype(dtype).name not in valid_dtypes:\n raise ValueError('dtype must be one of: %s' % (', '.join(valid_dtypes)))\n elif not can_cast_dtype(shape_values, dtype):\n raise ValueError('shape values could not be cast to specified dtype')\n\n if output is not None:\n if np.dtype(output.dtype).name not in valid_dtypes:\n raise ValueError('Output image dtype must be one of: %s'\n % (', '.join(valid_dtypes)))\n if not can_cast_dtype(shape_values, output.dtype):\n raise ValueError('shape values cannot be cast to dtype of output '\n 'image')\n\n elif out_shape is not None:\n output = np.empty(out_shape, dtype=dtype)\n output.fill(fill)\n else:\n raise ValueError('Either an output shape or image must be provided')\n \n transform = guard_transform(transform)\n\n with rasterio.drivers():\n _rasterize(valid_shapes, output, transform.to_gdal(), all_touched)\n \n return output\n\n", "path": "rasterio/features.py"}]}
| 2,410 | 746 |
gh_patches_debug_15518
|
rasdani/github-patches
|
git_diff
|
coala__coala-5935
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in docstring
In `coala/coalib/settings/Setting.py`,
In line 174, the word `of` should be replaced with `off`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/settings/Setting.py`
Content:
```
1 import os
2 from collections import Iterable, OrderedDict
3
4 from coala_utils.decorators import (
5 enforce_signature,
6 generate_repr,
7 )
8 from coala_utils.string_processing.StringConverter import StringConverter
9 from coalib.bearlib.languages.Language import Language, UnknownLanguageError
10 from coalib.parsing.Globbing import glob_escape
11 from coalib.results.SourcePosition import SourcePosition
12
13
14 def path(obj, *args, **kwargs):
15 return obj.__path__(*args, **kwargs)
16
17
18 def path_list(obj, *args, **kwargs):
19 return obj.__path_list__(*args, **kwargs)
20
21
22 def url(obj, *args, **kwargs):
23 return obj.__url__(*args, **kwargs)
24
25
26 def glob(obj, *args, **kwargs):
27 """
28 Creates a path in which all special glob characters in all the
29 parent directories in the given setting are properly escaped.
30
31 :param obj: The ``Setting`` object from which the key is obtained.
32 :return: Returns a path in which special glob characters are escaped.
33 """
34 return obj.__glob__(*args, **kwargs)
35
36
37 def glob_list(obj, *args, **kwargs):
38 """
39 Creates a list of paths in which all special glob characters in all the
40 parent directories of all paths in the given setting are properly escaped.
41
42 :param obj: The ``Setting`` object from which the key is obtained.
43 :return: Returns a list of paths in which special glob characters are
44 escaped.
45 """
46 return obj.__glob_list__(*args, **kwargs)
47
48
49 def language(name):
50 """
51 Convert a string into ``Language`` object.
52
53 :param name: String containing language name.
54 :return: ``Language`` object.
55 :raises ValueError: If the ``name`` contain invalid language name.
56 """
57 try:
58 return Language[name]
59 except UnknownLanguageError as e:
60 raise ValueError(e)
61
62
63 def typed_list(conversion_func):
64 """
65 Creates a class that converts a setting into a list of elements each
66 converted with the given conversion function.
67
68 :param conversion_func: The conversion function that converts a string into
69 your desired list item object.
70 :return: An instance of the created conversion class.
71 """
72
73 class Converter:
74
75 def __call__(self, setting):
76 return [conversion_func(StringConverter(elem))
77 for elem in setting]
78
79 def __repr__(self):
80 return 'typed_list(%s)' % conversion_func.__name__
81
82 return Converter()
83
84
85 str_list = typed_list(str)
86
87
88 int_list = typed_list(int)
89
90
91 float_list = typed_list(float)
92
93
94 bool_list = typed_list(bool)
95
96
97 def typed_dict(key_type, value_type, default):
98 """
99 Creates a class that converts a setting into a dict with the given types.
100
101 :param key_type: The type conversion function for the keys.
102 :param value_type: The type conversion function for the values.
103 :param default: The default value to use if no one is given by the user.
104 :return: An instance of the created conversion class.
105 """
106
107 class Converter:
108
109 def __call__(self, setting):
110 return {key_type(StringConverter(key)):
111 value_type(StringConverter(value))
112 if value != '' else default
113 for key, value in dict(setting).items()}
114
115 def __repr__(self):
116 return 'typed_dict(%s, %s, default=%s)' % (
117 key_type.__name__, value_type.__name__, default)
118
119 return Converter()
120
121
122 def typed_ordered_dict(key_type, value_type, default):
123 """
124 Creates a class that converts a setting into an ordered dict with the
125 given types.
126
127 :param key_type: The type conversion function for the keys.
128 :param value_type: The type conversion function for the values.
129 :param default: The default value to use if no one is given by the user.
130 :return: An instance of the created conversion class.
131 """
132
133 class Converter:
134
135 def __call__(self, setting):
136 return OrderedDict((key_type(StringConverter(key)),
137 value_type(StringConverter(value))
138 if value != '' else default)
139 for key, value in OrderedDict(setting).items())
140
141 def __repr__(self):
142 return 'typed_ordered_dict(%s, %s, default=%s)' % (
143 key_type.__name__, value_type.__name__, default)
144
145 return Converter()
146
147
148 @generate_repr('key', 'value', 'origin', 'from_cli', 'to_append')
149 class Setting(StringConverter):
150 """
151 A Setting consists mainly of a key and a value. It mainly offers many
152 conversions into common data types.
153 """
154
155 @enforce_signature
156 def __init__(self,
157 key,
158 value,
159 origin: (str, SourcePosition) = '',
160 strip_whitespaces: bool = True,
161 list_delimiters: Iterable = (',', ';'),
162 from_cli: bool = False,
163 remove_empty_iter_elements: bool = True,
164 to_append: bool = False):
165 """
166 Initializes a new Setting,
167
168 :param key: The key of the Setting.
169 :param value: The value, if you apply conversions
170 to this object these will be applied
171 to this value.
172 :param origin: The originating file. This will be
173 used for path conversions and the
174 last part will be stripped of. If
175 you want to specify a directory as
176 origin be sure to end it with a
177 directory separator.
178 :param strip_whitespaces: Whether to strip whitespaces from
179 the value or not
180 :param list_delimiters: Delimiters for list conversion
181 :param from_cli: True if this setting was read by the
182 CliParser.
183 :param remove_empty_iter_elements: Whether to remove empty elements in
184 iterable values.
185 :param to_append: The boolean value if setting value
186 needs to be appended to a setting in
187 the defaults of a section.
188 """
189 self.to_append = to_append
190
191 StringConverter.__init__(
192 self,
193 value,
194 strip_whitespaces=strip_whitespaces,
195 list_delimiters=list_delimiters,
196 remove_empty_iter_elements=remove_empty_iter_elements)
197
198 self.from_cli = from_cli
199 self.key = key
200 self._origin = origin
201
202 def __path__(self, origin=None, glob_escape_origin=False):
203 """
204 Determines the path of this setting.
205
206 Note: You can also use this function on strings, in that case the
207 origin argument will be taken in every case.
208
209 :param origin: The origin file to take if no origin is
210 specified for the given setting. If you
211 want to provide a directory, make sure it
212 ends with a directory separator.
213 :param glob_escape_origin: When this is set to true, the origin of
214 this setting will be escaped with
215 ``glob_escape``.
216 :return: An absolute path.
217 :raises ValueError: If no origin is specified in the setting
218 nor the given origin parameter.
219 """
220 strrep = str(self).strip()
221 if os.path.isabs(strrep):
222 return strrep
223
224 if hasattr(self, 'origin') and self.origin != '':
225 origin = self.origin
226
227 if origin is None:
228 raise ValueError('Cannot determine path without origin.')
229
230 # We need to get full path before escaping since the full path
231 # may introduce unintended glob characters
232 origin = os.path.abspath(os.path.dirname(origin))
233
234 if glob_escape_origin:
235 origin = glob_escape(origin)
236
237 return os.path.normpath(os.path.join(origin, strrep))
238
239 def __glob__(self, origin=None):
240 """
241 Determines the path of this setting with proper escaping of its
242 parent directories.
243
244 :param origin: The origin file to take if no origin is specified
245 for the given setting. If you want to provide a
246 directory, make sure it ends with a directory
247 separator.
248 :return: An absolute path in which the parent directories
249 are escaped.
250 :raises ValueError: If no origin is specified in the setting nor the
251 given origin parameter.
252 """
253 return Setting.__path__(self, origin, glob_escape_origin=True)
254
255 def __path_list__(self):
256 """
257 Splits the value into a list and creates a path out of each item taking
258 the origin of the setting into account.
259
260 :return: A list of absolute paths.
261 """
262 return [Setting.__path__(elem, self.origin) for elem in self]
263
264 def __glob_list__(self):
265 """
266 Splits the value into a list and creates a path out of each item in
267 which the special glob characters in origin are escaped.
268
269 :return: A list of absolute paths in which the special characters in
270 the parent directories of the setting are escaped.
271 """
272 return [Setting.__glob__(elem, self.origin) for elem in self]
273
274 def __iter__(self, remove_backslashes=True):
275 if self.to_append:
276 raise ValueError('Iteration on this object is invalid because the '
277 'value is incomplete. Please access the value of '
278 'the setting in a section to iterate through it.')
279 return StringConverter.__iter__(self, remove_backslashes)
280
281 @property
282 def key(self):
283 return self._key
284
285 @key.setter
286 def key(self, key):
287 newkey = str(key)
288 if newkey == '':
289 raise ValueError('An empty key is not allowed for a setting.')
290
291 self._key = newkey
292
293 @StringConverter.value.getter
294 def value(self):
295 if self.to_append:
296 raise ValueError('This property is invalid because the value is '
297 'incomplete. Please access the value of the '
298 'setting in a section to get the complete value.')
299 return self._value
300
301 @property
302 def origin(self):
303 """
304 Returns the filename.
305 """
306 if isinstance(self._origin, SourcePosition):
307 return self._origin.filename
308 else:
309 return self._origin
310
311 @property
312 def line_number(self):
313 if isinstance(self._origin, SourcePosition):
314 return self._origin.line
315 else:
316 raise TypeError("Instantiated with str 'origin' "
317 'which does not have line numbers. '
318 'Use SourcePosition for line numbers.')
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/coalib/settings/Setting.py b/coalib/settings/Setting.py
--- a/coalib/settings/Setting.py
+++ b/coalib/settings/Setting.py
@@ -171,7 +171,7 @@
to this value.
:param origin: The originating file. This will be
used for path conversions and the
- last part will be stripped of. If
+ last part will be stripped off. If
you want to specify a directory as
origin be sure to end it with a
directory separator.
|
{"golden_diff": "diff --git a/coalib/settings/Setting.py b/coalib/settings/Setting.py\n--- a/coalib/settings/Setting.py\n+++ b/coalib/settings/Setting.py\n@@ -171,7 +171,7 @@\n to this value.\n :param origin: The originating file. This will be\n used for path conversions and the\n- last part will be stripped of. If\n+ last part will be stripped off. If\n you want to specify a directory as\n origin be sure to end it with a\n directory separator.\n", "issue": "Typo in docstring\nIn `coala/coalib/settings/Setting.py`,\r\nIn line 174, the word `of` should be replaced with `off`. \n", "before_files": [{"content": "import os\nfrom collections import Iterable, OrderedDict\n\nfrom coala_utils.decorators import (\n enforce_signature,\n generate_repr,\n)\nfrom coala_utils.string_processing.StringConverter import StringConverter\nfrom coalib.bearlib.languages.Language import Language, UnknownLanguageError\nfrom coalib.parsing.Globbing import glob_escape\nfrom coalib.results.SourcePosition import SourcePosition\n\n\ndef path(obj, *args, **kwargs):\n return obj.__path__(*args, **kwargs)\n\n\ndef path_list(obj, *args, **kwargs):\n return obj.__path_list__(*args, **kwargs)\n\n\ndef url(obj, *args, **kwargs):\n return obj.__url__(*args, **kwargs)\n\n\ndef glob(obj, *args, **kwargs):\n \"\"\"\n Creates a path in which all special glob characters in all the\n parent directories in the given setting are properly escaped.\n\n :param obj: The ``Setting`` object from which the key is obtained.\n :return: Returns a path in which special glob characters are escaped.\n \"\"\"\n return obj.__glob__(*args, **kwargs)\n\n\ndef glob_list(obj, *args, **kwargs):\n \"\"\"\n Creates a list of paths in which all special glob characters in all the\n parent directories of all paths in the given setting are properly escaped.\n\n :param obj: The ``Setting`` object from which the key is obtained.\n :return: Returns a list of paths in which special glob characters are\n escaped.\n \"\"\"\n return obj.__glob_list__(*args, **kwargs)\n\n\ndef language(name):\n \"\"\"\n Convert a string into ``Language`` object.\n\n :param name: String containing language name.\n :return: ``Language`` object.\n :raises ValueError: If the ``name`` contain invalid language name.\n \"\"\"\n try:\n return Language[name]\n except UnknownLanguageError as e:\n raise ValueError(e)\n\n\ndef typed_list(conversion_func):\n \"\"\"\n Creates a class that converts a setting into a list of elements each\n converted with the given conversion function.\n\n :param conversion_func: The conversion function that converts a string into\n your desired list item object.\n :return: An instance of the created conversion class.\n \"\"\"\n\n class Converter:\n\n def __call__(self, setting):\n return [conversion_func(StringConverter(elem))\n for elem in setting]\n\n def __repr__(self):\n return 'typed_list(%s)' % conversion_func.__name__\n\n return Converter()\n\n\nstr_list = typed_list(str)\n\n\nint_list = typed_list(int)\n\n\nfloat_list = typed_list(float)\n\n\nbool_list = typed_list(bool)\n\n\ndef typed_dict(key_type, value_type, default):\n \"\"\"\n Creates a class that converts a setting into a dict with the given types.\n\n :param key_type: The type conversion function for the keys.\n :param value_type: The type conversion function for the values.\n :param default: The default value to use if no one is given by the user.\n :return: An instance of the created conversion class.\n \"\"\"\n\n class Converter:\n\n def __call__(self, setting):\n return 
{key_type(StringConverter(key)):\n value_type(StringConverter(value))\n if value != '' else default\n for key, value in dict(setting).items()}\n\n def __repr__(self):\n return 'typed_dict(%s, %s, default=%s)' % (\n key_type.__name__, value_type.__name__, default)\n\n return Converter()\n\n\ndef typed_ordered_dict(key_type, value_type, default):\n \"\"\"\n Creates a class that converts a setting into an ordered dict with the\n given types.\n\n :param key_type: The type conversion function for the keys.\n :param value_type: The type conversion function for the values.\n :param default: The default value to use if no one is given by the user.\n :return: An instance of the created conversion class.\n \"\"\"\n\n class Converter:\n\n def __call__(self, setting):\n return OrderedDict((key_type(StringConverter(key)),\n value_type(StringConverter(value))\n if value != '' else default)\n for key, value in OrderedDict(setting).items())\n\n def __repr__(self):\n return 'typed_ordered_dict(%s, %s, default=%s)' % (\n key_type.__name__, value_type.__name__, default)\n\n return Converter()\n\n\n@generate_repr('key', 'value', 'origin', 'from_cli', 'to_append')\nclass Setting(StringConverter):\n \"\"\"\n A Setting consists mainly of a key and a value. It mainly offers many\n conversions into common data types.\n \"\"\"\n\n @enforce_signature\n def __init__(self,\n key,\n value,\n origin: (str, SourcePosition) = '',\n strip_whitespaces: bool = True,\n list_delimiters: Iterable = (',', ';'),\n from_cli: bool = False,\n remove_empty_iter_elements: bool = True,\n to_append: bool = False):\n \"\"\"\n Initializes a new Setting,\n\n :param key: The key of the Setting.\n :param value: The value, if you apply conversions\n to this object these will be applied\n to this value.\n :param origin: The originating file. This will be\n used for path conversions and the\n last part will be stripped of. If\n you want to specify a directory as\n origin be sure to end it with a\n directory separator.\n :param strip_whitespaces: Whether to strip whitespaces from\n the value or not\n :param list_delimiters: Delimiters for list conversion\n :param from_cli: True if this setting was read by the\n CliParser.\n :param remove_empty_iter_elements: Whether to remove empty elements in\n iterable values.\n :param to_append: The boolean value if setting value\n needs to be appended to a setting in\n the defaults of a section.\n \"\"\"\n self.to_append = to_append\n\n StringConverter.__init__(\n self,\n value,\n strip_whitespaces=strip_whitespaces,\n list_delimiters=list_delimiters,\n remove_empty_iter_elements=remove_empty_iter_elements)\n\n self.from_cli = from_cli\n self.key = key\n self._origin = origin\n\n def __path__(self, origin=None, glob_escape_origin=False):\n \"\"\"\n Determines the path of this setting.\n\n Note: You can also use this function on strings, in that case the\n origin argument will be taken in every case.\n\n :param origin: The origin file to take if no origin is\n specified for the given setting. 
If you\n want to provide a directory, make sure it\n ends with a directory separator.\n :param glob_escape_origin: When this is set to true, the origin of\n this setting will be escaped with\n ``glob_escape``.\n :return: An absolute path.\n :raises ValueError: If no origin is specified in the setting\n nor the given origin parameter.\n \"\"\"\n strrep = str(self).strip()\n if os.path.isabs(strrep):\n return strrep\n\n if hasattr(self, 'origin') and self.origin != '':\n origin = self.origin\n\n if origin is None:\n raise ValueError('Cannot determine path without origin.')\n\n # We need to get full path before escaping since the full path\n # may introduce unintended glob characters\n origin = os.path.abspath(os.path.dirname(origin))\n\n if glob_escape_origin:\n origin = glob_escape(origin)\n\n return os.path.normpath(os.path.join(origin, strrep))\n\n def __glob__(self, origin=None):\n \"\"\"\n Determines the path of this setting with proper escaping of its\n parent directories.\n\n :param origin: The origin file to take if no origin is specified\n for the given setting. If you want to provide a\n directory, make sure it ends with a directory\n separator.\n :return: An absolute path in which the parent directories\n are escaped.\n :raises ValueError: If no origin is specified in the setting nor the\n given origin parameter.\n \"\"\"\n return Setting.__path__(self, origin, glob_escape_origin=True)\n\n def __path_list__(self):\n \"\"\"\n Splits the value into a list and creates a path out of each item taking\n the origin of the setting into account.\n\n :return: A list of absolute paths.\n \"\"\"\n return [Setting.__path__(elem, self.origin) for elem in self]\n\n def __glob_list__(self):\n \"\"\"\n Splits the value into a list and creates a path out of each item in\n which the special glob characters in origin are escaped.\n\n :return: A list of absolute paths in which the special characters in\n the parent directories of the setting are escaped.\n \"\"\"\n return [Setting.__glob__(elem, self.origin) for elem in self]\n\n def __iter__(self, remove_backslashes=True):\n if self.to_append:\n raise ValueError('Iteration on this object is invalid because the '\n 'value is incomplete. Please access the value of '\n 'the setting in a section to iterate through it.')\n return StringConverter.__iter__(self, remove_backslashes)\n\n @property\n def key(self):\n return self._key\n\n @key.setter\n def key(self, key):\n newkey = str(key)\n if newkey == '':\n raise ValueError('An empty key is not allowed for a setting.')\n\n self._key = newkey\n\n @StringConverter.value.getter\n def value(self):\n if self.to_append:\n raise ValueError('This property is invalid because the value is '\n 'incomplete. Please access the value of the '\n 'setting in a section to get the complete value.')\n return self._value\n\n @property\n def origin(self):\n \"\"\"\n Returns the filename.\n \"\"\"\n if isinstance(self._origin, SourcePosition):\n return self._origin.filename\n else:\n return self._origin\n\n @property\n def line_number(self):\n if isinstance(self._origin, SourcePosition):\n return self._origin.line\n else:\n raise TypeError(\"Instantiated with str 'origin' \"\n 'which does not have line numbers. 
'\n 'Use SourcePosition for line numbers.')\n", "path": "coalib/settings/Setting.py"}], "after_files": [{"content": "import os\nfrom collections import Iterable, OrderedDict\n\nfrom coala_utils.decorators import (\n enforce_signature,\n generate_repr,\n)\nfrom coala_utils.string_processing.StringConverter import StringConverter\nfrom coalib.bearlib.languages.Language import Language, UnknownLanguageError\nfrom coalib.parsing.Globbing import glob_escape\nfrom coalib.results.SourcePosition import SourcePosition\n\n\ndef path(obj, *args, **kwargs):\n return obj.__path__(*args, **kwargs)\n\n\ndef path_list(obj, *args, **kwargs):\n return obj.__path_list__(*args, **kwargs)\n\n\ndef url(obj, *args, **kwargs):\n return obj.__url__(*args, **kwargs)\n\n\ndef glob(obj, *args, **kwargs):\n \"\"\"\n Creates a path in which all special glob characters in all the\n parent directories in the given setting are properly escaped.\n\n :param obj: The ``Setting`` object from which the key is obtained.\n :return: Returns a path in which special glob characters are escaped.\n \"\"\"\n return obj.__glob__(*args, **kwargs)\n\n\ndef glob_list(obj, *args, **kwargs):\n \"\"\"\n Creates a list of paths in which all special glob characters in all the\n parent directories of all paths in the given setting are properly escaped.\n\n :param obj: The ``Setting`` object from which the key is obtained.\n :return: Returns a list of paths in which special glob characters are\n escaped.\n \"\"\"\n return obj.__glob_list__(*args, **kwargs)\n\n\ndef language(name):\n \"\"\"\n Convert a string into ``Language`` object.\n\n :param name: String containing language name.\n :return: ``Language`` object.\n :raises ValueError: If the ``name`` contain invalid language name.\n \"\"\"\n try:\n return Language[name]\n except UnknownLanguageError as e:\n raise ValueError(e)\n\n\ndef typed_list(conversion_func):\n \"\"\"\n Creates a class that converts a setting into a list of elements each\n converted with the given conversion function.\n\n :param conversion_func: The conversion function that converts a string into\n your desired list item object.\n :return: An instance of the created conversion class.\n \"\"\"\n\n class Converter:\n\n def __call__(self, setting):\n return [conversion_func(StringConverter(elem))\n for elem in setting]\n\n def __repr__(self):\n return 'typed_list(%s)' % conversion_func.__name__\n\n return Converter()\n\n\nstr_list = typed_list(str)\n\n\nint_list = typed_list(int)\n\n\nfloat_list = typed_list(float)\n\n\nbool_list = typed_list(bool)\n\n\ndef typed_dict(key_type, value_type, default):\n \"\"\"\n Creates a class that converts a setting into a dict with the given types.\n\n :param key_type: The type conversion function for the keys.\n :param value_type: The type conversion function for the values.\n :param default: The default value to use if no one is given by the user.\n :return: An instance of the created conversion class.\n \"\"\"\n\n class Converter:\n\n def __call__(self, setting):\n return {key_type(StringConverter(key)):\n value_type(StringConverter(value))\n if value != '' else default\n for key, value in dict(setting).items()}\n\n def __repr__(self):\n return 'typed_dict(%s, %s, default=%s)' % (\n key_type.__name__, value_type.__name__, default)\n\n return Converter()\n\n\ndef typed_ordered_dict(key_type, value_type, default):\n \"\"\"\n Creates a class that converts a setting into an ordered dict with the\n given types.\n\n :param key_type: The type conversion function for the keys.\n :param 
value_type: The type conversion function for the values.\n :param default: The default value to use if no one is given by the user.\n :return: An instance of the created conversion class.\n \"\"\"\n\n class Converter:\n\n def __call__(self, setting):\n return OrderedDict((key_type(StringConverter(key)),\n value_type(StringConverter(value))\n if value != '' else default)\n for key, value in OrderedDict(setting).items())\n\n def __repr__(self):\n return 'typed_ordered_dict(%s, %s, default=%s)' % (\n key_type.__name__, value_type.__name__, default)\n\n return Converter()\n\n\n@generate_repr('key', 'value', 'origin', 'from_cli', 'to_append')\nclass Setting(StringConverter):\n \"\"\"\n A Setting consists mainly of a key and a value. It mainly offers many\n conversions into common data types.\n \"\"\"\n\n @enforce_signature\n def __init__(self,\n key,\n value,\n origin: (str, SourcePosition) = '',\n strip_whitespaces: bool = True,\n list_delimiters: Iterable = (',', ';'),\n from_cli: bool = False,\n remove_empty_iter_elements: bool = True,\n to_append: bool = False):\n \"\"\"\n Initializes a new Setting,\n\n :param key: The key of the Setting.\n :param value: The value, if you apply conversions\n to this object these will be applied\n to this value.\n :param origin: The originating file. This will be\n used for path conversions and the\n last part will be stripped off. If\n you want to specify a directory as\n origin be sure to end it with a\n directory separator.\n :param strip_whitespaces: Whether to strip whitespaces from\n the value or not\n :param list_delimiters: Delimiters for list conversion\n :param from_cli: True if this setting was read by the\n CliParser.\n :param remove_empty_iter_elements: Whether to remove empty elements in\n iterable values.\n :param to_append: The boolean value if setting value\n needs to be appended to a setting in\n the defaults of a section.\n \"\"\"\n self.to_append = to_append\n\n StringConverter.__init__(\n self,\n value,\n strip_whitespaces=strip_whitespaces,\n list_delimiters=list_delimiters,\n remove_empty_iter_elements=remove_empty_iter_elements)\n\n self.from_cli = from_cli\n self.key = key\n self._origin = origin\n\n def __path__(self, origin=None, glob_escape_origin=False):\n \"\"\"\n Determines the path of this setting.\n\n Note: You can also use this function on strings, in that case the\n origin argument will be taken in every case.\n\n :param origin: The origin file to take if no origin is\n specified for the given setting. 
If you\n want to provide a directory, make sure it\n ends with a directory separator.\n :param glob_escape_origin: When this is set to true, the origin of\n this setting will be escaped with\n ``glob_escape``.\n :return: An absolute path.\n :raises ValueError: If no origin is specified in the setting\n nor the given origin parameter.\n \"\"\"\n strrep = str(self).strip()\n if os.path.isabs(strrep):\n return strrep\n\n if hasattr(self, 'origin') and self.origin != '':\n origin = self.origin\n\n if origin is None:\n raise ValueError('Cannot determine path without origin.')\n\n # We need to get full path before escaping since the full path\n # may introduce unintended glob characters\n origin = os.path.abspath(os.path.dirname(origin))\n\n if glob_escape_origin:\n origin = glob_escape(origin)\n\n return os.path.normpath(os.path.join(origin, strrep))\n\n def __glob__(self, origin=None):\n \"\"\"\n Determines the path of this setting with proper escaping of its\n parent directories.\n\n :param origin: The origin file to take if no origin is specified\n for the given setting. If you want to provide a\n directory, make sure it ends with a directory\n separator.\n :return: An absolute path in which the parent directories\n are escaped.\n :raises ValueError: If no origin is specified in the setting nor the\n given origin parameter.\n \"\"\"\n return Setting.__path__(self, origin, glob_escape_origin=True)\n\n def __path_list__(self):\n \"\"\"\n Splits the value into a list and creates a path out of each item taking\n the origin of the setting into account.\n\n :return: A list of absolute paths.\n \"\"\"\n return [Setting.__path__(elem, self.origin) for elem in self]\n\n def __glob_list__(self):\n \"\"\"\n Splits the value into a list and creates a path out of each item in\n which the special glob characters in origin are escaped.\n\n :return: A list of absolute paths in which the special characters in\n the parent directories of the setting are escaped.\n \"\"\"\n return [Setting.__glob__(elem, self.origin) for elem in self]\n\n def __iter__(self, remove_backslashes=True):\n if self.to_append:\n raise ValueError('Iteration on this object is invalid because the '\n 'value is incomplete. Please access the value of '\n 'the setting in a section to iterate through it.')\n return StringConverter.__iter__(self, remove_backslashes)\n\n @property\n def key(self):\n return self._key\n\n @key.setter\n def key(self, key):\n newkey = str(key)\n if newkey == '':\n raise ValueError('An empty key is not allowed for a setting.')\n\n self._key = newkey\n\n @StringConverter.value.getter\n def value(self):\n if self.to_append:\n raise ValueError('This property is invalid because the value is '\n 'incomplete. Please access the value of the '\n 'setting in a section to get the complete value.')\n return self._value\n\n @property\n def origin(self):\n \"\"\"\n Returns the filename.\n \"\"\"\n if isinstance(self._origin, SourcePosition):\n return self._origin.filename\n else:\n return self._origin\n\n @property\n def line_number(self):\n if isinstance(self._origin, SourcePosition):\n return self._origin.line\n else:\n raise TypeError(\"Instantiated with str 'origin' \"\n 'which does not have line numbers. '\n 'Use SourcePosition for line numbers.')\n", "path": "coalib/settings/Setting.py"}]}
| 3,401 | 126 |
gh_patches_debug_15663
|
rasdani/github-patches
|
git_diff
|
huggingface__peft-653
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deepcopy not copying the LoraConfig
I'm trying to make a deepcopy (using copy.deepcopy) of a LoraModel on the version '0.2.0'. But the values in PeftConfig remain the default ones (e.g. r = 8), not the ones of the copied model. Is it normal ? Am I supposed to do a `model_copy = get_peft_model(model_copy, peft_config)` ?
It is possible to implement `__deepcopy__` and `__copy__` if necessary.
Here is a reproduction example :
``` Python
import copy
from transformers import AutoModelForCausalLM
from peft import get_peft_config, get_peft_model, LoraConfig, TaskType
model_name_or_path = "gpt2"
tokenizer_name_or_path = "gpt2"
peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=87, lora_alpha=32, lora_dropout=0.1)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
model_copy = copy.deepcopy(model)
assert(model.peft_config.r == model_copy.peft_config.r)
```
Moreover, I also get an AssertionError if I continue with :
``` Python
model_copy = get_peft_model(model_copy, peft_config)
assert(model == model_copy)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/peft/utils/config.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2023-present the HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 import enum
16 import inspect
17 import json
18 import os
19 from dataclasses import asdict, dataclass, field
20 from typing import Optional, Union
21
22 from huggingface_hub import hf_hub_download
23 from transformers.utils import PushToHubMixin
24
25 from .other import CONFIG_NAME
26
27
28 class PeftType(str, enum.Enum):
29 PROMPT_TUNING = "PROMPT_TUNING"
30 P_TUNING = "P_TUNING"
31 PREFIX_TUNING = "PREFIX_TUNING"
32 LORA = "LORA"
33 ADALORA = "ADALORA"
34 ADAPTION_PROMPT = "ADAPTION_PROMPT"
35
36
37 class TaskType(str, enum.Enum):
38 SEQ_CLS = "SEQ_CLS"
39 SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM"
40 CAUSAL_LM = "CAUSAL_LM"
41 TOKEN_CLS = "TOKEN_CLS"
42 QUESTION_ANS = "QUESTION_ANS"
43
44
45 @dataclass
46 class PeftConfigMixin(PushToHubMixin):
47 r"""
48 This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all
49 PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to
50 push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a
51 directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.
52
53 Args:
54 peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
55 """
56 peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."})
57
58 @property
59 def __dict__(self):
60 return asdict(self)
61
62 def to_dict(self):
63 return self.__dict__
64
65 def save_pretrained(self, save_directory, **kwargs):
66 r"""
67 This method saves the configuration of your adapter model in a directory.
68
69 Args:
70 save_directory (`str`):
71 The directory where the configuration will be saved.
72 kwargs (additional keyword arguments, *optional*):
73 Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]
74 method.
75 """
76 if os.path.isfile(save_directory):
77 raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
78
79 os.makedirs(save_directory, exist_ok=True)
80
81 output_dict = self.__dict__
82 output_path = os.path.join(save_directory, CONFIG_NAME)
83
84 # save it
85 with open(output_path, "w") as writer:
86 writer.write(json.dumps(output_dict, indent=2, sort_keys=True))
87
88 @classmethod
89 def from_pretrained(cls, pretrained_model_name_or_path, subfolder=None, **kwargs):
90 r"""
91 This method loads the configuration of your adapter model from a directory.
92
93 Args:
94 pretrained_model_name_or_path (`str`):
95 The directory or the Hub repository id where the configuration is saved.
96 kwargs (additional keyword arguments, *optional*):
97 Additional keyword arguments passed along to the child class initialization.
98 """
99 path = (
100 os.path.join(pretrained_model_name_or_path, subfolder)
101 if subfolder is not None
102 else pretrained_model_name_or_path
103 )
104
105 hf_hub_download_kwargs, class_kwargs, other_kwargs = cls._split_kwargs(kwargs)
106
107 if os.path.isfile(os.path.join(path, CONFIG_NAME)):
108 config_file = os.path.join(path, CONFIG_NAME)
109 else:
110 try:
111 config_file = hf_hub_download(
112 pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs
113 )
114 except Exception:
115 raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'")
116
117 loaded_attributes = cls.from_json_file(config_file)
118
119 config = cls(**class_kwargs)
120
121 for key, value in loaded_attributes.items():
122 if hasattr(config, key):
123 setattr(config, key, value)
124
125 return config
126
127 @classmethod
128 def from_json_file(cls, path_json_file, **kwargs):
129 r"""
130 Loads a configuration file from a json file.
131
132 Args:
133 path_json_file (`str`):
134 The path to the json file.
135 """
136 with open(path_json_file, "r") as file:
137 json_object = json.load(file)
138
139 return json_object
140
141 @classmethod
142 def _split_kwargs(cls, kwargs):
143 hf_hub_download_kwargs = {}
144 class_kwargs = {}
145 other_kwargs = {}
146
147 for key, value in kwargs.items():
148 if key in inspect.signature(hf_hub_download).parameters:
149 hf_hub_download_kwargs[key] = value
150 elif key in list(cls.__annotations__):
151 class_kwargs[key] = value
152 else:
153 other_kwargs[key] = value
154
155 return hf_hub_download_kwargs, class_kwargs, other_kwargs
156
157 @classmethod
158 def _get_peft_type(
159 cls,
160 model_id,
161 subfolder: Optional[str] = None,
162 revision: Optional[str] = None,
163 cache_dir: Optional[str] = None,
164 ):
165 path = os.path.join(model_id, subfolder) if subfolder is not None else model_id
166
167 if os.path.isfile(os.path.join(path, CONFIG_NAME)):
168 config_file = os.path.join(path, CONFIG_NAME)
169 else:
170 try:
171 config_file = hf_hub_download(
172 model_id, CONFIG_NAME, subfolder=subfolder, revision=revision, cache_dir=cache_dir
173 )
174 except Exception:
175 raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'")
176
177 loaded_attributes = cls.from_json_file(config_file)
178 return loaded_attributes["peft_type"]
179
180
181 @dataclass
182 class PeftConfig(PeftConfigMixin):
183 """
184 This is the base configuration class to store the configuration of a [`PeftModel`].
185
186 Args:
187 peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.
188 task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.
189 inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.
190 """
191
192 base_model_name_or_path: str = field(default=None, metadata={"help": "The name of the base model to use."})
193 revision: str = field(default=None, metadata={"help": "The specific model version to use."})
194 peft_type: Union[str, PeftType] = field(default=None, metadata={"help": "Peft type"})
195 task_type: Union[str, TaskType] = field(default=None, metadata={"help": "Task type"})
196 inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"})
197
198
199 @dataclass
200 class PromptLearningConfig(PeftConfig):
201 """
202 This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or
203 [`PromptTuning`].
204
205 Args:
206 num_virtual_tokens (`int`): The number of virtual tokens to use.
207 token_dim (`int`): The hidden embedding dimension of the base transformer model.
208 num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.
209 num_attention_heads (`int`): The number of attention heads in the base transformer model.
210 num_layers (`int`): The number of layers in the base transformer model.
211 """
212
213 num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"})
214 token_dim: int = field(
215 default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"}
216 )
217 num_transformer_submodules: Optional[int] = field(
218 default=None, metadata={"help": "Number of transformer submodules"}
219 )
220 num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"})
221 num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"})
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/peft/utils/config.py b/src/peft/utils/config.py
--- a/src/peft/utils/config.py
+++ b/src/peft/utils/config.py
@@ -55,12 +55,8 @@
"""
peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."})
- @property
- def __dict__(self):
- return asdict(self)
-
def to_dict(self):
- return self.__dict__
+ return asdict(self)
def save_pretrained(self, save_directory, **kwargs):
r"""
@@ -78,7 +74,7 @@
os.makedirs(save_directory, exist_ok=True)
- output_dict = self.__dict__
+ output_dict = asdict(self)
output_path = os.path.join(save_directory, CONFIG_NAME)
# save it
|
{"golden_diff": "diff --git a/src/peft/utils/config.py b/src/peft/utils/config.py\n--- a/src/peft/utils/config.py\n+++ b/src/peft/utils/config.py\n@@ -55,12 +55,8 @@\n \"\"\"\n peft_type: Optional[PeftType] = field(default=None, metadata={\"help\": \"The type of PEFT model.\"})\n \n- @property\n- def __dict__(self):\n- return asdict(self)\n-\n def to_dict(self):\n- return self.__dict__\n+ return asdict(self)\n \n def save_pretrained(self, save_directory, **kwargs):\n r\"\"\"\n@@ -78,7 +74,7 @@\n \n os.makedirs(save_directory, exist_ok=True)\n \n- output_dict = self.__dict__\n+ output_dict = asdict(self)\n output_path = os.path.join(save_directory, CONFIG_NAME)\n \n # save it\n", "issue": "Deepcopy not copying the LoraConfig\nI'm trying to make a deepcopy (using copy.deepcopy) of a LoraModel on the version '0.2.0'. But the values in PeftConfig remain the default ones (e.g. r = 8), not the ones of the copied model. Is it normal ? Am I supposed to do a `model_copy = get_peft_model(model_copy, peft_config)` ?\r\nIt is possible to implement `__deepcopy__` and `__copy__` if necessary.\r\n\r\nHere is a reproduction example :\r\n``` Python\r\nimport copy\r\nfrom transformers import AutoModelForCausalLM\r\nfrom peft import get_peft_config, get_peft_model, LoraConfig, TaskType\r\nmodel_name_or_path = \"gpt2\"\r\ntokenizer_name_or_path = \"gpt2\"\r\npeft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=87, lora_alpha=32, lora_dropout=0.1)\r\nmodel = AutoModelForCausalLM.from_pretrained(model_name_or_path)\r\nmodel = get_peft_model(model, peft_config)\r\n\r\nmodel_copy = copy.deepcopy(model)\r\nassert(model.peft_config.r == model_copy.peft_config.r)\r\n```\r\n\r\nMoreover, I also get an AssertionError if I continue with :\r\n``` Python\r\nmodel_copy = get_peft_model(model_copy, peft_config)\r\nassert(model == model_copy)\r\n```\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2023-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport enum\nimport inspect\nimport json\nimport os\nfrom dataclasses import asdict, dataclass, field\nfrom typing import Optional, Union\n\nfrom huggingface_hub import hf_hub_download\nfrom transformers.utils import PushToHubMixin\n\nfrom .other import CONFIG_NAME\n\n\nclass PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n\n\nclass TaskType(str, enum.Enum):\n SEQ_CLS = \"SEQ_CLS\"\n SEQ_2_SEQ_LM = \"SEQ_2_SEQ_LM\"\n CAUSAL_LM = \"CAUSAL_LM\"\n TOKEN_CLS = \"TOKEN_CLS\"\n QUESTION_ANS = \"QUESTION_ANS\"\n\n\n@dataclass\nclass PeftConfigMixin(PushToHubMixin):\n r\"\"\"\n This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all\n PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to\n push your model to the Hub. 
The method `save_pretrained` will save the configuration of your adapter model in a\n directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n \"\"\"\n peft_type: Optional[PeftType] = field(default=None, metadata={\"help\": \"The type of PEFT model.\"})\n\n @property\n def __dict__(self):\n return asdict(self)\n\n def to_dict(self):\n return self.__dict__\n\n def save_pretrained(self, save_directory, **kwargs):\n r\"\"\"\n This method saves the configuration of your adapter model in a directory.\n\n Args:\n save_directory (`str`):\n The directory where the configuration will be saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]\n method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n output_dict = self.__dict__\n output_path = os.path.join(save_directory, CONFIG_NAME)\n\n # save it\n with open(output_path, \"w\") as writer:\n writer.write(json.dumps(output_dict, indent=2, sort_keys=True))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, subfolder=None, **kwargs):\n r\"\"\"\n This method loads the configuration of your adapter model from a directory.\n\n Args:\n pretrained_model_name_or_path (`str`):\n The directory or the Hub repository id where the configuration is saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the child class initialization.\n \"\"\"\n path = (\n os.path.join(pretrained_model_name_or_path, subfolder)\n if subfolder is not None\n else pretrained_model_name_or_path\n )\n\n hf_hub_download_kwargs, class_kwargs, other_kwargs = cls._split_kwargs(kwargs)\n\n if os.path.isfile(os.path.join(path, CONFIG_NAME)):\n config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n\n config = cls(**class_kwargs)\n\n for key, value in loaded_attributes.items():\n if hasattr(config, key):\n setattr(config, key, value)\n\n return config\n\n @classmethod\n def from_json_file(cls, path_json_file, **kwargs):\n r\"\"\"\n Loads a configuration file from a json file.\n\n Args:\n path_json_file (`str`):\n The path to the json file.\n \"\"\"\n with open(path_json_file, \"r\") as file:\n json_object = json.load(file)\n\n return json_object\n\n @classmethod\n def _split_kwargs(cls, kwargs):\n hf_hub_download_kwargs = {}\n class_kwargs = {}\n other_kwargs = {}\n\n for key, value in kwargs.items():\n if key in inspect.signature(hf_hub_download).parameters:\n hf_hub_download_kwargs[key] = value\n elif key in list(cls.__annotations__):\n class_kwargs[key] = value\n else:\n other_kwargs[key] = value\n\n return hf_hub_download_kwargs, class_kwargs, other_kwargs\n\n @classmethod\n def _get_peft_type(\n cls,\n model_id,\n subfolder: Optional[str] = None,\n revision: Optional[str] = None,\n cache_dir: Optional[str] = None,\n ):\n path = os.path.join(model_id, subfolder) if subfolder is not None else model_id\n\n if 
os.path.isfile(os.path.join(path, CONFIG_NAME)):\n config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n model_id, CONFIG_NAME, subfolder=subfolder, revision=revision, cache_dir=cache_dir\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{model_id}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n return loaded_attributes[\"peft_type\"]\n\n\n@dataclass\nclass PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n revision: str = field(default=None, metadata={\"help\": \"The specific model version to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})\n\n\n@dataclass\nclass PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})\n", "path": "src/peft/utils/config.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2023-present the HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport enum\nimport inspect\nimport json\nimport os\nfrom dataclasses import asdict, dataclass, field\nfrom typing import Optional, Union\n\nfrom huggingface_hub import hf_hub_download\nfrom transformers.utils import PushToHubMixin\n\nfrom .other import CONFIG_NAME\n\n\nclass PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n\n\nclass TaskType(str, enum.Enum):\n SEQ_CLS = \"SEQ_CLS\"\n SEQ_2_SEQ_LM = \"SEQ_2_SEQ_LM\"\n CAUSAL_LM = \"CAUSAL_LM\"\n TOKEN_CLS = \"TOKEN_CLS\"\n QUESTION_ANS = \"QUESTION_ANS\"\n\n\n@dataclass\nclass PeftConfigMixin(PushToHubMixin):\n r\"\"\"\n This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all\n PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to\n push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a\n directory. The method `from_pretrained` will load the configuration of your adapter model from a directory.\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n \"\"\"\n peft_type: Optional[PeftType] = field(default=None, metadata={\"help\": \"The type of PEFT model.\"})\n\n def to_dict(self):\n return asdict(self)\n\n def save_pretrained(self, save_directory, **kwargs):\n r\"\"\"\n This method saves the configuration of your adapter model in a directory.\n\n Args:\n save_directory (`str`):\n The directory where the configuration will be saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`]\n method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n output_dict = asdict(self)\n output_path = os.path.join(save_directory, CONFIG_NAME)\n\n # save it\n with open(output_path, \"w\") as writer:\n writer.write(json.dumps(output_dict, indent=2, sort_keys=True))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, subfolder=None, **kwargs):\n r\"\"\"\n This method loads the configuration of your adapter model from a directory.\n\n Args:\n pretrained_model_name_or_path (`str`):\n The directory or the Hub repository id where the configuration is saved.\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the child class initialization.\n \"\"\"\n path = (\n os.path.join(pretrained_model_name_or_path, subfolder)\n if subfolder is not None\n else pretrained_model_name_or_path\n )\n\n hf_hub_download_kwargs, class_kwargs, other_kwargs = cls._split_kwargs(kwargs)\n\n if os.path.isfile(os.path.join(path, CONFIG_NAME)):\n 
config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n\n config = cls(**class_kwargs)\n\n for key, value in loaded_attributes.items():\n if hasattr(config, key):\n setattr(config, key, value)\n\n return config\n\n @classmethod\n def from_json_file(cls, path_json_file, **kwargs):\n r\"\"\"\n Loads a configuration file from a json file.\n\n Args:\n path_json_file (`str`):\n The path to the json file.\n \"\"\"\n with open(path_json_file, \"r\") as file:\n json_object = json.load(file)\n\n return json_object\n\n @classmethod\n def _split_kwargs(cls, kwargs):\n hf_hub_download_kwargs = {}\n class_kwargs = {}\n other_kwargs = {}\n\n for key, value in kwargs.items():\n if key in inspect.signature(hf_hub_download).parameters:\n hf_hub_download_kwargs[key] = value\n elif key in list(cls.__annotations__):\n class_kwargs[key] = value\n else:\n other_kwargs[key] = value\n\n return hf_hub_download_kwargs, class_kwargs, other_kwargs\n\n @classmethod\n def _get_peft_type(\n cls,\n model_id,\n subfolder: Optional[str] = None,\n revision: Optional[str] = None,\n cache_dir: Optional[str] = None,\n ):\n path = os.path.join(model_id, subfolder) if subfolder is not None else model_id\n\n if os.path.isfile(os.path.join(path, CONFIG_NAME)):\n config_file = os.path.join(path, CONFIG_NAME)\n else:\n try:\n config_file = hf_hub_download(\n model_id, CONFIG_NAME, subfolder=subfolder, revision=revision, cache_dir=cache_dir\n )\n except Exception:\n raise ValueError(f\"Can't find '{CONFIG_NAME}' at '{model_id}'\")\n\n loaded_attributes = cls.from_json_file(config_file)\n return loaded_attributes[\"peft_type\"]\n\n\n@dataclass\nclass PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n revision: str = field(default=None, metadata={\"help\": \"The specific model version to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})\n\n\n@dataclass\nclass PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, 
metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})\n", "path": "src/peft/utils/config.py"}]}
| 3,044 | 204 |
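The deepcopy failure in the record above comes from `PeftConfigMixin` exposing `__dict__` as a property that returns `asdict(self)`: `copy.deepcopy` rebuilds the copy by calling `copy_of.__dict__.update(state)`, and with the property that update lands on a throwaway dict, so the copy silently falls back to the dataclass defaults (hence `r` reverting to 8 in the reporter's example). The golden diff removes the property and has `to_dict()` return `asdict(self)` directly. Below is a minimal, self-contained sketch of the failure mode, using toy classes rather than the real peft config types:

```python
# Toy classes (not the real peft ones) showing why a __dict__ property breaks deepcopy.
import copy
from dataclasses import asdict, dataclass, field


@dataclass
class BrokenConfig:
    r: int = field(default=8)

    @property
    def __dict__(self):
        # Shadows the real instance dict on reads, as the pre-fix PeftConfigMixin did.
        return asdict(self)


@dataclass
class FixedConfig:
    r: int = field(default=8)

    def to_dict(self):
        return asdict(self)


broken = BrokenConfig(r=87)
fixed = FixedConfig(r=87)

# deepcopy restores state via copy_of.__dict__.update(state); with the property,
# the update hits a temporary dict, so the copy keeps the class default r=8.
print(copy.deepcopy(broken).r)  # 8, not 87
print(copy.deepcopy(fixed).r)   # 87
```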
gh_patches_debug_8931
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1306
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: missing changelog in documentation
Our changelog is available only as a .md file in the root folder. It should be part of the online documentation as well, as @StevenLi-DS correctly pointed out in https://github.com/geopandas/geopandas/issues/1076#issuecomment-590126250.
--- END ISSUE ---
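The change this issue points toward is letting Sphinx read Markdown sources so the root CHANGELOG.md can be rendered in the online docs. A minimal sketch of the relevant `doc/source/conf.py` settings, assuming the `recommonmark` extension is the Markdown bridge used:

```python
# Sketch of the relevant doc/source/conf.py settings (recommonmark assumed installed).
extensions = [
    # ... existing Sphinx extensions ...
    'recommonmark',   # lets Sphinx parse Markdown files such as CHANGELOG.md
]

# Accept both reStructuredText and Markdown source files.
source_suffix = ['.rst', '.md']
```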
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/source/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # GeoPandas documentation build configuration file, created by
4 # sphinx-quickstart on Tue Oct 15 08:08:14 2013.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys, os
15 import warnings
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 #sys.path.insert(0, os.path.abspath('.'))
21
22 # -- General configuration -----------------------------------------------------
23
24 # If your documentation needs a minimal Sphinx version, state it here.
25 #needs_sphinx = '1.0'
26
27 # Add any Sphinx extension module names here, as strings. They can be extensions
28 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
29 extensions = ['IPython.sphinxext.ipython_console_highlighting',
30 'IPython.sphinxext.ipython_directive',
31 'sphinx_gallery.gen_gallery',
32 'sphinx.ext.autosummary',
33 'sphinx.ext.intersphinx',
34 'sphinx.ext.autodoc',
35 'numpydoc',
36 ]
37
38 # continue doc build and only print warnings/errors in examples
39 ipython_warning_is_error = False
40 ipython_exec_lines = [
41 # ensure that dataframes are not truncated in the IPython code blocks
42 'import pandas as _pd',
43 '_pd.set_option("display.max_columns", 20)',
44 '_pd.set_option("display.width", 100)'
45 ]
46
47 # Fix issue with warnings from numpydoc (see discussion in PR #534)
48 numpydoc_show_class_members = False
49
50 def setup(app):
51 app.add_stylesheet('custom.css') # may also be an URL
52
53 # Add any paths that contain templates here, relative to this directory.
54
55 templates_path = ['_templates']
56
57 autosummary_generate = True
58
59 # Sphinx gallery configuration
60 sphinx_gallery_conf = {
61 'examples_dirs': ['../../examples'],
62 'filename_pattern': '^((?!sgskip).)*$',
63 'gallery_dirs': ['gallery'],
64 'doc_module': ('geopandas',),
65 'reference_url': {'matplotlib': 'http://matplotlib.org',
66 'numpy': 'http://docs.scipy.org/doc/numpy',
67 'scipy': 'http://docs.scipy.org/doc/scipy/reference',
68 'pyproj': 'http://pyproj4.github.io/pyproj/stable/',
69 'geopandas': None},
70 'backreferences_dir': 'reference'
71 }
72
73 # suppress matplotlib warning in examples
74 warnings.filterwarnings(
75 "ignore",
76 category=UserWarning,
77 message="Matplotlib is currently using agg, which is a"
78 " non-GUI backend, so cannot show the figure.",
79 )
80
81 # The suffix of source filenames.
82 source_suffix = '.rst'
83
84 # The encoding of source files.
85 #source_encoding = 'utf-8-sig'
86
87 # The master toctree document.
88 master_doc = 'index'
89
90 # General information about the project.
91 project = u'GeoPandas'
92 copyright = u'2013–2019, GeoPandas developers'
93
94 # The version info for the project you're documenting, acts as replacement for
95 # |version| and |release|, also used in various other places throughout the
96 # built documents.
97 import geopandas
98 version = release = geopandas.__version__
99
100 # The language for content autogenerated by Sphinx. Refer to documentation
101 # for a list of supported languages.
102 #language = None
103
104 # There are two options for replacing |today|: either, you set today to some
105 # non-false value, then it is used:
106 #today = ''
107 # Else, today_fmt is used as the format for a strftime call.
108 #today_fmt = '%B %d, %Y'
109
110 # List of patterns, relative to source directory, that match files and
111 # directories to ignore when looking for source files.
112 exclude_patterns = []
113
114 # The reST default role (used for this markup: `text`) to use for all documents.
115 #default_role = None
116
117 # If true, '()' will be appended to :func: etc. cross-reference text.
118 #add_function_parentheses = True
119
120 # If true, the current module name will be prepended to all description
121 # unit titles (such as .. function::).
122 #add_module_names = True
123
124 # If true, sectionauthor and moduleauthor directives will be shown in the
125 # output. They are ignored by default.
126 #show_authors = False
127
128 # The name of the Pygments (syntax highlighting) style to use.
129 pygments_style = 'sphinx'
130
131 # A list of ignored prefixes for module index sorting.
132 #modindex_common_prefix = []
133
134
135 # -- Options for HTML output ---------------------------------------------------
136
137 # The theme to use for HTML and HTML Help pages. See the documentation for
138 # a list of builtin themes.
139 import sphinx_rtd_theme
140 html_theme = "sphinx_rtd_theme"
141 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
142
143 # Theme options are theme-specific and customize the look and feel of a theme
144 # further. For a list of options available for each theme, see the
145 # documentation.
146 #html_theme_options = {}
147
148 # Add any paths that contain custom themes here, relative to this directory.
149 #html_theme_path = []
150
151 # The name for this set of Sphinx documents. If None, it defaults to
152 # "<project> v<release> documentation".
153 #html_title = None
154
155 # A shorter title for the navigation bar. Default is the same as html_title.
156 #html_short_title = None
157
158 # The name of an image file (relative to this directory) to place at the top
159 # of the sidebar.
160 #html_logo = None
161
162 # The name of an image file (within the static path) to use as favicon of the
163 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
164 # pixels large.
165 #html_favicon = None
166
167 # Add any paths that contain custom static files (such as style sheets) here,
168 # relative to this directory. They are copied after the builtin static files,
169 # so a file named "default.css" will overwrite the builtin "default.css".
170 html_static_path = ['_static']
171
172 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
173 # using the given strftime format.
174 #html_last_updated_fmt = '%b %d, %Y'
175
176 # If true, SmartyPants will be used to convert quotes and dashes to
177 # typographically correct entities.
178 #html_use_smartypants = True
179
180 # Custom sidebar templates, maps document names to template names.
181 #html_sidebars = {}
182
183 # Additional templates that should be rendered to pages, maps page names to
184 # template names.
185 #html_additional_pages = {}
186
187 # If false, no module index is generated.
188 #html_domain_indices = True
189
190 # If false, no index is generated.
191 #html_use_index = True
192
193 # If true, the index is split into individual pages for each letter.
194 #html_split_index = False
195
196 # If true, links to the reST sources are added to the pages.
197 #html_show_sourcelink = True
198
199 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
200 #html_show_sphinx = True
201
202 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
203 #html_show_copyright = True
204
205 # If true, an OpenSearch description file will be output, and all pages will
206 # contain a <link> tag referring to it. The value of this option must be the
207 # base URL from which the finished HTML is served.
208 #html_use_opensearch = ''
209
210 # This is the file name suffix for HTML files (e.g. ".xhtml").
211 #html_file_suffix = None
212
213 # Output file base name for HTML help builder.
214 htmlhelp_basename = 'GeoPandasdoc'
215
216
217 # -- Options for LaTeX output --------------------------------------------------
218
219 latex_elements = {
220 # The paper size ('letterpaper' or 'a4paper').
221 #'papersize': 'letterpaper',
222
223 # The font size ('10pt', '11pt' or '12pt').
224 #'pointsize': '10pt',
225
226 # Additional stuff for the LaTeX preamble.
227 #'preamble': '',
228 }
229
230 # Grouping the document tree into LaTeX files. List of tuples
231 # (source start file, target name, title, author, documentclass [howto/manual]).
232 latex_documents = [
233 ('index', 'GeoPandas.tex', u'GeoPandas Documentation',
234 u'Kelsey Jordahl', 'manual'),
235 ]
236
237 # The name of an image file (relative to this directory) to place at the top of
238 # the title page.
239 #latex_logo = None
240
241 # For "manual" documents, if this is true, then toplevel headings are parts,
242 # not chapters.
243 #latex_use_parts = False
244
245 # If true, show page references after internal links.
246 #latex_show_pagerefs = False
247
248 # If true, show URL addresses after external links.
249 #latex_show_urls = False
250
251 # Documents to append as an appendix to all manuals.
252 #latex_appendices = []
253
254 # If false, no module index is generated.
255 #latex_domain_indices = True
256
257
258 # -- Options for manual page output --------------------------------------------
259
260 # One entry per manual page. List of tuples
261 # (source start file, name, description, authors, manual section).
262 man_pages = [
263 ('index', 'geopandas', u'GeoPandas Documentation',
264 [u'Kelsey Jordahl'], 1)
265 ]
266
267 # If true, show URL addresses after external links.
268 #man_show_urls = False
269
270
271 # -- Options for Texinfo output ------------------------------------------------
272
273 # Grouping the document tree into Texinfo files. List of tuples
274 # (source start file, target name, title, author,
275 # dir menu entry, description, category)
276 texinfo_documents = [
277 ('index', 'GeoPandas', u'GeoPandas Documentation',
278 u'Kelsey Jordahl', 'GeoPandas', 'One line description of project.',
279 'Miscellaneous'),
280 ]
281
282 # Documents to append as an appendix to all manuals.
283 #texinfo_appendices = []
284
285 # If false, no module index is generated.
286 #texinfo_domain_indices = True
287
288 # How to display URL addresses: 'footnote', 'no', or 'inline'.
289 #texinfo_show_urls = 'footnote'
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -32,6 +32,7 @@
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
+ 'recommonmark',
'numpydoc',
]
@@ -79,7 +80,7 @@
)
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
|
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -32,6 +32,7 @@\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n+ 'recommonmark',\n 'numpydoc',\n ]\n \n@@ -79,7 +80,7 @@\n )\n \n # The suffix of source filenames.\n-source_suffix = '.rst'\n+source_suffix = ['.rst', '.md']\n \n # The encoding of source files.\n #source_encoding = 'utf-8-sig'\n", "issue": "DOC: missing changelog in documentation\nOur changelog is available only as a .md file in the root folder. It should be part of the documentation online as well as @StevenLi-DS correctly pointed out in https://github.com/geopandas/geopandas/issues/1076#issuecomment-590126250.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# GeoPandas documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 15 08:08:14 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\nimport warnings\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['IPython.sphinxext.ipython_console_highlighting',\n 'IPython.sphinxext.ipython_directive',\n 'sphinx_gallery.gen_gallery',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'numpydoc',\n]\n\n# continue doc build and only print warnings/errors in examples\nipython_warning_is_error = False\nipython_exec_lines = [\n # ensure that dataframes are not truncated in the IPython code blocks\n 'import pandas as _pd',\n '_pd.set_option(\"display.max_columns\", 20)',\n '_pd.set_option(\"display.width\", 100)'\n]\n\n# Fix issue with warnings from numpydoc (see discussion in PR #534)\nnumpydoc_show_class_members = False\n\ndef setup(app):\n app.add_stylesheet('custom.css') # may also be an URL\n\n# Add any paths that contain templates here, relative to this directory.\n\ntemplates_path = ['_templates']\n\nautosummary_generate = True\n\n# Sphinx gallery configuration\nsphinx_gallery_conf = {\n 'examples_dirs': ['../../examples'],\n 'filename_pattern': '^((?!sgskip).)*$',\n 'gallery_dirs': ['gallery'],\n 'doc_module': ('geopandas',),\n 'reference_url': {'matplotlib': 'http://matplotlib.org',\n 'numpy': 'http://docs.scipy.org/doc/numpy',\n 'scipy': 'http://docs.scipy.org/doc/scipy/reference',\n 'pyproj': 'http://pyproj4.github.io/pyproj/stable/',\n 'geopandas': None},\n 'backreferences_dir': 'reference'\n}\n\n# suppress matplotlib warning in examples\nwarnings.filterwarnings(\n \"ignore\",\n category=UserWarning,\n message=\"Matplotlib is currently using agg, which is a\"\n \" non-GUI backend, so cannot show the figure.\",\n)\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding 
= 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'GeoPandas'\ncopyright = u'2013\u20132019, GeoPandas developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\nimport geopandas\nversion = release = geopandas.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nimport sphinx_rtd_theme\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'GeoPandasdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'GeoPandas.tex', u'GeoPandas Documentation',\n u'Kelsey Jordahl', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'geopandas', u'GeoPandas Documentation',\n [u'Kelsey Jordahl'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'GeoPandas', u'GeoPandas Documentation',\n u'Kelsey Jordahl', 'GeoPandas', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "doc/source/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# GeoPandas documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 15 08:08:14 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\nimport warnings\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['IPython.sphinxext.ipython_console_highlighting',\n 'IPython.sphinxext.ipython_directive',\n 'sphinx_gallery.gen_gallery',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'recommonmark',\n 'numpydoc',\n]\n\n# continue doc build and only print warnings/errors in examples\nipython_warning_is_error = False\nipython_exec_lines = [\n # ensure that dataframes are not truncated in the IPython code blocks\n 'import pandas as _pd',\n '_pd.set_option(\"display.max_columns\", 20)',\n '_pd.set_option(\"display.width\", 100)'\n]\n\n# Fix issue with warnings from numpydoc (see discussion in PR #534)\nnumpydoc_show_class_members = False\n\ndef setup(app):\n app.add_stylesheet('custom.css') # may also be an URL\n\n# Add any paths that contain templates here, relative to this directory.\n\ntemplates_path = ['_templates']\n\nautosummary_generate = True\n\n# Sphinx gallery configuration\nsphinx_gallery_conf = {\n 'examples_dirs': ['../../examples'],\n 'filename_pattern': '^((?!sgskip).)*$',\n 'gallery_dirs': ['gallery'],\n 'doc_module': ('geopandas',),\n 'reference_url': {'matplotlib': 'http://matplotlib.org',\n 'numpy': 'http://docs.scipy.org/doc/numpy',\n 'scipy': 'http://docs.scipy.org/doc/scipy/reference',\n 'pyproj': 'http://pyproj4.github.io/pyproj/stable/',\n 'geopandas': None},\n 'backreferences_dir': 'reference'\n}\n\n# suppress matplotlib warning in examples\nwarnings.filterwarnings(\n \"ignore\",\n category=UserWarning,\n message=\"Matplotlib is currently using agg, which is a\"\n \" non-GUI backend, so cannot show the figure.\",\n)\n\n# The suffix of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'GeoPandas'\ncopyright 
= u'2013\u20132019, GeoPandas developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\nimport geopandas\nversion = release = geopandas.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nimport sphinx_rtd_theme\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'GeoPandasdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'GeoPandas.tex', u'GeoPandas Documentation',\n u'Kelsey Jordahl', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'geopandas', u'GeoPandas Documentation',\n [u'Kelsey Jordahl'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'GeoPandas', u'GeoPandas Documentation',\n u'Kelsey Jordahl', 'GeoPandas', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "doc/source/conf.py"}]}
| 3,434 | 138 |
gh_patches_debug_16485
|
rasdani/github-patches
|
git_diff
|
Textualize__rich-3192
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `Panel.fit` forgot `height` and `highlight` parameters
- [x] I've checked [docs](https://rich.readthedocs.io/en/latest/introduction.html) and [closed issues](https://github.com/Textualize/rich/issues?q=is%3Aissue+is%3Aclosed) for possible solutions.
- [x] I can't find my issue in the [FAQ](https://github.com/Textualize/rich/blob/master/FAQ.md).
**Describe the bug**
`Panel.fit` forgot `height` and `highlight` parameters. It should be updated to synchronize with the constructor.
```python
class Panel(JupyterMixin):
def __init__(
self,
renderable: "RenderableType",
box: Box = ROUNDED,
*,
title: Optional[TextType] = None,
title_align: AlignMethod = "center",
subtitle: Optional[TextType] = None,
subtitle_align: AlignMethod = "center",
safe_box: Optional[bool] = None,
expand: bool = True,
style: StyleType = "none",
border_style: StyleType = "none",
width: Optional[int] = None,
height: Optional[int] = None,
padding: PaddingDimensions = (0, 1),
highlight: bool = False,
) -> None:
...
@classmethod
def fit(
cls,
renderable: "RenderableType",
box: Box = ROUNDED,
*,
title: Optional[TextType] = None,
title_align: AlignMethod = "center",
subtitle: Optional[TextType] = None,
subtitle_align: AlignMethod = "center",
safe_box: Optional[bool] = None,
style: StyleType = "none",
border_style: StyleType = "none",
width: Optional[int] = None,
padding: PaddingDimensions = (0, 1),
) -> "Panel":
...
```
--- END ISSUE ---
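A quick reproduction sketch of the gap described above: the constructor accepts both keywords, while `fit()` rejects them because its signature omits them and takes no `**kwargs`:

```python
# Reproduction sketch against the pre-fix rich shown below.
from rich.panel import Panel

Panel("Hello", height=5, highlight=True)  # accepted by the constructor

try:
    Panel.fit("Hello", height=5, highlight=True)
except TypeError as error:
    print(error)  # unexpected keyword argument 'height'
```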
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rich/panel.py`
Content:
```
1 from typing import TYPE_CHECKING, Optional
2
3 from .align import AlignMethod
4 from .box import ROUNDED, Box
5 from .cells import cell_len
6 from .jupyter import JupyterMixin
7 from .measure import Measurement, measure_renderables
8 from .padding import Padding, PaddingDimensions
9 from .segment import Segment
10 from .style import Style, StyleType
11 from .text import Text, TextType
12
13 if TYPE_CHECKING:
14 from .console import Console, ConsoleOptions, RenderableType, RenderResult
15
16
17 class Panel(JupyterMixin):
18 """A console renderable that draws a border around its contents.
19
20 Example:
21 >>> console.print(Panel("Hello, World!"))
22
23 Args:
24 renderable (RenderableType): A console renderable object.
25 box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`.
26 Defaults to box.ROUNDED.
27 safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
28 expand (bool, optional): If True the panel will stretch to fill the console
29 width, otherwise it will be sized to fit the contents. Defaults to True.
30 style (str, optional): The style of the panel (border and contents). Defaults to "none".
31 border_style (str, optional): The style of the border. Defaults to "none".
32 width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.
33 height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.
34 padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0.
35 highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.
36 """
37
38 def __init__(
39 self,
40 renderable: "RenderableType",
41 box: Box = ROUNDED,
42 *,
43 title: Optional[TextType] = None,
44 title_align: AlignMethod = "center",
45 subtitle: Optional[TextType] = None,
46 subtitle_align: AlignMethod = "center",
47 safe_box: Optional[bool] = None,
48 expand: bool = True,
49 style: StyleType = "none",
50 border_style: StyleType = "none",
51 width: Optional[int] = None,
52 height: Optional[int] = None,
53 padding: PaddingDimensions = (0, 1),
54 highlight: bool = False,
55 ) -> None:
56 self.renderable = renderable
57 self.box = box
58 self.title = title
59 self.title_align: AlignMethod = title_align
60 self.subtitle = subtitle
61 self.subtitle_align = subtitle_align
62 self.safe_box = safe_box
63 self.expand = expand
64 self.style = style
65 self.border_style = border_style
66 self.width = width
67 self.height = height
68 self.padding = padding
69 self.highlight = highlight
70
71 @classmethod
72 def fit(
73 cls,
74 renderable: "RenderableType",
75 box: Box = ROUNDED,
76 *,
77 title: Optional[TextType] = None,
78 title_align: AlignMethod = "center",
79 subtitle: Optional[TextType] = None,
80 subtitle_align: AlignMethod = "center",
81 safe_box: Optional[bool] = None,
82 style: StyleType = "none",
83 border_style: StyleType = "none",
84 width: Optional[int] = None,
85 padding: PaddingDimensions = (0, 1),
86 ) -> "Panel":
87 """An alternative constructor that sets expand=False."""
88 return cls(
89 renderable,
90 box,
91 title=title,
92 title_align=title_align,
93 subtitle=subtitle,
94 subtitle_align=subtitle_align,
95 safe_box=safe_box,
96 style=style,
97 border_style=border_style,
98 width=width,
99 padding=padding,
100 expand=False,
101 )
102
103 @property
104 def _title(self) -> Optional[Text]:
105 if self.title:
106 title_text = (
107 Text.from_markup(self.title)
108 if isinstance(self.title, str)
109 else self.title.copy()
110 )
111 title_text.end = ""
112 title_text.plain = title_text.plain.replace("\n", " ")
113 title_text.no_wrap = True
114 title_text.expand_tabs()
115 title_text.pad(1)
116 return title_text
117 return None
118
119 @property
120 def _subtitle(self) -> Optional[Text]:
121 if self.subtitle:
122 subtitle_text = (
123 Text.from_markup(self.subtitle)
124 if isinstance(self.subtitle, str)
125 else self.subtitle.copy()
126 )
127 subtitle_text.end = ""
128 subtitle_text.plain = subtitle_text.plain.replace("\n", " ")
129 subtitle_text.no_wrap = True
130 subtitle_text.expand_tabs()
131 subtitle_text.pad(1)
132 return subtitle_text
133 return None
134
135 def __rich_console__(
136 self, console: "Console", options: "ConsoleOptions"
137 ) -> "RenderResult":
138 _padding = Padding.unpack(self.padding)
139 renderable = (
140 Padding(self.renderable, _padding) if any(_padding) else self.renderable
141 )
142 style = console.get_style(self.style)
143 border_style = style + console.get_style(self.border_style)
144 width = (
145 options.max_width
146 if self.width is None
147 else min(options.max_width, self.width)
148 )
149
150 safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box
151 box = self.box.substitute(options, safe=safe_box)
152
153 def align_text(
154 text: Text, width: int, align: str, character: str, style: Style
155 ) -> Text:
156 """Gets new aligned text.
157
158 Args:
159 text (Text): Title or subtitle text.
160 width (int): Desired width.
161 align (str): Alignment.
162 character (str): Character for alignment.
163 style (Style): Border style
164
165 Returns:
166 Text: New text instance
167 """
168 text = text.copy()
169 text.truncate(width)
170 excess_space = width - cell_len(text.plain)
171 if excess_space:
172 if align == "left":
173 return Text.assemble(
174 text,
175 (character * excess_space, style),
176 no_wrap=True,
177 end="",
178 )
179 elif align == "center":
180 left = excess_space // 2
181 return Text.assemble(
182 (character * left, style),
183 text,
184 (character * (excess_space - left), style),
185 no_wrap=True,
186 end="",
187 )
188 else:
189 return Text.assemble(
190 (character * excess_space, style),
191 text,
192 no_wrap=True,
193 end="",
194 )
195 return text
196
197 title_text = self._title
198 if title_text is not None:
199 title_text.stylize_before(border_style)
200
201 child_width = (
202 width - 2
203 if self.expand
204 else console.measure(
205 renderable, options=options.update_width(width - 2)
206 ).maximum
207 )
208 child_height = self.height or options.height or None
209 if child_height:
210 child_height -= 2
211 if title_text is not None:
212 child_width = min(
213 options.max_width - 2, max(child_width, title_text.cell_len + 2)
214 )
215
216 width = child_width + 2
217 child_options = options.update(
218 width=child_width, height=child_height, highlight=self.highlight
219 )
220 lines = console.render_lines(renderable, child_options, style=style)
221
222 line_start = Segment(box.mid_left, border_style)
223 line_end = Segment(f"{box.mid_right}", border_style)
224 new_line = Segment.line()
225 if title_text is None or width <= 4:
226 yield Segment(box.get_top([width - 2]), border_style)
227 else:
228 title_text = align_text(
229 title_text,
230 width - 4,
231 self.title_align,
232 box.top,
233 border_style,
234 )
235 yield Segment(box.top_left + box.top, border_style)
236 yield from console.render(title_text, child_options.update_width(width - 4))
237 yield Segment(box.top + box.top_right, border_style)
238
239 yield new_line
240 for line in lines:
241 yield line_start
242 yield from line
243 yield line_end
244 yield new_line
245
246 subtitle_text = self._subtitle
247 if subtitle_text is not None:
248 subtitle_text.stylize_before(border_style)
249
250 if subtitle_text is None or width <= 4:
251 yield Segment(box.get_bottom([width - 2]), border_style)
252 else:
253 subtitle_text = align_text(
254 subtitle_text,
255 width - 4,
256 self.subtitle_align,
257 box.bottom,
258 border_style,
259 )
260 yield Segment(box.bottom_left + box.bottom, border_style)
261 yield from console.render(
262 subtitle_text, child_options.update_width(width - 4)
263 )
264 yield Segment(box.bottom + box.bottom_right, border_style)
265
266 yield new_line
267
268 def __rich_measure__(
269 self, console: "Console", options: "ConsoleOptions"
270 ) -> "Measurement":
271 _title = self._title
272 _, right, _, left = Padding.unpack(self.padding)
273 padding = left + right
274 renderables = [self.renderable, _title] if _title else [self.renderable]
275
276 if self.width is None:
277 width = (
278 measure_renderables(
279 console,
280 options.update_width(options.max_width - padding - 2),
281 renderables,
282 ).maximum
283 + padding
284 + 2
285 )
286 else:
287 width = self.width
288 return Measurement(width, width)
289
290
291 if __name__ == "__main__": # pragma: no cover
292 from .console import Console
293
294 c = Console()
295
296 from .box import DOUBLE, ROUNDED
297 from .padding import Padding
298
299 p = Panel(
300 "Hello, World!",
301 title="rich.Panel",
302 style="white on blue",
303 box=DOUBLE,
304 padding=1,
305 )
306
307 c.print()
308 c.print(p)
309
```
--- END FILES ---
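Until `fit()` forwards these keywords, its docstring above ("An alternative constructor that sets expand=False") suggests the direct workaround of calling the main constructor, roughly:

```python
# Workaround sketch: equivalent to what Panel.fit(..., height=5, highlight=True) should do.
from rich.panel import Panel

panel = Panel("Hello, World!", expand=False, height=5, highlight=True)
```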
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rich/panel.py b/rich/panel.py
--- a/rich/panel.py
+++ b/rich/panel.py
@@ -82,7 +82,9 @@
style: StyleType = "none",
border_style: StyleType = "none",
width: Optional[int] = None,
+ height: Optional[int] = None,
padding: PaddingDimensions = (0, 1),
+ highlight: bool = False,
) -> "Panel":
"""An alternative constructor that sets expand=False."""
return cls(
@@ -96,7 +98,9 @@
style=style,
border_style=border_style,
width=width,
+ height=height,
padding=padding,
+ highlight=highlight,
expand=False,
)
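With `height` and `highlight` threaded through as in the diff, `fit()` mirrors the constructor while still forcing `expand=False`. A small usage sketch, assuming a rich build that includes this patch:

```python
# Usage sketch against a patched rich.
from rich.console import Console
from rich.panel import Panel

console = Console()
console.print(Panel.fit("Hello, World!", height=5, highlight=True))
```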
|
{"golden_diff": "diff --git a/rich/panel.py b/rich/panel.py\n--- a/rich/panel.py\n+++ b/rich/panel.py\n@@ -82,7 +82,9 @@\n style: StyleType = \"none\",\n border_style: StyleType = \"none\",\n width: Optional[int] = None,\n+ height: Optional[int] = None,\n padding: PaddingDimensions = (0, 1),\n+ highlight: bool = False,\n ) -> \"Panel\":\n \"\"\"An alternative constructor that sets expand=False.\"\"\"\n return cls(\n@@ -96,7 +98,9 @@\n style=style,\n border_style=border_style,\n width=width,\n+ height=height,\n padding=padding,\n+ highlight=highlight,\n expand=False,\n )\n", "issue": "[BUG] `Panel.fit` forgot `height` and `highlight` parameters\n- [x] I've checked [docs](https://rich.readthedocs.io/en/latest/introduction.html) and [closed issues](https://github.com/Textualize/rich/issues?q=is%3Aissue+is%3Aclosed) for possible solutions.\r\n- [x] I can't find my issue in the [FAQ](https://github.com/Textualize/rich/blob/master/FAQ.md).\r\n\r\n**Describe the bug**\r\n\r\n`Panel.fit` forgot `height` and `highlight` parameters. It should be updated to synchronize with the constructor.\r\n\r\n```python\r\nclass Panel(JupyterMixin):\r\n def __init__(\r\n self,\r\n renderable: \"RenderableType\",\r\n box: Box = ROUNDED,\r\n *,\r\n title: Optional[TextType] = None,\r\n title_align: AlignMethod = \"center\",\r\n subtitle: Optional[TextType] = None,\r\n subtitle_align: AlignMethod = \"center\",\r\n safe_box: Optional[bool] = None,\r\n expand: bool = True,\r\n style: StyleType = \"none\",\r\n border_style: StyleType = \"none\",\r\n width: Optional[int] = None,\r\n height: Optional[int] = None,\r\n padding: PaddingDimensions = (0, 1),\r\n highlight: bool = False,\r\n ) -> None:\r\n ...\r\n\r\n @classmethod\r\n def fit(\r\n cls,\r\n renderable: \"RenderableType\",\r\n box: Box = ROUNDED,\r\n *,\r\n title: Optional[TextType] = None,\r\n title_align: AlignMethod = \"center\",\r\n subtitle: Optional[TextType] = None,\r\n subtitle_align: AlignMethod = \"center\",\r\n safe_box: Optional[bool] = None,\r\n style: StyleType = \"none\",\r\n border_style: StyleType = \"none\",\r\n width: Optional[int] = None,\r\n padding: PaddingDimensions = (0, 1),\r\n ) -> \"Panel\":\r\n ...\r\n```\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Optional\n\nfrom .align import AlignMethod\nfrom .box import ROUNDED, Box\nfrom .cells import cell_len\nfrom .jupyter import JupyterMixin\nfrom .measure import Measurement, measure_renderables\nfrom .padding import Padding, PaddingDimensions\nfrom .segment import Segment\nfrom .style import Style, StyleType\nfrom .text import Text, TextType\n\nif TYPE_CHECKING:\n from .console import Console, ConsoleOptions, RenderableType, RenderResult\n\n\nclass Panel(JupyterMixin):\n \"\"\"A console renderable that draws a border around its contents.\n\n Example:\n >>> console.print(Panel(\"Hello, World!\"))\n\n Args:\n renderable (RenderableType): A console renderable object.\n box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`.\n Defaults to box.ROUNDED.\n safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.\n expand (bool, optional): If True the panel will stretch to fill the console\n width, otherwise it will be sized to fit the contents. Defaults to True.\n style (str, optional): The style of the panel (border and contents). Defaults to \"none\".\n border_style (str, optional): The style of the border. 
Defaults to \"none\".\n width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.\n height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.\n padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0.\n highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.\n \"\"\"\n\n def __init__(\n self,\n renderable: \"RenderableType\",\n box: Box = ROUNDED,\n *,\n title: Optional[TextType] = None,\n title_align: AlignMethod = \"center\",\n subtitle: Optional[TextType] = None,\n subtitle_align: AlignMethod = \"center\",\n safe_box: Optional[bool] = None,\n expand: bool = True,\n style: StyleType = \"none\",\n border_style: StyleType = \"none\",\n width: Optional[int] = None,\n height: Optional[int] = None,\n padding: PaddingDimensions = (0, 1),\n highlight: bool = False,\n ) -> None:\n self.renderable = renderable\n self.box = box\n self.title = title\n self.title_align: AlignMethod = title_align\n self.subtitle = subtitle\n self.subtitle_align = subtitle_align\n self.safe_box = safe_box\n self.expand = expand\n self.style = style\n self.border_style = border_style\n self.width = width\n self.height = height\n self.padding = padding\n self.highlight = highlight\n\n @classmethod\n def fit(\n cls,\n renderable: \"RenderableType\",\n box: Box = ROUNDED,\n *,\n title: Optional[TextType] = None,\n title_align: AlignMethod = \"center\",\n subtitle: Optional[TextType] = None,\n subtitle_align: AlignMethod = \"center\",\n safe_box: Optional[bool] = None,\n style: StyleType = \"none\",\n border_style: StyleType = \"none\",\n width: Optional[int] = None,\n padding: PaddingDimensions = (0, 1),\n ) -> \"Panel\":\n \"\"\"An alternative constructor that sets expand=False.\"\"\"\n return cls(\n renderable,\n box,\n title=title,\n title_align=title_align,\n subtitle=subtitle,\n subtitle_align=subtitle_align,\n safe_box=safe_box,\n style=style,\n border_style=border_style,\n width=width,\n padding=padding,\n expand=False,\n )\n\n @property\n def _title(self) -> Optional[Text]:\n if self.title:\n title_text = (\n Text.from_markup(self.title)\n if isinstance(self.title, str)\n else self.title.copy()\n )\n title_text.end = \"\"\n title_text.plain = title_text.plain.replace(\"\\n\", \" \")\n title_text.no_wrap = True\n title_text.expand_tabs()\n title_text.pad(1)\n return title_text\n return None\n\n @property\n def _subtitle(self) -> Optional[Text]:\n if self.subtitle:\n subtitle_text = (\n Text.from_markup(self.subtitle)\n if isinstance(self.subtitle, str)\n else self.subtitle.copy()\n )\n subtitle_text.end = \"\"\n subtitle_text.plain = subtitle_text.plain.replace(\"\\n\", \" \")\n subtitle_text.no_wrap = True\n subtitle_text.expand_tabs()\n subtitle_text.pad(1)\n return subtitle_text\n return None\n\n def __rich_console__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"RenderResult\":\n _padding = Padding.unpack(self.padding)\n renderable = (\n Padding(self.renderable, _padding) if any(_padding) else self.renderable\n )\n style = console.get_style(self.style)\n border_style = style + console.get_style(self.border_style)\n width = (\n options.max_width\n if self.width is None\n else min(options.max_width, self.width)\n )\n\n safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box\n box = self.box.substitute(options, safe=safe_box)\n\n def align_text(\n text: Text, width: int, align: str, character: str, style: Style\n ) -> Text:\n 
\"\"\"Gets new aligned text.\n\n Args:\n text (Text): Title or subtitle text.\n width (int): Desired width.\n align (str): Alignment.\n character (str): Character for alignment.\n style (Style): Border style\n\n Returns:\n Text: New text instance\n \"\"\"\n text = text.copy()\n text.truncate(width)\n excess_space = width - cell_len(text.plain)\n if excess_space:\n if align == \"left\":\n return Text.assemble(\n text,\n (character * excess_space, style),\n no_wrap=True,\n end=\"\",\n )\n elif align == \"center\":\n left = excess_space // 2\n return Text.assemble(\n (character * left, style),\n text,\n (character * (excess_space - left), style),\n no_wrap=True,\n end=\"\",\n )\n else:\n return Text.assemble(\n (character * excess_space, style),\n text,\n no_wrap=True,\n end=\"\",\n )\n return text\n\n title_text = self._title\n if title_text is not None:\n title_text.stylize_before(border_style)\n\n child_width = (\n width - 2\n if self.expand\n else console.measure(\n renderable, options=options.update_width(width - 2)\n ).maximum\n )\n child_height = self.height or options.height or None\n if child_height:\n child_height -= 2\n if title_text is not None:\n child_width = min(\n options.max_width - 2, max(child_width, title_text.cell_len + 2)\n )\n\n width = child_width + 2\n child_options = options.update(\n width=child_width, height=child_height, highlight=self.highlight\n )\n lines = console.render_lines(renderable, child_options, style=style)\n\n line_start = Segment(box.mid_left, border_style)\n line_end = Segment(f\"{box.mid_right}\", border_style)\n new_line = Segment.line()\n if title_text is None or width <= 4:\n yield Segment(box.get_top([width - 2]), border_style)\n else:\n title_text = align_text(\n title_text,\n width - 4,\n self.title_align,\n box.top,\n border_style,\n )\n yield Segment(box.top_left + box.top, border_style)\n yield from console.render(title_text, child_options.update_width(width - 4))\n yield Segment(box.top + box.top_right, border_style)\n\n yield new_line\n for line in lines:\n yield line_start\n yield from line\n yield line_end\n yield new_line\n\n subtitle_text = self._subtitle\n if subtitle_text is not None:\n subtitle_text.stylize_before(border_style)\n\n if subtitle_text is None or width <= 4:\n yield Segment(box.get_bottom([width - 2]), border_style)\n else:\n subtitle_text = align_text(\n subtitle_text,\n width - 4,\n self.subtitle_align,\n box.bottom,\n border_style,\n )\n yield Segment(box.bottom_left + box.bottom, border_style)\n yield from console.render(\n subtitle_text, child_options.update_width(width - 4)\n )\n yield Segment(box.bottom + box.bottom_right, border_style)\n\n yield new_line\n\n def __rich_measure__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"Measurement\":\n _title = self._title\n _, right, _, left = Padding.unpack(self.padding)\n padding = left + right\n renderables = [self.renderable, _title] if _title else [self.renderable]\n\n if self.width is None:\n width = (\n measure_renderables(\n console,\n options.update_width(options.max_width - padding - 2),\n renderables,\n ).maximum\n + padding\n + 2\n )\n else:\n width = self.width\n return Measurement(width, width)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from .console import Console\n\n c = Console()\n\n from .box import DOUBLE, ROUNDED\n from .padding import Padding\n\n p = Panel(\n \"Hello, World!\",\n title=\"rich.Panel\",\n style=\"white on blue\",\n box=DOUBLE,\n padding=1,\n )\n\n c.print()\n c.print(p)\n", "path": "rich/panel.py"}], 
"after_files": [{"content": "from typing import TYPE_CHECKING, Optional\n\nfrom .align import AlignMethod\nfrom .box import ROUNDED, Box\nfrom .cells import cell_len\nfrom .jupyter import JupyterMixin\nfrom .measure import Measurement, measure_renderables\nfrom .padding import Padding, PaddingDimensions\nfrom .segment import Segment\nfrom .style import Style, StyleType\nfrom .text import Text, TextType\n\nif TYPE_CHECKING:\n from .console import Console, ConsoleOptions, RenderableType, RenderResult\n\n\nclass Panel(JupyterMixin):\n \"\"\"A console renderable that draws a border around its contents.\n\n Example:\n >>> console.print(Panel(\"Hello, World!\"))\n\n Args:\n renderable (RenderableType): A console renderable object.\n box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`.\n Defaults to box.ROUNDED.\n safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.\n expand (bool, optional): If True the panel will stretch to fill the console\n width, otherwise it will be sized to fit the contents. Defaults to True.\n style (str, optional): The style of the panel (border and contents). Defaults to \"none\".\n border_style (str, optional): The style of the border. Defaults to \"none\".\n width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.\n height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.\n padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0.\n highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.\n \"\"\"\n\n def __init__(\n self,\n renderable: \"RenderableType\",\n box: Box = ROUNDED,\n *,\n title: Optional[TextType] = None,\n title_align: AlignMethod = \"center\",\n subtitle: Optional[TextType] = None,\n subtitle_align: AlignMethod = \"center\",\n safe_box: Optional[bool] = None,\n expand: bool = True,\n style: StyleType = \"none\",\n border_style: StyleType = \"none\",\n width: Optional[int] = None,\n height: Optional[int] = None,\n padding: PaddingDimensions = (0, 1),\n highlight: bool = False,\n ) -> None:\n self.renderable = renderable\n self.box = box\n self.title = title\n self.title_align: AlignMethod = title_align\n self.subtitle = subtitle\n self.subtitle_align = subtitle_align\n self.safe_box = safe_box\n self.expand = expand\n self.style = style\n self.border_style = border_style\n self.width = width\n self.height = height\n self.padding = padding\n self.highlight = highlight\n\n @classmethod\n def fit(\n cls,\n renderable: \"RenderableType\",\n box: Box = ROUNDED,\n *,\n title: Optional[TextType] = None,\n title_align: AlignMethod = \"center\",\n subtitle: Optional[TextType] = None,\n subtitle_align: AlignMethod = \"center\",\n safe_box: Optional[bool] = None,\n style: StyleType = \"none\",\n border_style: StyleType = \"none\",\n width: Optional[int] = None,\n height: Optional[int] = None,\n padding: PaddingDimensions = (0, 1),\n highlight: bool = False,\n ) -> \"Panel\":\n \"\"\"An alternative constructor that sets expand=False.\"\"\"\n return cls(\n renderable,\n box,\n title=title,\n title_align=title_align,\n subtitle=subtitle,\n subtitle_align=subtitle_align,\n safe_box=safe_box,\n style=style,\n border_style=border_style,\n width=width,\n height=height,\n padding=padding,\n highlight=highlight,\n expand=False,\n )\n\n @property\n def _title(self) -> Optional[Text]:\n if self.title:\n 
title_text = (\n Text.from_markup(self.title)\n if isinstance(self.title, str)\n else self.title.copy()\n )\n title_text.end = \"\"\n title_text.plain = title_text.plain.replace(\"\\n\", \" \")\n title_text.no_wrap = True\n title_text.expand_tabs()\n title_text.pad(1)\n return title_text\n return None\n\n @property\n def _subtitle(self) -> Optional[Text]:\n if self.subtitle:\n subtitle_text = (\n Text.from_markup(self.subtitle)\n if isinstance(self.subtitle, str)\n else self.subtitle.copy()\n )\n subtitle_text.end = \"\"\n subtitle_text.plain = subtitle_text.plain.replace(\"\\n\", \" \")\n subtitle_text.no_wrap = True\n subtitle_text.expand_tabs()\n subtitle_text.pad(1)\n return subtitle_text\n return None\n\n def __rich_console__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"RenderResult\":\n _padding = Padding.unpack(self.padding)\n renderable = (\n Padding(self.renderable, _padding) if any(_padding) else self.renderable\n )\n style = console.get_style(self.style)\n border_style = style + console.get_style(self.border_style)\n width = (\n options.max_width\n if self.width is None\n else min(options.max_width, self.width)\n )\n\n safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box\n box = self.box.substitute(options, safe=safe_box)\n\n def align_text(\n text: Text, width: int, align: str, character: str, style: Style\n ) -> Text:\n \"\"\"Gets new aligned text.\n\n Args:\n text (Text): Title or subtitle text.\n width (int): Desired width.\n align (str): Alignment.\n character (str): Character for alignment.\n style (Style): Border style\n\n Returns:\n Text: New text instance\n \"\"\"\n text = text.copy()\n text.truncate(width)\n excess_space = width - cell_len(text.plain)\n if excess_space:\n if align == \"left\":\n return Text.assemble(\n text,\n (character * excess_space, style),\n no_wrap=True,\n end=\"\",\n )\n elif align == \"center\":\n left = excess_space // 2\n return Text.assemble(\n (character * left, style),\n text,\n (character * (excess_space - left), style),\n no_wrap=True,\n end=\"\",\n )\n else:\n return Text.assemble(\n (character * excess_space, style),\n text,\n no_wrap=True,\n end=\"\",\n )\n return text\n\n title_text = self._title\n if title_text is not None:\n title_text.stylize_before(border_style)\n\n child_width = (\n width - 2\n if self.expand\n else console.measure(\n renderable, options=options.update_width(width - 2)\n ).maximum\n )\n child_height = self.height or options.height or None\n if child_height:\n child_height -= 2\n if title_text is not None:\n child_width = min(\n options.max_width - 2, max(child_width, title_text.cell_len + 2)\n )\n\n width = child_width + 2\n child_options = options.update(\n width=child_width, height=child_height, highlight=self.highlight\n )\n lines = console.render_lines(renderable, child_options, style=style)\n\n line_start = Segment(box.mid_left, border_style)\n line_end = Segment(f\"{box.mid_right}\", border_style)\n new_line = Segment.line()\n if title_text is None or width <= 4:\n yield Segment(box.get_top([width - 2]), border_style)\n else:\n title_text = align_text(\n title_text,\n width - 4,\n self.title_align,\n box.top,\n border_style,\n )\n yield Segment(box.top_left + box.top, border_style)\n yield from console.render(title_text, child_options.update_width(width - 4))\n yield Segment(box.top + box.top_right, border_style)\n\n yield new_line\n for line in lines:\n yield line_start\n yield from line\n yield line_end\n yield new_line\n\n subtitle_text = self._subtitle\n 
if subtitle_text is not None:\n subtitle_text.stylize_before(border_style)\n\n if subtitle_text is None or width <= 4:\n yield Segment(box.get_bottom([width - 2]), border_style)\n else:\n subtitle_text = align_text(\n subtitle_text,\n width - 4,\n self.subtitle_align,\n box.bottom,\n border_style,\n )\n yield Segment(box.bottom_left + box.bottom, border_style)\n yield from console.render(\n subtitle_text, child_options.update_width(width - 4)\n )\n yield Segment(box.bottom + box.bottom_right, border_style)\n\n yield new_line\n\n def __rich_measure__(\n self, console: \"Console\", options: \"ConsoleOptions\"\n ) -> \"Measurement\":\n _title = self._title\n _, right, _, left = Padding.unpack(self.padding)\n padding = left + right\n renderables = [self.renderable, _title] if _title else [self.renderable]\n\n if self.width is None:\n width = (\n measure_renderables(\n console,\n options.update_width(options.max_width - padding - 2),\n renderables,\n ).maximum\n + padding\n + 2\n )\n else:\n width = self.width\n return Measurement(width, width)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from .console import Console\n\n c = Console()\n\n from .box import DOUBLE, ROUNDED\n from .padding import Padding\n\n p = Panel(\n \"Hello, World!\",\n title=\"rich.Panel\",\n style=\"white on blue\",\n box=DOUBLE,\n padding=1,\n )\n\n c.print()\n c.print(p)\n", "path": "rich/panel.py"}]}
| 3,694 | 174 |
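For reference, a minimal usage sketch of the patched `Panel.fit` from the row above, assuming a `rich` build that includes that diff, in which `fit()` now forwards `height` and `highlight` to the constructor:

```python
# Usage sketch (assumes a rich version that contains the patch above):
# Panel.fit() now accepts height and highlight, matching the constructor.
from rich.console import Console
from rich.panel import Panel

console = Console()
console.print(Panel.fit("Hello, World!", height=5, highlight=True))
```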
gh_patches_debug_34723
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-58281
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix typing for `sentry.rules.processor`
Many files related to post-process are skipping type checks.
* Remove the file from [sentry/pyproject.toml](https://github.com/getsentry/sentry/blob/b7810a9dca4b57afd2858903a6a9ec7ab50cdead/pyproject.toml)
* Run `mypy sentry.rules.processor` and fix the typing errors (see the narrowing sketch below)
From https://github.com/getsentry/sentry/issues/55193
--- END ISSUE ---
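For orientation, the fix mypy usually wants in this situation is to narrow the result of a registry lookup with `isinstance` before calling methods on it. A minimal, self-contained sketch of that pattern (illustrative names, not the actual sentry registry API):

```python
# Self-contained sketch of isinstance() narrowing (illustrative names only):
# a registry lookup yields "some class", so the created instance is checked
# before its methods are used, which both satisfies mypy and guards runtime.
from typing import Mapping, Optional, Type


class EventCondition:
    def passes(self, event: object) -> bool:
        return True


REGISTRY: Mapping[str, Type[object]] = {"event_frequency": EventCondition}


def condition_matches(condition_id: str, event: object) -> Optional[bool]:
    condition_cls = REGISTRY.get(condition_id)
    if condition_cls is None:
        return None
    condition_inst = condition_cls()
    if not isinstance(condition_inst, EventCondition):
        # Unknown class: treat it as unregistered instead of crashing.
        return None
    return condition_inst.passes(event)  # narrowed: .passes() is known here
```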
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/rules/processor.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import uuid
5 from datetime import timedelta
6 from random import randrange
7 from typing import (
8 Any,
9 Callable,
10 Collection,
11 List,
12 Mapping,
13 MutableMapping,
14 Optional,
15 Sequence,
16 Set,
17 Tuple,
18 )
19
20 from django.core.cache import cache
21 from django.utils import timezone
22
23 from sentry import analytics
24 from sentry.eventstore.models import GroupEvent
25 from sentry.models.environment import Environment
26 from sentry.models.grouprulestatus import GroupRuleStatus
27 from sentry.models.rule import Rule
28 from sentry.models.rulesnooze import RuleSnooze
29 from sentry.rules import EventState, history, rules
30 from sentry.rules.conditions.base import EventCondition
31 from sentry.types.rules import RuleFuture
32 from sentry.utils.hashlib import hash_values
33 from sentry.utils.safe import safe_execute
34
35 SLOW_CONDITION_MATCHES = ["event_frequency"]
36
37
38 def get_match_function(match_name: str) -> Callable[..., bool] | None:
39 if match_name == "all":
40 return all
41 elif match_name == "any":
42 return any
43 elif match_name == "none":
44 return lambda bool_iter: not any(bool_iter)
45 return None
46
47
48 def is_condition_slow(condition: Mapping[str, str]) -> bool:
49 for slow_conditions in SLOW_CONDITION_MATCHES:
50 if slow_conditions in condition["id"]:
51 return True
52 return False
53
54
55 class RuleProcessor:
56 logger = logging.getLogger("sentry.rules")
57
58 def __init__(
59 self,
60 event: GroupEvent,
61 is_new: bool,
62 is_regression: bool,
63 is_new_group_environment: bool,
64 has_reappeared: bool,
65 ) -> None:
66 self.event = event
67 self.group = event.group
68 self.project = event.project
69
70 self.is_new = is_new
71 self.is_regression = is_regression
72 self.is_new_group_environment = is_new_group_environment
73 self.has_reappeared = has_reappeared
74
75 self.grouped_futures: MutableMapping[
76 str, Tuple[Callable[[GroupEvent, Sequence[RuleFuture]], None], List[RuleFuture]]
77 ] = {}
78
79 def get_rules(self) -> Sequence[Rule]:
80 """Get all of the rules for this project from the DB (or cache)."""
81 rules_: Sequence[Rule] = Rule.get_for_project(self.project.id)
82 return rules_
83
84 def _build_rule_status_cache_key(self, rule_id: int) -> str:
85 return "grouprulestatus:1:%s" % hash_values([self.group.id, rule_id])
86
87 def bulk_get_rule_status(self, rules: Sequence[Rule]) -> Mapping[int, GroupRuleStatus]:
88 keys = [self._build_rule_status_cache_key(rule.id) for rule in rules]
89 cache_results: Mapping[str, GroupRuleStatus] = cache.get_many(keys)
90 missing_rule_ids: Set[int] = set()
91 rule_statuses: MutableMapping[int, GroupRuleStatus] = {}
92 for key, rule in zip(keys, rules):
93 rule_status = cache_results.get(key)
94 if not rule_status:
95 missing_rule_ids.add(rule.id)
96 else:
97 rule_statuses[rule.id] = rule_status
98
99 if missing_rule_ids:
100 # If not cached, attempt to fetch status from the database
101 statuses = GroupRuleStatus.objects.filter(
102 group=self.group, rule_id__in=missing_rule_ids
103 )
104 to_cache: List[GroupRuleStatus] = list()
105 for status in statuses:
106 rule_statuses[status.rule_id] = status
107 missing_rule_ids.remove(status.rule_id)
108 to_cache.append(status)
109
110 # We might need to create some statuses if they don't already exist
111 if missing_rule_ids:
112 # We use `ignore_conflicts=True` here to avoid race conditions where the statuses
113 # might be created between when we queried above and attempt to create the rows now.
114 GroupRuleStatus.objects.bulk_create(
115 [
116 GroupRuleStatus(rule_id=rule_id, group=self.group, project=self.project)
117 for rule_id in missing_rule_ids
118 ],
119 ignore_conflicts=True,
120 )
121 # Using `ignore_conflicts=True` prevents the pk from being set on the model
122 # instances. Re-query the database to fetch the rows, they should all exist at this
123 # point.
124 statuses = GroupRuleStatus.objects.filter(
125 group=self.group, rule_id__in=missing_rule_ids
126 )
127 for status in statuses:
128 rule_statuses[status.rule_id] = status
129 missing_rule_ids.remove(status.rule_id)
130 to_cache.append(status)
131
132 if missing_rule_ids:
133 # Shouldn't happen, but log just in case
134 self.logger.error(
135 "Failed to fetch some GroupRuleStatuses in RuleProcessor",
136 extra={"missing_rule_ids": missing_rule_ids, "group_id": self.group.id},
137 )
138 if to_cache:
139 cache.set_many(
140 {self._build_rule_status_cache_key(item.rule_id): item for item in to_cache}
141 )
142
143 return rule_statuses
144
145 def condition_matches(
146 self, condition: Mapping[str, Any], state: EventState, rule: Rule
147 ) -> bool | None:
148 condition_cls = rules.get(condition["id"])
149 if condition_cls is None:
150 self.logger.warning("Unregistered condition %r", condition["id"])
151 return None
152
153 condition_inst: EventCondition = condition_cls(self.project, data=condition, rule=rule)
154 passes: bool = safe_execute(
155 condition_inst.passes, self.event, state, _with_transaction=False
156 )
157 return passes
158
159 def get_rule_type(self, condition: Mapping[str, Any]) -> str | None:
160 rule_cls = rules.get(condition["id"])
161 if rule_cls is None:
162 self.logger.warning("Unregistered condition or filter %r", condition["id"])
163 return None
164
165 rule_type: str = rule_cls.rule_type
166 return rule_type
167
168 def get_state(self) -> EventState:
169 return EventState(
170 is_new=self.is_new,
171 is_regression=self.is_regression,
172 is_new_group_environment=self.is_new_group_environment,
173 has_reappeared=self.has_reappeared,
174 )
175
176 def apply_rule(self, rule: Rule, status: GroupRuleStatus) -> None:
177 """
178 If all conditions and filters pass, execute every action.
179
180 :param rule: `Rule` object
181 :return: void
182 """
183 logging_details = {
184 "rule_id": rule.id,
185 "group_id": self.group.id,
186 "event_id": self.event.event_id,
187 "project_id": self.project.id,
188 "is_new": self.is_new,
189 "is_regression": self.is_regression,
190 "has_reappeared": self.has_reappeared,
191 "new_group_environment": self.is_new_group_environment,
192 }
193
194 condition_match = rule.data.get("action_match") or Rule.DEFAULT_CONDITION_MATCH
195 filter_match = rule.data.get("filter_match") or Rule.DEFAULT_FILTER_MATCH
196 rule_condition_list = rule.data.get("conditions", ())
197 frequency = rule.data.get("frequency") or Rule.DEFAULT_FREQUENCY
198 try:
199 environment = self.event.get_environment()
200 except Environment.DoesNotExist:
201 return
202
203 if rule.environment_id is not None and environment.id != rule.environment_id:
204 return
205
206 now = timezone.now()
207 freq_offset = now - timedelta(minutes=frequency)
208 if status.last_active and status.last_active > freq_offset:
209 return
210
211 state = self.get_state()
212
213 condition_list = []
214 filter_list = []
215 for rule_cond in rule_condition_list:
216 if self.get_rule_type(rule_cond) == "condition/event":
217 condition_list.append(rule_cond)
218 else:
219 filter_list.append(rule_cond)
220
221 # Sort `condition_list` so that most expensive conditions run last.
222 condition_list.sort(key=lambda condition: is_condition_slow(condition))
223
224 for predicate_list, match, name in (
225 (filter_list, filter_match, "filter"),
226 (condition_list, condition_match, "condition"),
227 ):
228 if not predicate_list:
229 continue
230 predicate_iter = (self.condition_matches(f, state, rule) for f in predicate_list)
231 predicate_func = get_match_function(match)
232 if predicate_func:
233 if not predicate_func(predicate_iter):
234 return
235 else:
236 self.logger.error(
237 f"Unsupported {name}_match {match!r} for rule {rule.id}",
238 filter_match,
239 rule.id,
240 extra={**logging_details},
241 )
242 return
243
244 updated = (
245 GroupRuleStatus.objects.filter(id=status.id)
246 .exclude(last_active__gt=freq_offset)
247 .update(last_active=now)
248 )
249
250 if not updated:
251 return
252
253 if randrange(10) == 0:
254 analytics.record(
255 "issue_alert.fired",
256 issue_id=self.group.id,
257 project_id=rule.project.id,
258 organization_id=rule.project.organization.id,
259 rule_id=rule.id,
260 )
261
262 notification_uuid = str(uuid.uuid4())
263 history.record(rule, self.group, self.event.event_id, notification_uuid)
264 self.activate_downstream_actions(rule, notification_uuid)
265
266 def activate_downstream_actions(
267 self, rule: Rule, notification_uuid: Optional[str] = None
268 ) -> None:
269 state = self.get_state()
270 for action in rule.data.get("actions", ()):
271 action_cls = rules.get(action["id"])
272 if action_cls is None:
273 self.logger.warning("Unregistered action %r", action["id"])
274 continue
275
276 action_inst = action_cls(self.project, data=action, rule=rule)
277
278 results = safe_execute(
279 action_inst.after,
280 event=self.event,
281 state=state,
282 _with_transaction=False,
283 notification_uuid=notification_uuid,
284 )
285 if results is None:
286 self.logger.warning("Action %s did not return any futures", action["id"])
287 continue
288
289 for future in results:
290 key = future.key if future.key is not None else future.callback
291 rule_future = RuleFuture(rule=rule, kwargs=future.kwargs)
292
293 if key not in self.grouped_futures:
294 self.grouped_futures[key] = (future.callback, [rule_future])
295 else:
296 self.grouped_futures[key][1].append(rule_future)
297
298 def apply(
299 self,
300 ) -> Collection[Tuple[Callable[[GroupEvent, Sequence[RuleFuture]], None], List[RuleFuture]]]:
301 # we should only apply rules on unresolved issues
302 if not self.event.group.is_unresolved():
303 return {}.values()
304
305 self.grouped_futures.clear()
306 rules = self.get_rules()
307 snoozed_rules = RuleSnooze.objects.filter(rule__in=rules, user_id=None).values_list(
308 "rule", flat=True
309 )
310 rule_statuses = self.bulk_get_rule_status(rules)
311 for rule in rules:
312 if rule.id not in snoozed_rules:
313 self.apply_rule(rule, rule_statuses[rule.id])
314
315 return self.grouped_futures.values()
316
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/rules/processor.py b/src/sentry/rules/processor.py
--- a/src/sentry/rules/processor.py
+++ b/src/sentry/rules/processor.py
@@ -27,7 +27,9 @@
from sentry.models.rule import Rule
from sentry.models.rulesnooze import RuleSnooze
from sentry.rules import EventState, history, rules
+from sentry.rules.actions.base import EventAction
from sentry.rules.conditions.base import EventCondition
+from sentry.rules.filters.base import EventFilter
from sentry.types.rules import RuleFuture
from sentry.utils.hashlib import hash_values
from sentry.utils.safe import safe_execute
@@ -143,16 +145,22 @@
return rule_statuses
def condition_matches(
- self, condition: Mapping[str, Any], state: EventState, rule: Rule
+ self, condition: dict[str, Any], state: EventState, rule: Rule
) -> bool | None:
condition_cls = rules.get(condition["id"])
if condition_cls is None:
self.logger.warning("Unregistered condition %r", condition["id"])
return None
- condition_inst: EventCondition = condition_cls(self.project, data=condition, rule=rule)
+ condition_inst = condition_cls(self.project, data=condition, rule=rule)
+ if not isinstance(condition_inst, (EventCondition, EventFilter)):
+ self.logger.warning("Unregistered condition %r", condition["id"])
+ return None
passes: bool = safe_execute(
- condition_inst.passes, self.event, state, _with_transaction=False
+ condition_inst.passes,
+ self.event,
+ state,
+ _with_transaction=False,
)
return passes
@@ -274,6 +282,9 @@
continue
action_inst = action_cls(self.project, data=action, rule=rule)
+ if not isinstance(action_inst, EventAction):
+ self.logger.warning("Unregistered action %r", action["id"])
+ continue
results = safe_execute(
action_inst.after,
|
{"golden_diff": "diff --git a/src/sentry/rules/processor.py b/src/sentry/rules/processor.py\n--- a/src/sentry/rules/processor.py\n+++ b/src/sentry/rules/processor.py\n@@ -27,7 +27,9 @@\n from sentry.models.rule import Rule\n from sentry.models.rulesnooze import RuleSnooze\n from sentry.rules import EventState, history, rules\n+from sentry.rules.actions.base import EventAction\n from sentry.rules.conditions.base import EventCondition\n+from sentry.rules.filters.base import EventFilter\n from sentry.types.rules import RuleFuture\n from sentry.utils.hashlib import hash_values\n from sentry.utils.safe import safe_execute\n@@ -143,16 +145,22 @@\n return rule_statuses\n \n def condition_matches(\n- self, condition: Mapping[str, Any], state: EventState, rule: Rule\n+ self, condition: dict[str, Any], state: EventState, rule: Rule\n ) -> bool | None:\n condition_cls = rules.get(condition[\"id\"])\n if condition_cls is None:\n self.logger.warning(\"Unregistered condition %r\", condition[\"id\"])\n return None\n \n- condition_inst: EventCondition = condition_cls(self.project, data=condition, rule=rule)\n+ condition_inst = condition_cls(self.project, data=condition, rule=rule)\n+ if not isinstance(condition_inst, (EventCondition, EventFilter)):\n+ self.logger.warning(\"Unregistered condition %r\", condition[\"id\"])\n+ return None\n passes: bool = safe_execute(\n- condition_inst.passes, self.event, state, _with_transaction=False\n+ condition_inst.passes,\n+ self.event,\n+ state,\n+ _with_transaction=False,\n )\n return passes\n \n@@ -274,6 +282,9 @@\n continue\n \n action_inst = action_cls(self.project, data=action, rule=rule)\n+ if not isinstance(action_inst, EventAction):\n+ self.logger.warning(\"Unregistered action %r\", action[\"id\"])\n+ continue\n \n results = safe_execute(\n action_inst.after,\n", "issue": "Fix typing for `sentry.rules.processor`\nMany files related to post-process are skipping type checks.\n\n* Remove the file from [sentry/pyproject.toml](https://github.com/getsentry/sentry/blob/b7810a9dca4b57afd2858903a6a9ec7ab50cdead/pyproject.toml)\n* Run `mypy sentry.rules.processor` and fix the typing errors\n\nFrom https://github.com/getsentry/sentry/issues/55193\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport uuid\nfrom datetime import timedelta\nfrom random import randrange\nfrom typing import (\n Any,\n Callable,\n Collection,\n List,\n Mapping,\n MutableMapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n)\n\nfrom django.core.cache import cache\nfrom django.utils import timezone\n\nfrom sentry import analytics\nfrom sentry.eventstore.models import GroupEvent\nfrom sentry.models.environment import Environment\nfrom sentry.models.grouprulestatus import GroupRuleStatus\nfrom sentry.models.rule import Rule\nfrom sentry.models.rulesnooze import RuleSnooze\nfrom sentry.rules import EventState, history, rules\nfrom sentry.rules.conditions.base import EventCondition\nfrom sentry.types.rules import RuleFuture\nfrom sentry.utils.hashlib import hash_values\nfrom sentry.utils.safe import safe_execute\n\nSLOW_CONDITION_MATCHES = [\"event_frequency\"]\n\n\ndef get_match_function(match_name: str) -> Callable[..., bool] | None:\n if match_name == \"all\":\n return all\n elif match_name == \"any\":\n return any\n elif match_name == \"none\":\n return lambda bool_iter: not any(bool_iter)\n return None\n\n\ndef is_condition_slow(condition: Mapping[str, str]) -> bool:\n for slow_conditions in SLOW_CONDITION_MATCHES:\n if slow_conditions in 
condition[\"id\"]:\n return True\n return False\n\n\nclass RuleProcessor:\n logger = logging.getLogger(\"sentry.rules\")\n\n def __init__(\n self,\n event: GroupEvent,\n is_new: bool,\n is_regression: bool,\n is_new_group_environment: bool,\n has_reappeared: bool,\n ) -> None:\n self.event = event\n self.group = event.group\n self.project = event.project\n\n self.is_new = is_new\n self.is_regression = is_regression\n self.is_new_group_environment = is_new_group_environment\n self.has_reappeared = has_reappeared\n\n self.grouped_futures: MutableMapping[\n str, Tuple[Callable[[GroupEvent, Sequence[RuleFuture]], None], List[RuleFuture]]\n ] = {}\n\n def get_rules(self) -> Sequence[Rule]:\n \"\"\"Get all of the rules for this project from the DB (or cache).\"\"\"\n rules_: Sequence[Rule] = Rule.get_for_project(self.project.id)\n return rules_\n\n def _build_rule_status_cache_key(self, rule_id: int) -> str:\n return \"grouprulestatus:1:%s\" % hash_values([self.group.id, rule_id])\n\n def bulk_get_rule_status(self, rules: Sequence[Rule]) -> Mapping[int, GroupRuleStatus]:\n keys = [self._build_rule_status_cache_key(rule.id) for rule in rules]\n cache_results: Mapping[str, GroupRuleStatus] = cache.get_many(keys)\n missing_rule_ids: Set[int] = set()\n rule_statuses: MutableMapping[int, GroupRuleStatus] = {}\n for key, rule in zip(keys, rules):\n rule_status = cache_results.get(key)\n if not rule_status:\n missing_rule_ids.add(rule.id)\n else:\n rule_statuses[rule.id] = rule_status\n\n if missing_rule_ids:\n # If not cached, attempt to fetch status from the database\n statuses = GroupRuleStatus.objects.filter(\n group=self.group, rule_id__in=missing_rule_ids\n )\n to_cache: List[GroupRuleStatus] = list()\n for status in statuses:\n rule_statuses[status.rule_id] = status\n missing_rule_ids.remove(status.rule_id)\n to_cache.append(status)\n\n # We might need to create some statuses if they don't already exist\n if missing_rule_ids:\n # We use `ignore_conflicts=True` here to avoid race conditions where the statuses\n # might be created between when we queried above and attempt to create the rows now.\n GroupRuleStatus.objects.bulk_create(\n [\n GroupRuleStatus(rule_id=rule_id, group=self.group, project=self.project)\n for rule_id in missing_rule_ids\n ],\n ignore_conflicts=True,\n )\n # Using `ignore_conflicts=True` prevents the pk from being set on the model\n # instances. 
Re-query the database to fetch the rows, they should all exist at this\n # point.\n statuses = GroupRuleStatus.objects.filter(\n group=self.group, rule_id__in=missing_rule_ids\n )\n for status in statuses:\n rule_statuses[status.rule_id] = status\n missing_rule_ids.remove(status.rule_id)\n to_cache.append(status)\n\n if missing_rule_ids:\n # Shouldn't happen, but log just in case\n self.logger.error(\n \"Failed to fetch some GroupRuleStatuses in RuleProcessor\",\n extra={\"missing_rule_ids\": missing_rule_ids, \"group_id\": self.group.id},\n )\n if to_cache:\n cache.set_many(\n {self._build_rule_status_cache_key(item.rule_id): item for item in to_cache}\n )\n\n return rule_statuses\n\n def condition_matches(\n self, condition: Mapping[str, Any], state: EventState, rule: Rule\n ) -> bool | None:\n condition_cls = rules.get(condition[\"id\"])\n if condition_cls is None:\n self.logger.warning(\"Unregistered condition %r\", condition[\"id\"])\n return None\n\n condition_inst: EventCondition = condition_cls(self.project, data=condition, rule=rule)\n passes: bool = safe_execute(\n condition_inst.passes, self.event, state, _with_transaction=False\n )\n return passes\n\n def get_rule_type(self, condition: Mapping[str, Any]) -> str | None:\n rule_cls = rules.get(condition[\"id\"])\n if rule_cls is None:\n self.logger.warning(\"Unregistered condition or filter %r\", condition[\"id\"])\n return None\n\n rule_type: str = rule_cls.rule_type\n return rule_type\n\n def get_state(self) -> EventState:\n return EventState(\n is_new=self.is_new,\n is_regression=self.is_regression,\n is_new_group_environment=self.is_new_group_environment,\n has_reappeared=self.has_reappeared,\n )\n\n def apply_rule(self, rule: Rule, status: GroupRuleStatus) -> None:\n \"\"\"\n If all conditions and filters pass, execute every action.\n\n :param rule: `Rule` object\n :return: void\n \"\"\"\n logging_details = {\n \"rule_id\": rule.id,\n \"group_id\": self.group.id,\n \"event_id\": self.event.event_id,\n \"project_id\": self.project.id,\n \"is_new\": self.is_new,\n \"is_regression\": self.is_regression,\n \"has_reappeared\": self.has_reappeared,\n \"new_group_environment\": self.is_new_group_environment,\n }\n\n condition_match = rule.data.get(\"action_match\") or Rule.DEFAULT_CONDITION_MATCH\n filter_match = rule.data.get(\"filter_match\") or Rule.DEFAULT_FILTER_MATCH\n rule_condition_list = rule.data.get(\"conditions\", ())\n frequency = rule.data.get(\"frequency\") or Rule.DEFAULT_FREQUENCY\n try:\n environment = self.event.get_environment()\n except Environment.DoesNotExist:\n return\n\n if rule.environment_id is not None and environment.id != rule.environment_id:\n return\n\n now = timezone.now()\n freq_offset = now - timedelta(minutes=frequency)\n if status.last_active and status.last_active > freq_offset:\n return\n\n state = self.get_state()\n\n condition_list = []\n filter_list = []\n for rule_cond in rule_condition_list:\n if self.get_rule_type(rule_cond) == \"condition/event\":\n condition_list.append(rule_cond)\n else:\n filter_list.append(rule_cond)\n\n # Sort `condition_list` so that most expensive conditions run last.\n condition_list.sort(key=lambda condition: is_condition_slow(condition))\n\n for predicate_list, match, name in (\n (filter_list, filter_match, \"filter\"),\n (condition_list, condition_match, \"condition\"),\n ):\n if not predicate_list:\n continue\n predicate_iter = (self.condition_matches(f, state, rule) for f in predicate_list)\n predicate_func = get_match_function(match)\n if predicate_func:\n 
if not predicate_func(predicate_iter):\n return\n else:\n self.logger.error(\n f\"Unsupported {name}_match {match!r} for rule {rule.id}\",\n filter_match,\n rule.id,\n extra={**logging_details},\n )\n return\n\n updated = (\n GroupRuleStatus.objects.filter(id=status.id)\n .exclude(last_active__gt=freq_offset)\n .update(last_active=now)\n )\n\n if not updated:\n return\n\n if randrange(10) == 0:\n analytics.record(\n \"issue_alert.fired\",\n issue_id=self.group.id,\n project_id=rule.project.id,\n organization_id=rule.project.organization.id,\n rule_id=rule.id,\n )\n\n notification_uuid = str(uuid.uuid4())\n history.record(rule, self.group, self.event.event_id, notification_uuid)\n self.activate_downstream_actions(rule, notification_uuid)\n\n def activate_downstream_actions(\n self, rule: Rule, notification_uuid: Optional[str] = None\n ) -> None:\n state = self.get_state()\n for action in rule.data.get(\"actions\", ()):\n action_cls = rules.get(action[\"id\"])\n if action_cls is None:\n self.logger.warning(\"Unregistered action %r\", action[\"id\"])\n continue\n\n action_inst = action_cls(self.project, data=action, rule=rule)\n\n results = safe_execute(\n action_inst.after,\n event=self.event,\n state=state,\n _with_transaction=False,\n notification_uuid=notification_uuid,\n )\n if results is None:\n self.logger.warning(\"Action %s did not return any futures\", action[\"id\"])\n continue\n\n for future in results:\n key = future.key if future.key is not None else future.callback\n rule_future = RuleFuture(rule=rule, kwargs=future.kwargs)\n\n if key not in self.grouped_futures:\n self.grouped_futures[key] = (future.callback, [rule_future])\n else:\n self.grouped_futures[key][1].append(rule_future)\n\n def apply(\n self,\n ) -> Collection[Tuple[Callable[[GroupEvent, Sequence[RuleFuture]], None], List[RuleFuture]]]:\n # we should only apply rules on unresolved issues\n if not self.event.group.is_unresolved():\n return {}.values()\n\n self.grouped_futures.clear()\n rules = self.get_rules()\n snoozed_rules = RuleSnooze.objects.filter(rule__in=rules, user_id=None).values_list(\n \"rule\", flat=True\n )\n rule_statuses = self.bulk_get_rule_status(rules)\n for rule in rules:\n if rule.id not in snoozed_rules:\n self.apply_rule(rule, rule_statuses[rule.id])\n\n return self.grouped_futures.values()\n", "path": "src/sentry/rules/processor.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport uuid\nfrom datetime import timedelta\nfrom random import randrange\nfrom typing import (\n Any,\n Callable,\n Collection,\n List,\n Mapping,\n MutableMapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n)\n\nfrom django.core.cache import cache\nfrom django.utils import timezone\n\nfrom sentry import analytics\nfrom sentry.eventstore.models import GroupEvent\nfrom sentry.models.environment import Environment\nfrom sentry.models.grouprulestatus import GroupRuleStatus\nfrom sentry.models.rule import Rule\nfrom sentry.models.rulesnooze import RuleSnooze\nfrom sentry.rules import EventState, history, rules\nfrom sentry.rules.actions.base import EventAction\nfrom sentry.rules.conditions.base import EventCondition\nfrom sentry.rules.filters.base import EventFilter\nfrom sentry.types.rules import RuleFuture\nfrom sentry.utils.hashlib import hash_values\nfrom sentry.utils.safe import safe_execute\n\nSLOW_CONDITION_MATCHES = [\"event_frequency\"]\n\n\ndef get_match_function(match_name: str) -> Callable[..., bool] | None:\n if match_name == \"all\":\n return all\n elif match_name == 
\"any\":\n return any\n elif match_name == \"none\":\n return lambda bool_iter: not any(bool_iter)\n return None\n\n\ndef is_condition_slow(condition: Mapping[str, str]) -> bool:\n for slow_conditions in SLOW_CONDITION_MATCHES:\n if slow_conditions in condition[\"id\"]:\n return True\n return False\n\n\nclass RuleProcessor:\n logger = logging.getLogger(\"sentry.rules\")\n\n def __init__(\n self,\n event: GroupEvent,\n is_new: bool,\n is_regression: bool,\n is_new_group_environment: bool,\n has_reappeared: bool,\n ) -> None:\n self.event = event\n self.group = event.group\n self.project = event.project\n\n self.is_new = is_new\n self.is_regression = is_regression\n self.is_new_group_environment = is_new_group_environment\n self.has_reappeared = has_reappeared\n\n self.grouped_futures: MutableMapping[\n str, Tuple[Callable[[GroupEvent, Sequence[RuleFuture]], None], List[RuleFuture]]\n ] = {}\n\n def get_rules(self) -> Sequence[Rule]:\n \"\"\"Get all of the rules for this project from the DB (or cache).\"\"\"\n rules_: Sequence[Rule] = Rule.get_for_project(self.project.id)\n return rules_\n\n def _build_rule_status_cache_key(self, rule_id: int) -> str:\n return \"grouprulestatus:1:%s\" % hash_values([self.group.id, rule_id])\n\n def bulk_get_rule_status(self, rules: Sequence[Rule]) -> Mapping[int, GroupRuleStatus]:\n keys = [self._build_rule_status_cache_key(rule.id) for rule in rules]\n cache_results: Mapping[str, GroupRuleStatus] = cache.get_many(keys)\n missing_rule_ids: Set[int] = set()\n rule_statuses: MutableMapping[int, GroupRuleStatus] = {}\n for key, rule in zip(keys, rules):\n rule_status = cache_results.get(key)\n if not rule_status:\n missing_rule_ids.add(rule.id)\n else:\n rule_statuses[rule.id] = rule_status\n\n if missing_rule_ids:\n # If not cached, attempt to fetch status from the database\n statuses = GroupRuleStatus.objects.filter(\n group=self.group, rule_id__in=missing_rule_ids\n )\n to_cache: List[GroupRuleStatus] = list()\n for status in statuses:\n rule_statuses[status.rule_id] = status\n missing_rule_ids.remove(status.rule_id)\n to_cache.append(status)\n\n # We might need to create some statuses if they don't already exist\n if missing_rule_ids:\n # We use `ignore_conflicts=True` here to avoid race conditions where the statuses\n # might be created between when we queried above and attempt to create the rows now.\n GroupRuleStatus.objects.bulk_create(\n [\n GroupRuleStatus(rule_id=rule_id, group=self.group, project=self.project)\n for rule_id in missing_rule_ids\n ],\n ignore_conflicts=True,\n )\n # Using `ignore_conflicts=True` prevents the pk from being set on the model\n # instances. 
Re-query the database to fetch the rows, they should all exist at this\n # point.\n statuses = GroupRuleStatus.objects.filter(\n group=self.group, rule_id__in=missing_rule_ids\n )\n for status in statuses:\n rule_statuses[status.rule_id] = status\n missing_rule_ids.remove(status.rule_id)\n to_cache.append(status)\n\n if missing_rule_ids:\n # Shouldn't happen, but log just in case\n self.logger.error(\n \"Failed to fetch some GroupRuleStatuses in RuleProcessor\",\n extra={\"missing_rule_ids\": missing_rule_ids, \"group_id\": self.group.id},\n )\n if to_cache:\n cache.set_many(\n {self._build_rule_status_cache_key(item.rule_id): item for item in to_cache}\n )\n\n return rule_statuses\n\n def condition_matches(\n self, condition: dict[str, Any], state: EventState, rule: Rule\n ) -> bool | None:\n condition_cls = rules.get(condition[\"id\"])\n if condition_cls is None:\n self.logger.warning(\"Unregistered condition %r\", condition[\"id\"])\n return None\n\n condition_inst = condition_cls(self.project, data=condition, rule=rule)\n if not isinstance(condition_inst, (EventCondition, EventFilter)):\n self.logger.warning(\"Unregistered condition %r\", condition[\"id\"])\n return None\n passes: bool = safe_execute(\n condition_inst.passes,\n self.event,\n state,\n _with_transaction=False,\n )\n return passes\n\n def get_rule_type(self, condition: Mapping[str, Any]) -> str | None:\n rule_cls = rules.get(condition[\"id\"])\n if rule_cls is None:\n self.logger.warning(\"Unregistered condition or filter %r\", condition[\"id\"])\n return None\n\n rule_type: str = rule_cls.rule_type\n return rule_type\n\n def get_state(self) -> EventState:\n return EventState(\n is_new=self.is_new,\n is_regression=self.is_regression,\n is_new_group_environment=self.is_new_group_environment,\n has_reappeared=self.has_reappeared,\n )\n\n def apply_rule(self, rule: Rule, status: GroupRuleStatus) -> None:\n \"\"\"\n If all conditions and filters pass, execute every action.\n\n :param rule: `Rule` object\n :return: void\n \"\"\"\n logging_details = {\n \"rule_id\": rule.id,\n \"group_id\": self.group.id,\n \"event_id\": self.event.event_id,\n \"project_id\": self.project.id,\n \"is_new\": self.is_new,\n \"is_regression\": self.is_regression,\n \"has_reappeared\": self.has_reappeared,\n \"new_group_environment\": self.is_new_group_environment,\n }\n\n condition_match = rule.data.get(\"action_match\") or Rule.DEFAULT_CONDITION_MATCH\n filter_match = rule.data.get(\"filter_match\") or Rule.DEFAULT_FILTER_MATCH\n rule_condition_list = rule.data.get(\"conditions\", ())\n frequency = rule.data.get(\"frequency\") or Rule.DEFAULT_FREQUENCY\n try:\n environment = self.event.get_environment()\n except Environment.DoesNotExist:\n return\n\n if rule.environment_id is not None and environment.id != rule.environment_id:\n return\n\n now = timezone.now()\n freq_offset = now - timedelta(minutes=frequency)\n if status.last_active and status.last_active > freq_offset:\n return\n\n state = self.get_state()\n\n condition_list = []\n filter_list = []\n for rule_cond in rule_condition_list:\n if self.get_rule_type(rule_cond) == \"condition/event\":\n condition_list.append(rule_cond)\n else:\n filter_list.append(rule_cond)\n\n # Sort `condition_list` so that most expensive conditions run last.\n condition_list.sort(key=lambda condition: is_condition_slow(condition))\n\n for predicate_list, match, name in (\n (filter_list, filter_match, \"filter\"),\n (condition_list, condition_match, \"condition\"),\n ):\n if not predicate_list:\n continue\n 
predicate_iter = (self.condition_matches(f, state, rule) for f in predicate_list)\n predicate_func = get_match_function(match)\n if predicate_func:\n if not predicate_func(predicate_iter):\n return\n else:\n self.logger.error(\n f\"Unsupported {name}_match {match!r} for rule {rule.id}\",\n filter_match,\n rule.id,\n extra={**logging_details},\n )\n return\n\n updated = (\n GroupRuleStatus.objects.filter(id=status.id)\n .exclude(last_active__gt=freq_offset)\n .update(last_active=now)\n )\n\n if not updated:\n return\n\n if randrange(10) == 0:\n analytics.record(\n \"issue_alert.fired\",\n issue_id=self.group.id,\n project_id=rule.project.id,\n organization_id=rule.project.organization.id,\n rule_id=rule.id,\n )\n\n notification_uuid = str(uuid.uuid4())\n history.record(rule, self.group, self.event.event_id, notification_uuid)\n self.activate_downstream_actions(rule, notification_uuid)\n\n def activate_downstream_actions(\n self, rule: Rule, notification_uuid: Optional[str] = None\n ) -> None:\n state = self.get_state()\n for action in rule.data.get(\"actions\", ()):\n action_cls = rules.get(action[\"id\"])\n if action_cls is None:\n self.logger.warning(\"Unregistered action %r\", action[\"id\"])\n continue\n\n action_inst = action_cls(self.project, data=action, rule=rule)\n if not isinstance(action_inst, EventAction):\n self.logger.warning(\"Unregistered action %r\", action[\"id\"])\n continue\n\n results = safe_execute(\n action_inst.after,\n event=self.event,\n state=state,\n _with_transaction=False,\n notification_uuid=notification_uuid,\n )\n if results is None:\n self.logger.warning(\"Action %s did not return any futures\", action[\"id\"])\n continue\n\n for future in results:\n key = future.key if future.key is not None else future.callback\n rule_future = RuleFuture(rule=rule, kwargs=future.kwargs)\n\n if key not in self.grouped_futures:\n self.grouped_futures[key] = (future.callback, [rule_future])\n else:\n self.grouped_futures[key][1].append(rule_future)\n\n def apply(\n self,\n ) -> Collection[Tuple[Callable[[GroupEvent, Sequence[RuleFuture]], None], List[RuleFuture]]]:\n # we should only apply rules on unresolved issues\n if not self.event.group.is_unresolved():\n return {}.values()\n\n self.grouped_futures.clear()\n rules = self.get_rules()\n snoozed_rules = RuleSnooze.objects.filter(rule__in=rules, user_id=None).values_list(\n \"rule\", flat=True\n )\n rule_statuses = self.bulk_get_rule_status(rules)\n for rule in rules:\n if rule.id not in snoozed_rules:\n self.apply_rule(rule, rule_statuses[rule.id])\n\n return self.grouped_futures.values()\n", "path": "src/sentry/rules/processor.py"}]}
| 3,588 | 459 |
gh_patches_debug_18891
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-3198
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `safely_reserve_a_username`
This function keeps using a cursor after an `IntegrityError` exception is raised, which is invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.
--- END ISSUE ---
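For context, the standard way to survive an expected `IntegrityError` without poisoning the surrounding transaction is a savepoint: roll back to the savepoint on collision and release it on success. A standalone sketch of that pattern with psycopg2 (the DSN and the single-column insert are assumptions, not Gratipay's actual schema):

```python
# Savepoint sketch (assumed DSN and simplified insert): an IntegrityError
# normally leaves the transaction unusable, but rolling back to a savepoint
# keeps the same cursor valid for further statements.
import psycopg2

conn = psycopg2.connect("dbname=example")  # assumed connection string
with conn:
    with conn.cursor() as cursor:
        cursor.execute("SAVEPOINT reserve_username")
        try:
            cursor.execute(
                "INSERT INTO participants (username) VALUES (%s)", ("alice",)
            )
        except psycopg2.IntegrityError:
            # Collision: undo only the failed insert, not the transaction.
            cursor.execute("ROLLBACK TO SAVEPOINT reserve_username")
        else:
            cursor.execute("RELEASE SAVEPOINT reserve_username")
```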
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/utils/username.py`
Content:
```
1 from psycopg2 import IntegrityError
2 import random
3
4
5 class FailedToReserveUsername(Exception): pass
6 class RanOutOfUsernameAttempts(Exception): pass
7
8
9 def gen_random_usernames():
10 """Yield random 12-hex-digit unicodes.
11 """
12 while 1:
13 yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')
14
15
16 def insert_into_participants(cursor, username):
17 return cursor.one( "INSERT INTO participants (username, username_lower) "
18 "VALUES (%s, %s) RETURNING username"
19 , (username, username.lower())
20 )
21
22
23 def safely_reserve_a_username(cursor, gen_usernames=gen_random_usernames,
24 reserve=insert_into_participants):
25 """Safely reserve a username.
26
27 :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`
28 transaction
29 :param gen_usernames: a generator of usernames to try
30 :param reserve: a function that takes the cursor and does the SQL
31 stuff
32 :database: one ``INSERT`` on average
33 :returns: a 12-hex-digit unicode
34 :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found
35 within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username
36 generator runs out first
37
38 The returned value is guaranteed to have been reserved in the database.
39
40 """
41 seatbelt = 0
42 for username in gen_usernames():
43 seatbelt += 1
44 if seatbelt > 100:
45 raise FailedToReserveUsername
46
47 try:
48 check = reserve(cursor, username)
49 except IntegrityError: # Collision, try again with another value.
50 continue
51 else:
52 assert check == username
53 break
54 else:
55 raise RanOutOfUsernameAttempts
56 return username
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/utils/username.py b/gratipay/utils/username.py
--- a/gratipay/utils/username.py
+++ b/gratipay/utils/username.py
@@ -38,6 +38,8 @@
The returned value is guaranteed to have been reserved in the database.
"""
+ cursor.execute("SAVEPOINT safely_reserve_a_username")
+
seatbelt = 0
for username in gen_usernames():
seatbelt += 1
@@ -47,10 +49,13 @@
try:
check = reserve(cursor, username)
except IntegrityError: # Collision, try again with another value.
+ cursor.execute("ROLLBACK TO safely_reserve_a_username")
continue
else:
assert check == username
break
else:
raise RanOutOfUsernameAttempts
+
+ cursor.execute("RELEASE safely_reserve_a_username")
return username
|
{"golden_diff": "diff --git a/gratipay/utils/username.py b/gratipay/utils/username.py\n--- a/gratipay/utils/username.py\n+++ b/gratipay/utils/username.py\n@@ -38,6 +38,8 @@\n The returned value is guaranteed to have been reserved in the database.\n \n \"\"\"\n+ cursor.execute(\"SAVEPOINT safely_reserve_a_username\")\n+\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n@@ -47,10 +49,13 @@\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n+ cursor.execute(\"ROLLBACK TO safely_reserve_a_username\")\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n+\n+ cursor.execute(\"RELEASE safely_reserve_a_username\")\n return username\n", "issue": "Fix `safely_reserve_a_username`\nThis function keeps using a cursor after an `IntegrityError` exception is raised, that's invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.\n\nFix `safely_reserve_a_username`\nThis function keeps using a cursor after an `IntegrityError` exception is raised, that's invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.\n\n", "before_files": [{"content": "from psycopg2 import IntegrityError\nimport random\n\n\nclass FailedToReserveUsername(Exception): pass\nclass RanOutOfUsernameAttempts(Exception): pass\n\n\ndef gen_random_usernames():\n \"\"\"Yield random 12-hex-digit unicodes.\n \"\"\"\n while 1:\n yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')\n\n\ndef insert_into_participants(cursor, username):\n return cursor.one( \"INSERT INTO participants (username, username_lower) \"\n \"VALUES (%s, %s) RETURNING username\"\n , (username, username.lower())\n )\n\n\ndef safely_reserve_a_username(cursor, gen_usernames=gen_random_usernames,\n reserve=insert_into_participants):\n \"\"\"Safely reserve a username.\n\n :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`\n transaction\n :param gen_usernames: a generator of usernames to try\n :param reserve: a function that takes the cursor and does the SQL\n stuff\n :database: one ``INSERT`` on average\n :returns: a 12-hex-digit unicode\n :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found\n within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username\n generator runs out first\n\n The returned value is guaranteed to have been reserved in the database.\n\n \"\"\"\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n if seatbelt > 100:\n raise FailedToReserveUsername\n\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n return username\n", "path": "gratipay/utils/username.py"}], "after_files": [{"content": "from psycopg2 import IntegrityError\nimport random\n\n\nclass FailedToReserveUsername(Exception): pass\nclass RanOutOfUsernameAttempts(Exception): pass\n\n\ndef gen_random_usernames():\n \"\"\"Yield random 12-hex-digit unicodes.\n \"\"\"\n while 1:\n yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')\n\n\ndef insert_into_participants(cursor, username):\n return cursor.one( \"INSERT INTO participants (username, username_lower) \"\n \"VALUES (%s, %s) RETURNING username\"\n , (username, username.lower())\n )\n\n\ndef safely_reserve_a_username(cursor, 
gen_usernames=gen_random_usernames,\n reserve=insert_into_participants):\n \"\"\"Safely reserve a username.\n\n :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`\n transaction\n :param gen_usernames: a generator of usernames to try\n :param reserve: a function that takes the cursor and does the SQL\n stuff\n :database: one ``INSERT`` on average\n :returns: a 12-hex-digit unicode\n :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found\n within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username\n generator runs out first\n\n The returned value is guaranteed to have been reserved in the database.\n\n \"\"\"\n cursor.execute(\"SAVEPOINT safely_reserve_a_username\")\n\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n if seatbelt > 100:\n raise FailedToReserveUsername\n\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n cursor.execute(\"ROLLBACK TO safely_reserve_a_username\")\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n\n cursor.execute(\"RELEASE safely_reserve_a_username\")\n return username\n", "path": "gratipay/utils/username.py"}]}
| 920 | 198 |
gh_patches_debug_26872
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-22956
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom Page Redirection in Notifications
<!--
Welcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to Frappe
- For questions and general support, refer to https://stackoverflow.com/questions/tagged/frappe
   - For documentation issues, use https://frappeframework.com/docs/user/en or the developer cheat sheet https://frappeframework.com/docs/user/en/bench/resources/bench-commands-cheatsheet
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a feature request, make sure to be as verbose as possible. The better you convey your message, the greater the drive to make it happen.
-->
Is your feature request related to a problem? Please describe.
Allow redirection to custom URL on notification click.
Describe the solution you'd like
Currently, in the Frappe framework, notifications only support a default redirection mechanism to a particular doctype.
We should have the ability to specify:
portal URL (custom app like desk has some notification)
Framework page
Kanban link or something like that
Suggested Changes:
1. We will add “Custom Link” in type
2. We will have another field called “Link Uri” where we will store the custom link.
3. We will check if it is “Custom Link”. If it is a Custom Link, we will open the specified link instead of opening the Reference Document
4. We will specify the
Example use case:
We have a custom WhatsApp-like messaging feature built inside the framework, and we want the user to be directed to a custom page in Frappe when they click on a notification (which is linked to a WhatsApp message), rather than the standard WhatsApp message doctype.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/desk/doctype/notification_log/notification_log.py`
Content:
```
1 # Copyright (c) 2019, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 import frappe
5 from frappe import _
6 from frappe.desk.doctype.notification_settings.notification_settings import (
7 is_email_notifications_enabled_for_type,
8 is_notifications_enabled,
9 )
10 from frappe.model.document import Document
11
12
13 class NotificationLog(Document):
14 # begin: auto-generated types
15 # This code is auto-generated. Do not modify anything in this block.
16
17 from typing import TYPE_CHECKING
18
19 if TYPE_CHECKING:
20 from frappe.types import DF
21
22 attached_file: DF.Code | None
23 document_name: DF.Data | None
24 document_type: DF.Link | None
25 email_content: DF.TextEditor | None
26 for_user: DF.Link | None
27 from_user: DF.Link | None
28 read: DF.Check
29 subject: DF.Text | None
30 type: DF.Literal["Mention", "Energy Point", "Assignment", "Share", "Alert"]
31 # end: auto-generated types
32 def after_insert(self):
33 frappe.publish_realtime("notification", after_commit=True, user=self.for_user)
34 set_notifications_as_unseen(self.for_user)
35 if is_email_notifications_enabled_for_type(self.for_user, self.type):
36 try:
37 send_notification_email(self)
38 except frappe.OutgoingEmailError:
39 self.log_error(_("Failed to send notification email"))
40
41 @staticmethod
42 def clear_old_logs(days=180):
43 from frappe.query_builder import Interval
44 from frappe.query_builder.functions import Now
45
46 table = frappe.qb.DocType("Notification Log")
47 frappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))
48
49
50 def get_permission_query_conditions(for_user):
51 if not for_user:
52 for_user = frappe.session.user
53
54 if for_user == "Administrator":
55 return
56
57 return f"""(`tabNotification Log`.for_user = {frappe.db.escape(for_user)})"""
58
59
60 def get_title(doctype, docname, title_field=None):
61 if not title_field:
62 title_field = frappe.get_meta(doctype).get_title_field()
63 return docname if title_field == "name" else frappe.db.get_value(doctype, docname, title_field)
64
65
66 def get_title_html(title):
67 return f'<b class="subject-title">{title}</b>'
68
69
70 def enqueue_create_notification(users: list[str] | str, doc: dict):
71 """Send notification to users.
72
73 users: list of user emails or string of users with comma separated emails
74 doc: contents of `Notification` doc
75 """
76
77 # During installation of new site, enqueue_create_notification tries to connect to Redis.
78 # This breaks new site creation if Redis server is not running.
79 # We do not need any notifications in fresh installation
80 if frappe.flags.in_install:
81 return
82
83 doc = frappe._dict(doc)
84
85 if isinstance(users, str):
86 users = [user.strip() for user in users.split(",") if user.strip()]
87 users = list(set(users))
88
89 frappe.enqueue(
90 "frappe.desk.doctype.notification_log.notification_log.make_notification_logs",
91 doc=doc,
92 users=users,
93 now=frappe.flags.in_test,
94 )
95
96
97 def make_notification_logs(doc, users):
98 for user in _get_user_ids(users):
99 notification = frappe.new_doc("Notification Log")
100 notification.update(doc)
101 notification.for_user = user
102 if (
103 notification.for_user != notification.from_user
104 or doc.type == "Energy Point"
105 or doc.type == "Alert"
106 ):
107 notification.insert(ignore_permissions=True)
108
109
110 def _get_user_ids(user_emails):
111 user_names = frappe.db.get_values(
112 "User", {"enabled": 1, "email": ("in", user_emails)}, "name", pluck=True
113 )
114 return [user for user in user_names if is_notifications_enabled(user)]
115
116
117 def send_notification_email(doc):
118
119 if doc.type == "Energy Point" and doc.email_content is None:
120 return
121
122 from frappe.utils import get_url_to_form, strip_html
123
124 email = frappe.db.get_value("User", doc.for_user, "email")
125 if not email:
126 return
127
128 doc_link = get_url_to_form(doc.document_type, doc.document_name)
129 header = get_email_header(doc)
130 email_subject = strip_html(doc.subject)
131
132 frappe.sendmail(
133 recipients=email,
134 subject=email_subject,
135 template="new_notification",
136 args = {
137 "body_content": doc.subject,
138 "description": doc.email_content,
139 "document_type": doc.document_type,
140 "document_name": doc.document_name,
141 "doc_link": doc_link,
142 },
143 header=[header, "orange"],
144 now=frappe.flags.in_test,
145 )
146
147
148 def get_email_header(doc):
149 docname = doc.document_name
150 header_map = {
151 "Default": _("New Notification"),
152 "Mention": _("New Mention on {0}").format(docname),
153 "Assignment": _("Assignment Update on {0}").format(docname),
154 "Share": _("New Document Shared {0}").format(docname),
155 "Energy Point": _("Energy Point Update on {0}").format(docname),
156 }
157
158 return header_map[doc.type or "Default"]
159
160
161 @frappe.whitelist()
162 def get_notification_logs(limit=20):
163 notification_logs = frappe.db.get_list(
164 "Notification Log", fields=["*"], limit=limit, order_by="modified desc"
165 )
166
167 users = [log.from_user for log in notification_logs]
168 users = [*set(users)] # remove duplicates
169 user_info = frappe._dict()
170
171 for user in users:
172 frappe.utils.add_user_info(user, user_info)
173
174 return {"notification_logs": notification_logs, "user_info": user_info}
175
176
177 @frappe.whitelist()
178 def mark_all_as_read():
179 unread_docs_list = frappe.get_all(
180 "Notification Log", filters={"read": 0, "for_user": frappe.session.user}
181 )
182 unread_docnames = [doc.name for doc in unread_docs_list]
183 if unread_docnames:
184 filters = {"name": ["in", unread_docnames]}
185 frappe.db.set_value("Notification Log", filters, "read", 1, update_modified=False)
186
187
188 @frappe.whitelist()
189 def mark_as_read(docname: str):
190 if frappe.flags.read_only:
191 return
192
193 if docname:
194 frappe.db.set_value("Notification Log", str(docname), "read", 1, update_modified=False)
195
196
197 @frappe.whitelist()
198 def trigger_indicator_hide():
199 frappe.publish_realtime("indicator_hide", user=frappe.session.user)
200
201
202 def set_notifications_as_unseen(user):
203 try:
204 frappe.db.set_value("Notification Settings", user, "seen", 0, update_modified=False)
205 except frappe.DoesNotExistError:
206 return
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/desk/doctype/notification_log/notification_log.py b/frappe/desk/doctype/notification_log/notification_log.py
--- a/frappe/desk/doctype/notification_log/notification_log.py
+++ b/frappe/desk/doctype/notification_log/notification_log.py
@@ -25,6 +25,7 @@
email_content: DF.TextEditor | None
for_user: DF.Link | None
from_user: DF.Link | None
+ link: DF.Data | None
read: DF.Check
subject: DF.Text | None
type: DF.Literal["Mention", "Energy Point", "Assignment", "Share", "Alert"]
@@ -125,21 +126,24 @@
if not email:
return
- doc_link = get_url_to_form(doc.document_type, doc.document_name)
header = get_email_header(doc)
email_subject = strip_html(doc.subject)
+ args = {
+ "body_content": doc.subject,
+ "description": doc.email_content,
+ }
+ if doc.link:
+ args["doc_link"] = doc.link
+ else:
+ args["document_type"] = doc.document_type
+ args["document_name"] = doc.document_name
+ args["doc_link"] = get_url_to_form(doc.document_type, doc.document_name)
frappe.sendmail(
recipients=email,
subject=email_subject,
template="new_notification",
- args = {
- "body_content": doc.subject,
- "description": doc.email_content,
- "document_type": doc.document_type,
- "document_name": doc.document_name,
- "doc_link": doc_link,
- },
+ args=args,
header=[header, "orange"],
now=frappe.flags.in_test,
)
|
{"golden_diff": "diff --git a/frappe/desk/doctype/notification_log/notification_log.py b/frappe/desk/doctype/notification_log/notification_log.py\n--- a/frappe/desk/doctype/notification_log/notification_log.py\n+++ b/frappe/desk/doctype/notification_log/notification_log.py\n@@ -25,6 +25,7 @@\n \t\temail_content: DF.TextEditor | None\n \t\tfor_user: DF.Link | None\n \t\tfrom_user: DF.Link | None\n+\t\tlink: DF.Data | None\n \t\tread: DF.Check\n \t\tsubject: DF.Text | None\n \t\ttype: DF.Literal[\"Mention\", \"Energy Point\", \"Assignment\", \"Share\", \"Alert\"]\n@@ -125,21 +126,24 @@\n \tif not email:\n \t\treturn\n \n-\tdoc_link = get_url_to_form(doc.document_type, doc.document_name)\n \theader = get_email_header(doc)\n \temail_subject = strip_html(doc.subject)\n+\targs = {\n+\t\t\"body_content\": doc.subject,\n+\t\t\"description\": doc.email_content,\n+\t}\n+\tif doc.link:\n+\t\targs[\"doc_link\"] = doc.link\n+\telse:\n+\t\targs[\"document_type\"] = doc.document_type\n+\t\targs[\"document_name\"] = doc.document_name\n+\t\targs[\"doc_link\"] = get_url_to_form(doc.document_type, doc.document_name)\n \n \tfrappe.sendmail(\n \t\trecipients=email,\n \t\tsubject=email_subject,\n \t\ttemplate=\"new_notification\",\n-\t\targs = {\n-\t\t\t\"body_content\": doc.subject,\n-\t\t\t\"description\": doc.email_content,\n-\t\t\t\"document_type\": doc.document_type,\n-\t\t\t\"document_name\": doc.document_name,\n-\t\t\t\"doc_link\": doc_link,\n-\t\t},\n+\t\targs=args,\n \t\theader=[header, \"orange\"],\n \t\tnow=frappe.flags.in_test,\n \t)\n", "issue": "Custom Page Redirection in Notifications\n<!--\r\nWelcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to Frappe\r\n - For questions and general support, refer to https://stackoverflow.com/questions/tagged/frappe\r\n - For documentation issues, use https://frappeframework.com/docs/user/en or the developer cheetsheet https://frappeframework.com/docs/user/en/bench/resources/bench-commands-cheatsheet\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a feature request, make sure to be as verbose as possible. The better you convey your message, the greater the drive to make it happen.\r\n-->\r\n\r\nIs your feature request related to a problem? Please describe.\r\nAllow redirection to custom URL on notification click.\r\n\r\nDescribe the solution you'd like\r\nCurrently, in the Frappe framework, notifications only support a default redirection mechanism to a particular doctype.\r\n\r\nWe should have ability to specify:\r\n\r\nportal URL (custom app like desk has some notification)\r\nFramework page \r\nKanban link or something like that\r\n\r\nSuggested Changes:\r\n\r\n1. We will add \u201cCustom Link\u201d in type\r\n2. We will have another field called \u201cLink Uri\u201d where we will store the custom link.\r\n3. We will check if it is \u201cCustom Link\u201d. If it is Custom Link then we will Open the specified link instead of Opening Reference Document\r\n4. 
We will specify the\r\n\r\nExample use case:\r\n\r\nWe are having a custom WhatsApp-like which is built inside the framework, we want the user to be directed to a custom page in Frappe when they click on a notification (which is linked to whatsapp message), rather than the standard WhatsApp message doctype.\r\n\n", "before_files": [{"content": "# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe import _\nfrom frappe.desk.doctype.notification_settings.notification_settings import (\n\tis_email_notifications_enabled_for_type,\n\tis_notifications_enabled,\n)\nfrom frappe.model.document import Document\n\n\nclass NotificationLog(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tattached_file: DF.Code | None\n\t\tdocument_name: DF.Data | None\n\t\tdocument_type: DF.Link | None\n\t\temail_content: DF.TextEditor | None\n\t\tfor_user: DF.Link | None\n\t\tfrom_user: DF.Link | None\n\t\tread: DF.Check\n\t\tsubject: DF.Text | None\n\t\ttype: DF.Literal[\"Mention\", \"Energy Point\", \"Assignment\", \"Share\", \"Alert\"]\n\t# end: auto-generated types\n\tdef after_insert(self):\n\t\tfrappe.publish_realtime(\"notification\", after_commit=True, user=self.for_user)\n\t\tset_notifications_as_unseen(self.for_user)\n\t\tif is_email_notifications_enabled_for_type(self.for_user, self.type):\n\t\t\ttry:\n\t\t\t\tsend_notification_email(self)\n\t\t\texcept frappe.OutgoingEmailError:\n\t\t\t\tself.log_error(_(\"Failed to send notification email\"))\n\n\t@staticmethod\n\tdef clear_old_logs(days=180):\n\t\tfrom frappe.query_builder import Interval\n\t\tfrom frappe.query_builder.functions import Now\n\n\t\ttable = frappe.qb.DocType(\"Notification Log\")\n\t\tfrappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))\n\n\ndef get_permission_query_conditions(for_user):\n\tif not for_user:\n\t\tfor_user = frappe.session.user\n\n\tif for_user == \"Administrator\":\n\t\treturn\n\n\treturn f\"\"\"(`tabNotification Log`.for_user = {frappe.db.escape(for_user)})\"\"\"\n\n\ndef get_title(doctype, docname, title_field=None):\n\tif not title_field:\n\t\ttitle_field = frappe.get_meta(doctype).get_title_field()\n\treturn docname if title_field == \"name\" else frappe.db.get_value(doctype, docname, title_field)\n\n\ndef get_title_html(title):\n\treturn f'<b class=\"subject-title\">{title}</b>'\n\n\ndef enqueue_create_notification(users: list[str] | str, doc: dict):\n\t\"\"\"Send notification to users.\n\n\tusers: list of user emails or string of users with comma separated emails\n\tdoc: contents of `Notification` doc\n\t\"\"\"\n\n\t# During installation of new site, enqueue_create_notification tries to connect to Redis.\n\t# This breaks new site creation if Redis server is not running.\n\t# We do not need any notifications in fresh installation\n\tif frappe.flags.in_install:\n\t\treturn\n\n\tdoc = frappe._dict(doc)\n\n\tif isinstance(users, str):\n\t\tusers = [user.strip() for user in users.split(\",\") if user.strip()]\n\tusers = list(set(users))\n\n\tfrappe.enqueue(\n\t\t\"frappe.desk.doctype.notification_log.notification_log.make_notification_logs\",\n\t\tdoc=doc,\n\t\tusers=users,\n\t\tnow=frappe.flags.in_test,\n\t)\n\n\ndef make_notification_logs(doc, users):\n\tfor user in _get_user_ids(users):\n\t\tnotification = frappe.new_doc(\"Notification 
Log\")\n\t\tnotification.update(doc)\n\t\tnotification.for_user = user\n\t\tif (\n\t\t\tnotification.for_user != notification.from_user\n\t\t\tor doc.type == \"Energy Point\"\n\t\t\tor doc.type == \"Alert\"\n\t\t):\n\t\t\tnotification.insert(ignore_permissions=True)\n\n\ndef _get_user_ids(user_emails):\n\tuser_names = frappe.db.get_values(\n\t\t\"User\", {\"enabled\": 1, \"email\": (\"in\", user_emails)}, \"name\", pluck=True\n\t)\n\treturn [user for user in user_names if is_notifications_enabled(user)]\n\n\ndef send_notification_email(doc):\n\n\tif doc.type == \"Energy Point\" and doc.email_content is None:\n\t\treturn\n\n\tfrom frappe.utils import get_url_to_form, strip_html\n\n\temail = frappe.db.get_value(\"User\", doc.for_user, \"email\")\n\tif not email:\n\t\treturn\n\n\tdoc_link = get_url_to_form(doc.document_type, doc.document_name)\n\theader = get_email_header(doc)\n\temail_subject = strip_html(doc.subject)\n\n\tfrappe.sendmail(\n\t\trecipients=email,\n\t\tsubject=email_subject,\n\t\ttemplate=\"new_notification\",\n\t\targs = {\n\t\t\t\"body_content\": doc.subject,\n\t\t\t\"description\": doc.email_content,\n\t\t\t\"document_type\": doc.document_type,\n\t\t\t\"document_name\": doc.document_name,\n\t\t\t\"doc_link\": doc_link,\n\t\t},\n\t\theader=[header, \"orange\"],\n\t\tnow=frappe.flags.in_test,\n\t)\n\n\ndef get_email_header(doc):\n\tdocname = doc.document_name\n\theader_map = {\n\t\t\"Default\": _(\"New Notification\"),\n\t\t\"Mention\": _(\"New Mention on {0}\").format(docname),\n\t\t\"Assignment\": _(\"Assignment Update on {0}\").format(docname),\n\t\t\"Share\": _(\"New Document Shared {0}\").format(docname),\n\t\t\"Energy Point\": _(\"Energy Point Update on {0}\").format(docname),\n\t}\n\n\treturn header_map[doc.type or \"Default\"]\n\n\[email protected]()\ndef get_notification_logs(limit=20):\n\tnotification_logs = frappe.db.get_list(\n\t\t\"Notification Log\", fields=[\"*\"], limit=limit, order_by=\"modified desc\"\n\t)\n\n\tusers = [log.from_user for log in notification_logs]\n\tusers = [*set(users)] # remove duplicates\n\tuser_info = frappe._dict()\n\n\tfor user in users:\n\t\tfrappe.utils.add_user_info(user, user_info)\n\n\treturn {\"notification_logs\": notification_logs, \"user_info\": user_info}\n\n\[email protected]()\ndef mark_all_as_read():\n\tunread_docs_list = frappe.get_all(\n\t\t\"Notification Log\", filters={\"read\": 0, \"for_user\": frappe.session.user}\n\t)\n\tunread_docnames = [doc.name for doc in unread_docs_list]\n\tif unread_docnames:\n\t\tfilters = {\"name\": [\"in\", unread_docnames]}\n\t\tfrappe.db.set_value(\"Notification Log\", filters, \"read\", 1, update_modified=False)\n\n\[email protected]()\ndef mark_as_read(docname: str):\n\tif frappe.flags.read_only:\n\t\treturn\n\n\tif docname:\n\t\tfrappe.db.set_value(\"Notification Log\", str(docname), \"read\", 1, update_modified=False)\n\n\[email protected]()\ndef trigger_indicator_hide():\n\tfrappe.publish_realtime(\"indicator_hide\", user=frappe.session.user)\n\n\ndef set_notifications_as_unseen(user):\n\ttry:\n\t\tfrappe.db.set_value(\"Notification Settings\", user, \"seen\", 0, update_modified=False)\n\texcept frappe.DoesNotExistError:\n\t\treturn\n", "path": "frappe/desk/doctype/notification_log/notification_log.py"}], "after_files": [{"content": "# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. 
See LICENSE\n\nimport frappe\nfrom frappe import _\nfrom frappe.desk.doctype.notification_settings.notification_settings import (\n\tis_email_notifications_enabled_for_type,\n\tis_notifications_enabled,\n)\nfrom frappe.model.document import Document\n\n\nclass NotificationLog(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tattached_file: DF.Code | None\n\t\tdocument_name: DF.Data | None\n\t\tdocument_type: DF.Link | None\n\t\temail_content: DF.TextEditor | None\n\t\tfor_user: DF.Link | None\n\t\tfrom_user: DF.Link | None\n\t\tlink: DF.Data | None\n\t\tread: DF.Check\n\t\tsubject: DF.Text | None\n\t\ttype: DF.Literal[\"Mention\", \"Energy Point\", \"Assignment\", \"Share\", \"Alert\"]\n\t# end: auto-generated types\n\tdef after_insert(self):\n\t\tfrappe.publish_realtime(\"notification\", after_commit=True, user=self.for_user)\n\t\tset_notifications_as_unseen(self.for_user)\n\t\tif is_email_notifications_enabled_for_type(self.for_user, self.type):\n\t\t\ttry:\n\t\t\t\tsend_notification_email(self)\n\t\t\texcept frappe.OutgoingEmailError:\n\t\t\t\tself.log_error(_(\"Failed to send notification email\"))\n\n\t@staticmethod\n\tdef clear_old_logs(days=180):\n\t\tfrom frappe.query_builder import Interval\n\t\tfrom frappe.query_builder.functions import Now\n\n\t\ttable = frappe.qb.DocType(\"Notification Log\")\n\t\tfrappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))\n\n\ndef get_permission_query_conditions(for_user):\n\tif not for_user:\n\t\tfor_user = frappe.session.user\n\n\tif for_user == \"Administrator\":\n\t\treturn\n\n\treturn f\"\"\"(`tabNotification Log`.for_user = {frappe.db.escape(for_user)})\"\"\"\n\n\ndef get_title(doctype, docname, title_field=None):\n\tif not title_field:\n\t\ttitle_field = frappe.get_meta(doctype).get_title_field()\n\treturn docname if title_field == \"name\" else frappe.db.get_value(doctype, docname, title_field)\n\n\ndef get_title_html(title):\n\treturn f'<b class=\"subject-title\">{title}</b>'\n\n\ndef enqueue_create_notification(users: list[str] | str, doc: dict):\n\t\"\"\"Send notification to users.\n\n\tusers: list of user emails or string of users with comma separated emails\n\tdoc: contents of `Notification` doc\n\t\"\"\"\n\n\t# During installation of new site, enqueue_create_notification tries to connect to Redis.\n\t# This breaks new site creation if Redis server is not running.\n\t# We do not need any notifications in fresh installation\n\tif frappe.flags.in_install:\n\t\treturn\n\n\tdoc = frappe._dict(doc)\n\n\tif isinstance(users, str):\n\t\tusers = [user.strip() for user in users.split(\",\") if user.strip()]\n\tusers = list(set(users))\n\n\tfrappe.enqueue(\n\t\t\"frappe.desk.doctype.notification_log.notification_log.make_notification_logs\",\n\t\tdoc=doc,\n\t\tusers=users,\n\t\tnow=frappe.flags.in_test,\n\t)\n\n\ndef make_notification_logs(doc, users):\n\tfor user in _get_user_ids(users):\n\t\tnotification = frappe.new_doc(\"Notification Log\")\n\t\tnotification.update(doc)\n\t\tnotification.for_user = user\n\t\tif (\n\t\t\tnotification.for_user != notification.from_user\n\t\t\tor doc.type == \"Energy Point\"\n\t\t\tor doc.type == \"Alert\"\n\t\t):\n\t\t\tnotification.insert(ignore_permissions=True)\n\n\ndef _get_user_ids(user_emails):\n\tuser_names = frappe.db.get_values(\n\t\t\"User\", {\"enabled\": 1, \"email\": (\"in\", user_emails)}, \"name\", 
pluck=True\n\t)\n\treturn [user for user in user_names if is_notifications_enabled(user)]\n\n\ndef send_notification_email(doc):\n\n\tif doc.type == \"Energy Point\" and doc.email_content is None:\n\t\treturn\n\n\tfrom frappe.utils import get_url_to_form, strip_html\n\n\temail = frappe.db.get_value(\"User\", doc.for_user, \"email\")\n\tif not email:\n\t\treturn\n\n\theader = get_email_header(doc)\n\temail_subject = strip_html(doc.subject)\n\targs = {\n\t\t\"body_content\": doc.subject,\n\t\t\"description\": doc.email_content,\n\t}\n\tif doc.link:\n\t\targs[\"doc_link\"] = doc.link\n\telse:\n\t\targs[\"document_type\"] = doc.document_type\n\t\targs[\"document_name\"] = doc.document_name\n\t\targs[\"doc_link\"] = get_url_to_form(doc.document_type, doc.document_name)\n\n\tfrappe.sendmail(\n\t\trecipients=email,\n\t\tsubject=email_subject,\n\t\ttemplate=\"new_notification\",\n\t\targs=args,\n\t\theader=[header, \"orange\"],\n\t\tnow=frappe.flags.in_test,\n\t)\n\n\ndef get_email_header(doc):\n\tdocname = doc.document_name\n\theader_map = {\n\t\t\"Default\": _(\"New Notification\"),\n\t\t\"Mention\": _(\"New Mention on {0}\").format(docname),\n\t\t\"Assignment\": _(\"Assignment Update on {0}\").format(docname),\n\t\t\"Share\": _(\"New Document Shared {0}\").format(docname),\n\t\t\"Energy Point\": _(\"Energy Point Update on {0}\").format(docname),\n\t}\n\n\treturn header_map[doc.type or \"Default\"]\n\n\[email protected]()\ndef get_notification_logs(limit=20):\n\tnotification_logs = frappe.db.get_list(\n\t\t\"Notification Log\", fields=[\"*\"], limit=limit, order_by=\"modified desc\"\n\t)\n\n\tusers = [log.from_user for log in notification_logs]\n\tusers = [*set(users)] # remove duplicates\n\tuser_info = frappe._dict()\n\n\tfor user in users:\n\t\tfrappe.utils.add_user_info(user, user_info)\n\n\treturn {\"notification_logs\": notification_logs, \"user_info\": user_info}\n\n\[email protected]()\ndef mark_all_as_read():\n\tunread_docs_list = frappe.get_all(\n\t\t\"Notification Log\", filters={\"read\": 0, \"for_user\": frappe.session.user}\n\t)\n\tunread_docnames = [doc.name for doc in unread_docs_list]\n\tif unread_docnames:\n\t\tfilters = {\"name\": [\"in\", unread_docnames]}\n\t\tfrappe.db.set_value(\"Notification Log\", filters, \"read\", 1, update_modified=False)\n\n\[email protected]()\ndef mark_as_read(docname: str):\n\tif frappe.flags.read_only:\n\t\treturn\n\n\tif docname:\n\t\tfrappe.db.set_value(\"Notification Log\", str(docname), \"read\", 1, update_modified=False)\n\n\[email protected]()\ndef trigger_indicator_hide():\n\tfrappe.publish_realtime(\"indicator_hide\", user=frappe.session.user)\n\n\ndef set_notifications_as_unseen(user):\n\ttry:\n\t\tfrappe.db.set_value(\"Notification Settings\", user, \"seen\", 0, update_modified=False)\n\texcept frappe.DoesNotExistError:\n\t\treturn\n", "path": "frappe/desk/doctype/notification_log/notification_log.py"}]}
| 2,725 | 399 |
gh_patches_debug_32331
|
rasdani/github-patches
|
git_diff
|
lk-geimfari__mimesis-435
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Arguments shadow builtin names
After we updated `flake8-builtins`, we got several new errors.
```
=================================== FAILURES ===================================
_________________________________ FLAKE8-check _________________________________
/home/travis/build/lk-geimfari/mimesis/mimesis/providers/business.py:47:5: A002 "copyright" is used as an argument and thus shadows a python builtin, consider renaming the argument
_________________________________ FLAKE8-check _________________________________
/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:37:5: A002 "hash" is used as an argument and thus shadows a python builtin, consider renaming the argument
/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:50:5: A002 "bytes" is used as an argument and thus shadows a python builtin, consider renaming the argument
```
Arguments shadow builtin names
After we updated `flake8-builtins`, we got several new errors.
```
=================================== FAILURES ===================================
_________________________________ FLAKE8-check _________________________________
/home/travis/build/lk-geimfari/mimesis/mimesis/providers/business.py:47:5: A002 "copyright" is used as an argument and thus shadows a python builtin, consider renaming the argument
_________________________________ FLAKE8-check _________________________________
/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:37:5: A002 "hash" is used as an argument and thus shadows a python builtin, consider renaming the argument
/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:50:5: A002 "bytes" is used as an argument and thus shadows a python builtin, consider renaming the argument
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/providers/business.py`
Content:
```
1 """Business data provider."""
2
3 from mimesis.data import (CRYPTOCURRENCY_ISO_CODES, CRYPTOCURRENCY_SYMBOLS,
4 CURRENCY_ISO_CODES, CURRENCY_SYMBOLS)
5 from mimesis.providers.base import BaseDataProvider
6 from mimesis.utils import pull
7
8 __all__ = ['Business']
9
10
11 class Business(BaseDataProvider):
12 """Class for generating data for business."""
13
14 def __init__(self, *args, **kwargs):
15 """Initialize attributes.
16
17 :param locale: Current locale.
18 """
19 super().__init__(*args, **kwargs)
20 self._data = pull('business.json', self.locale)
21
22 def company(self) -> str:
23 """Get a random company name.
24
25 :return: Company name.
26
27 :Example:
28 Gamma Systems.
29 """
30 return self.random.choice(
31 self._data['company']['name'])
32
33 def company_type(self, abbr: bool = False) -> str:
34 """Get a random type of business entity.
35
36 :param abbr: Abbreviated company type.
37 :return: Types of business entity.
38
39 :Example:
40 Incorporated.
41 """
42 return self.random.choice(
43 self._data['company']['type'].get(
44 'abbr' if abbr else 'title'),
45 )
46
47 def copyright(self) -> str:
48 """Generate a random copyright.
49
50 :return: Copyright of company.
51
52 :Example:
53 © Komercia, Inc.
54 """
55 return '© {}, {}'.format(
56 self.company(),
57 self.company_type(abbr=True),
58 )
59
60 def currency_iso_code(self) -> str:
61 """Get code of the currency.
62
63 :return: Currency code.
64
65 :Example:
66 RUR.
67 """
68 return self.random.choice(CURRENCY_ISO_CODES)
69
70 def cryptocurrency_iso_code(self) -> str:
71 """Get symbol of random cryptocurrency.
72
73 :return: Symbol of cryptocurrency.
74 """
75 return self.random.choice(CRYPTOCURRENCY_ISO_CODES)
76
77 def currency_symbol(self):
78 """Get a currency symbol for current locale.
79
80 :return: Currency symbol.
81 """
82 return CURRENCY_SYMBOLS[self.locale]
83
84 def cryptocurrency_symbol(self) -> str:
85 """Get a cryptocurrency symbol.
86
87 :return: Symbol of cryptocurrency.
88
89 :Example:
90 Ƀ
91 """
92 return self.random.choice(CRYPTOCURRENCY_SYMBOLS)
93
94 def price(self, minimum: float = 10.00,
95 maximum: float = 1000.00) -> str:
96 """Generate a random price.
97
98 :param minimum: Max value of price.
99 :param maximum: Min value of price.
100 :return: Price.
101
102 :Example:
103 599.99 $.
104 """
105 price = self.random.uniform(minimum, maximum, precision=2)
106 return '{0} {1}'.format(price, self.currency_symbol())
107
108 def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
109 """Generate random price in BTC.
110
111 :param minimum: Minimum value of price
112 :param maximum: Maximum value of price.
113 :return: Price in BTC.
114
115 :Example:
116 0.5885238 BTC
117 """
118 return '{} BTC'.format(
119 self.random.uniform(
120 minimum,
121 maximum,
122 precision=7,
123 ),
124 )
125
```
Path: `mimesis/providers/cryptographic.py`
Content:
```
1 """Cryptographic data provider."""
2
3 import hashlib
4 import string
5 import uuid
6 from typing import Optional
7
8 from mimesis.enums import Algorithm
9 from mimesis.providers.base import BaseDataProvider
10 from mimesis.providers.text import Text
11 from mimesis.typing import Bytes
12
13 __all__ = ['Cryptographic']
14
15
16 class Cryptographic(BaseDataProvider):
17 """Class that provides cryptographic data."""
18
19 def __init__(self, *args, **kwargs) -> None:
20 """Initialize attributes.
21
22 :param seed: Seed.
23 """
24 super().__init__(*args, **kwargs)
25 self.__words = Text('en')._data['words']
26 self.__chars = string.ascii_letters + string.digits + string.punctuation
27
28 def uuid(self, version: Optional[int] = None) -> str:
29 """Generate random UUID.
30
31 :param version: UUID version.
32 :return: UUID
33 """
34 bits = self.random.getrandbits(128)
35 return str(uuid.UUID(int=bits, version=version))
36
37 def hash(self, algorithm: Optional[Algorithm] = None) -> str:
38 """Generate random hash.
39
40 :param algorithm: Enum object ``Algorithm``.
41 :return: Hash.
42 :raises NonEnumerableError: if algorithm is not supported.
43 """
44 key = self._validate_enum(algorithm, Algorithm)
45
46 if hasattr(hashlib, key):
47 fn = getattr(hashlib, key)
48 return fn(self.uuid().encode()).hexdigest()
49
50 def bytes(self, entropy: int = 32) -> Bytes:
51 """Generate byte string containing *entropy* bytes.
52
53 The string has *entropy* random bytes, each byte
54 converted to two hex digits.
55
56 :param entropy: Number of bytes.
57 :return: Bytes.
58 :rtype: bytes
59 """
60 return bytes(self.random.getrandbits(8)
61 for _ in range(entropy))
62
63 def token(self, entropy: int = 32) -> str:
64 """Generate hexadecimal string.
65
66 :param entropy: Number of bytes.
67 :return: Token.
68 """
69 return self.bytes(entropy).hex()
70
71 def salt(self, size: int = 16) -> str:
72 """Generate salt chars (not cryptographically safe).
73
74 :param size: Salt size.
75 :return: Salt.
76 """
77 char_sequence = [
78 self.random.choice(self.__chars)
79 for _ in range(size)
80 ]
81 return ''.join(char_sequence)
82
83 def mnemonic_phrase(self, length: int = 12) -> str:
84 """Generate pseudo mnemonic phrase.
85
86 :param length: Number of words.
87 :return: Mnemonic code.
88 """
89 words = self.__words['normal']
90 return ' '.join(self.random.choice(words) for _ in range(length))
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mimesis/providers/business.py b/mimesis/providers/business.py
--- a/mimesis/providers/business.py
+++ b/mimesis/providers/business.py
@@ -27,8 +27,7 @@
:Example:
Gamma Systems.
"""
- return self.random.choice(
- self._data['company']['name'])
+ return self.random.choice(self._data['company']['name'])
def company_type(self, abbr: bool = False) -> str:
"""Get a random type of business entity.
@@ -40,11 +39,10 @@
Incorporated.
"""
return self.random.choice(
- self._data['company']['type'].get(
- 'abbr' if abbr else 'title'),
+ self._data['company']['type'].get('abbr' if abbr else 'title'),
)
- def copyright(self) -> str:
+ def copyright(self) -> str: # noqa: A002
"""Generate a random copyright.
:return: Copyright of company.
diff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py
--- a/mimesis/providers/cryptographic.py
+++ b/mimesis/providers/cryptographic.py
@@ -34,7 +34,7 @@
bits = self.random.getrandbits(128)
return str(uuid.UUID(int=bits, version=version))
- def hash(self, algorithm: Optional[Algorithm] = None) -> str:
+ def hash(self, algorithm: Optional[Algorithm] = None) -> str: # noqa: A002
"""Generate random hash.
:param algorithm: Enum object ``Algorithm``.
@@ -47,7 +47,7 @@
fn = getattr(hashlib, key)
return fn(self.uuid().encode()).hexdigest()
- def bytes(self, entropy: int = 32) -> Bytes:
+ def bytes(self, entropy: int = 32) -> Bytes: # noqa: A002
"""Generate byte string containing *entropy* bytes.
The string has *entropy* random bytes, each byte
|
{"golden_diff": "diff --git a/mimesis/providers/business.py b/mimesis/providers/business.py\n--- a/mimesis/providers/business.py\n+++ b/mimesis/providers/business.py\n@@ -27,8 +27,7 @@\n :Example:\n Gamma Systems.\n \"\"\"\n- return self.random.choice(\n- self._data['company']['name'])\n+ return self.random.choice(self._data['company']['name'])\n \n def company_type(self, abbr: bool = False) -> str:\n \"\"\"Get a random type of business entity.\n@@ -40,11 +39,10 @@\n Incorporated.\n \"\"\"\n return self.random.choice(\n- self._data['company']['type'].get(\n- 'abbr' if abbr else 'title'),\n+ self._data['company']['type'].get('abbr' if abbr else 'title'),\n )\n \n- def copyright(self) -> str:\n+ def copyright(self) -> str: # noqa: A002\n \"\"\"Generate a random copyright.\n \n :return: Copyright of company.\ndiff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py\n--- a/mimesis/providers/cryptographic.py\n+++ b/mimesis/providers/cryptographic.py\n@@ -34,7 +34,7 @@\n bits = self.random.getrandbits(128)\n return str(uuid.UUID(int=bits, version=version))\n \n- def hash(self, algorithm: Optional[Algorithm] = None) -> str:\n+ def hash(self, algorithm: Optional[Algorithm] = None) -> str: # noqa: A002\n \"\"\"Generate random hash.\n \n :param algorithm: Enum object ``Algorithm``.\n@@ -47,7 +47,7 @@\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest()\n \n- def bytes(self, entropy: int = 32) -> Bytes:\n+ def bytes(self, entropy: int = 32) -> Bytes: # noqa: A002\n \"\"\"Generate byte string containing *entropy* bytes.\n \n The string has *entropy* random bytes, each byte\n", "issue": "Arguments shadow builtin names\nAfter we have updated `flake8-builtins` we got new several new errors.\r\n\r\n```\r\n=================================== FAILURES ===================================\r\n_________________________________ FLAKE8-check _________________________________\r\n/home/travis/build/lk-geimfari/mimesis/mimesis/providers/business.py:47:5: A002 \"copyright\" is used as an argument and thus shadows a python builtin, consider renaming the argument\r\n_________________________________ FLAKE8-check _________________________________\r\n/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:37:5: A002 \"hash\" is used as an argument and thus shadows a python builtin, consider renaming the argument\r\n/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:50:5: A002 \"bytes\" is used as an argument and thus shadows a python builtin, consider renaming the argument\r\n```\nArguments shadow builtin names\nAfter we have updated `flake8-builtins` we got new several new errors.\r\n\r\n```\r\n=================================== FAILURES ===================================\r\n_________________________________ FLAKE8-check _________________________________\r\n/home/travis/build/lk-geimfari/mimesis/mimesis/providers/business.py:47:5: A002 \"copyright\" is used as an argument and thus shadows a python builtin, consider renaming the argument\r\n_________________________________ FLAKE8-check _________________________________\r\n/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:37:5: A002 \"hash\" is used as an argument and thus shadows a python builtin, consider renaming the argument\r\n/home/travis/build/lk-geimfari/mimesis/mimesis/providers/cryptographic.py:50:5: A002 \"bytes\" is used as an argument and thus shadows a python builtin, consider renaming the argument\r\n```\n", "before_files": [{"content": 
"\"\"\"Business data provider.\"\"\"\n\nfrom mimesis.data import (CRYPTOCURRENCY_ISO_CODES, CRYPTOCURRENCY_SYMBOLS,\n CURRENCY_ISO_CODES, CURRENCY_SYMBOLS)\nfrom mimesis.providers.base import BaseDataProvider\nfrom mimesis.utils import pull\n\n__all__ = ['Business']\n\n\nclass Business(BaseDataProvider):\n \"\"\"Class for generating data for business.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param locale: Current locale.\n \"\"\"\n super().__init__(*args, **kwargs)\n self._data = pull('business.json', self.locale)\n\n def company(self) -> str:\n \"\"\"Get a random company name.\n\n :return: Company name.\n\n :Example:\n Gamma Systems.\n \"\"\"\n return self.random.choice(\n self._data['company']['name'])\n\n def company_type(self, abbr: bool = False) -> str:\n \"\"\"Get a random type of business entity.\n\n :param abbr: Abbreviated company type.\n :return: Types of business entity.\n\n :Example:\n Incorporated.\n \"\"\"\n return self.random.choice(\n self._data['company']['type'].get(\n 'abbr' if abbr else 'title'),\n )\n\n def copyright(self) -> str:\n \"\"\"Generate a random copyright.\n\n :return: Copyright of company.\n\n :Example:\n \u00a9 Komercia, Inc.\n \"\"\"\n return '\u00a9 {}, {}'.format(\n self.company(),\n self.company_type(abbr=True),\n )\n\n def currency_iso_code(self) -> str:\n \"\"\"Get code of the currency.\n\n :return: Currency code.\n\n :Example:\n RUR.\n \"\"\"\n return self.random.choice(CURRENCY_ISO_CODES)\n\n def cryptocurrency_iso_code(self) -> str:\n \"\"\"Get symbol of random cryptocurrency.\n\n :return: Symbol of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_ISO_CODES)\n\n def currency_symbol(self):\n \"\"\"Get a currency symbol for current locale.\n\n :return: Currency symbol.\n \"\"\"\n return CURRENCY_SYMBOLS[self.locale]\n\n def cryptocurrency_symbol(self) -> str:\n \"\"\"Get a cryptocurrency symbol.\n\n :return: Symbol of cryptocurrency.\n\n :Example:\n \u0243\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_SYMBOLS)\n\n def price(self, minimum: float = 10.00,\n maximum: float = 1000.00) -> str:\n \"\"\"Generate a random price.\n\n :param minimum: Max value of price.\n :param maximum: Min value of price.\n :return: Price.\n\n :Example:\n 599.99 $.\n \"\"\"\n price = self.random.uniform(minimum, maximum, precision=2)\n return '{0} {1}'.format(price, self.currency_symbol())\n\n def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:\n \"\"\"Generate random price in BTC.\n\n :param minimum: Minimum value of price\n :param maximum: Maximum value of price.\n :return: Price in BTC.\n\n :Example:\n 0.5885238 BTC\n \"\"\"\n return '{} BTC'.format(\n self.random.uniform(\n minimum,\n maximum,\n precision=7,\n ),\n )\n", "path": "mimesis/providers/business.py"}, {"content": "\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport string\nimport uuid\nfrom typing import Optional\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseDataProvider\nfrom mimesis.providers.text import Text\nfrom mimesis.typing import Bytes\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseDataProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data['words']\n self.__chars = string.ascii_letters + string.digits + string.punctuation\n\n def uuid(self, version: Optional[int] = 
None) -> str:\n \"\"\"Generate random UUID.\n\n :param version: UUID version.\n :return: UUID\n \"\"\"\n bits = self.random.getrandbits(128)\n return str(uuid.UUID(int=bits, version=version))\n\n def hash(self, algorithm: Optional[Algorithm] = None) -> str:\n \"\"\"Generate random hash.\n\n :param algorithm: Enum object ``Algorithm``.\n :return: Hash.\n :raises NonEnumerableError: if algorithm is not supported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest()\n\n def bytes(self, entropy: int = 32) -> Bytes:\n \"\"\"Generate byte string containing *entropy* bytes.\n\n The string has *entropy* random bytes, each byte\n converted to two hex digits.\n\n :param entropy: Number of bytes.\n :return: Bytes.\n :rtype: bytes\n \"\"\"\n return bytes(self.random.getrandbits(8)\n for _ in range(entropy))\n\n def token(self, entropy: int = 32) -> str:\n \"\"\"Generate hexadecimal string.\n\n :param entropy: Number of bytes.\n :return: Token.\n \"\"\"\n return self.bytes(entropy).hex()\n\n def salt(self, size: int = 16) -> str:\n \"\"\"Generate salt chars (not cryptographically safe).\n\n :param size: Salt size.\n :return: Salt.\n \"\"\"\n char_sequence = [\n self.random.choice(self.__chars)\n for _ in range(size)\n ]\n return ''.join(char_sequence)\n\n def mnemonic_phrase(self, length: int = 12) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n :param length: Number of words.\n :return: Mnemonic code.\n \"\"\"\n words = self.__words['normal']\n return ' '.join(self.random.choice(words) for _ in range(length))\n", "path": "mimesis/providers/cryptographic.py"}], "after_files": [{"content": "\"\"\"Business data provider.\"\"\"\n\nfrom mimesis.data import (CRYPTOCURRENCY_ISO_CODES, CRYPTOCURRENCY_SYMBOLS,\n CURRENCY_ISO_CODES, CURRENCY_SYMBOLS)\nfrom mimesis.providers.base import BaseDataProvider\nfrom mimesis.utils import pull\n\n__all__ = ['Business']\n\n\nclass Business(BaseDataProvider):\n \"\"\"Class for generating data for business.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param locale: Current locale.\n \"\"\"\n super().__init__(*args, **kwargs)\n self._data = pull('business.json', self.locale)\n\n def company(self) -> str:\n \"\"\"Get a random company name.\n\n :return: Company name.\n\n :Example:\n Gamma Systems.\n \"\"\"\n return self.random.choice(self._data['company']['name'])\n\n def company_type(self, abbr: bool = False) -> str:\n \"\"\"Get a random type of business entity.\n\n :param abbr: Abbreviated company type.\n :return: Types of business entity.\n\n :Example:\n Incorporated.\n \"\"\"\n return self.random.choice(\n self._data['company']['type'].get('abbr' if abbr else 'title'),\n )\n\n def copyright(self) -> str: # noqa: A002\n \"\"\"Generate a random copyright.\n\n :return: Copyright of company.\n\n :Example:\n \u00a9 Komercia, Inc.\n \"\"\"\n return '\u00a9 {}, {}'.format(\n self.company(),\n self.company_type(abbr=True),\n )\n\n def currency_iso_code(self) -> str:\n \"\"\"Get code of the currency.\n\n :return: Currency code.\n\n :Example:\n RUR.\n \"\"\"\n return self.random.choice(CURRENCY_ISO_CODES)\n\n def cryptocurrency_iso_code(self) -> str:\n \"\"\"Get symbol of random cryptocurrency.\n\n :return: Symbol of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_ISO_CODES)\n\n def currency_symbol(self):\n \"\"\"Get a currency symbol for current locale.\n\n :return: Currency symbol.\n \"\"\"\n return 
CURRENCY_SYMBOLS[self.locale]\n\n def cryptocurrency_symbol(self) -> str:\n \"\"\"Get a cryptocurrency symbol.\n\n :return: Symbol of cryptocurrency.\n\n :Example:\n \u0243\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_SYMBOLS)\n\n def price(self, minimum: float = 10.00,\n maximum: float = 1000.00) -> str:\n \"\"\"Generate a random price.\n\n :param minimum: Max value of price.\n :param maximum: Min value of price.\n :return: Price.\n\n :Example:\n 599.99 $.\n \"\"\"\n price = self.random.uniform(minimum, maximum, precision=2)\n return '{0} {1}'.format(price, self.currency_symbol())\n\n def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:\n \"\"\"Generate random price in BTC.\n\n :param minimum: Minimum value of price\n :param maximum: Maximum value of price.\n :return: Price in BTC.\n\n :Example:\n 0.5885238 BTC\n \"\"\"\n return '{} BTC'.format(\n self.random.uniform(\n minimum,\n maximum,\n precision=7,\n ),\n )\n", "path": "mimesis/providers/business.py"}, {"content": "\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport string\nimport uuid\nfrom typing import Optional\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseDataProvider\nfrom mimesis.providers.text import Text\nfrom mimesis.typing import Bytes\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseDataProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data['words']\n self.__chars = string.ascii_letters + string.digits + string.punctuation\n\n def uuid(self, version: Optional[int] = None) -> str:\n \"\"\"Generate random UUID.\n\n :param version: UUID version.\n :return: UUID\n \"\"\"\n bits = self.random.getrandbits(128)\n return str(uuid.UUID(int=bits, version=version))\n\n def hash(self, algorithm: Optional[Algorithm] = None) -> str: # noqa: A002\n \"\"\"Generate random hash.\n\n :param algorithm: Enum object ``Algorithm``.\n :return: Hash.\n :raises NonEnumerableError: if algorithm is not supported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest()\n\n def bytes(self, entropy: int = 32) -> Bytes: # noqa: A002\n \"\"\"Generate byte string containing *entropy* bytes.\n\n The string has *entropy* random bytes, each byte\n converted to two hex digits.\n\n :param entropy: Number of bytes.\n :return: Bytes.\n :rtype: bytes\n \"\"\"\n return bytes(self.random.getrandbits(8)\n for _ in range(entropy))\n\n def token(self, entropy: int = 32) -> str:\n \"\"\"Generate hexadecimal string.\n\n :param entropy: Number of bytes.\n :return: Token.\n \"\"\"\n return self.bytes(entropy).hex()\n\n def salt(self, size: int = 16) -> str:\n \"\"\"Generate salt chars (not cryptographically safe).\n\n :param size: Salt size.\n :return: Salt.\n \"\"\"\n char_sequence = [\n self.random.choice(self.__chars)\n for _ in range(size)\n ]\n return ''.join(char_sequence)\n\n def mnemonic_phrase(self, length: int = 12) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n :param length: Number of words.\n :return: Mnemonic code.\n \"\"\"\n words = self.__words['normal']\n return ' '.join(self.random.choice(words) for _ in range(length))\n", "path": "mimesis/providers/cryptographic.py"}]}
| 2,472 | 478 |
gh_patches_debug_31158
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-3189
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSV export does not include read date
**Describe the bug**
When exporting data into a CSV file, several fields are exported, but `read date` is not one of them, despite being extremely valuable.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'Profile'
2. Click on 'Export to CSV'
3. Download CSV file
4. Open CSV file
**Expected behavior**
A column containing read date should be included among the current ones
**Instance**
bookwyrm.social
---
**Desktop (please complete the following information):**
- OS: KDE Neon
- Browser Firefox, Chromium
- Version
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/preferences/export.py`
Content:
```
1 """ Let users export their book data """
2 from datetime import timedelta
3 import csv
4 import io
5
6 from django.contrib.auth.decorators import login_required
7 from django.core.paginator import Paginator
8 from django.db.models import Q
9 from django.http import HttpResponse
10 from django.template.response import TemplateResponse
11 from django.utils import timezone
12 from django.views import View
13 from django.utils.decorators import method_decorator
14 from django.shortcuts import redirect
15
16 from bookwyrm import models
17 from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob
18 from bookwyrm.settings import PAGE_LENGTH
19
20 # pylint: disable=no-self-use
21 @method_decorator(login_required, name="dispatch")
22 class Export(View):
23 """Let users export data"""
24
25 def get(self, request):
26 """Request csv file"""
27 return TemplateResponse(request, "preferences/export.html")
28
29 def post(self, request):
30 """Download the csv file of a user's book data"""
31 books = models.Edition.viewer_aware_objects(request.user)
32 books_shelves = books.filter(Q(shelves__user=request.user)).distinct()
33 books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct()
34 books_review = books.filter(Q(review__user=request.user)).distinct()
35 books_comment = books.filter(Q(comment__user=request.user)).distinct()
36 books_quotation = books.filter(Q(quotation__user=request.user)).distinct()
37
38 books = set(
39 list(books_shelves)
40 + list(books_readthrough)
41 + list(books_review)
42 + list(books_comment)
43 + list(books_quotation)
44 )
45
46 csv_string = io.StringIO()
47 writer = csv.writer(csv_string)
48
49 deduplication_fields = [
50 f.name
51 for f in models.Edition._meta.get_fields() # pylint: disable=protected-access
52 if getattr(f, "deduplication_field", False)
53 ]
54 fields = (
55 ["title", "author_text"]
56 + deduplication_fields
57 + ["rating", "review_name", "review_cw", "review_content"]
58 )
59 writer.writerow(fields)
60
61 for book in books:
62 # I think this is more efficient than doing a subquery in the view? but idk
63 review_rating = (
64 models.Review.objects.filter(
65 user=request.user, book=book, rating__isnull=False
66 )
67 .order_by("-published_date")
68 .first()
69 )
70
71 book.rating = review_rating.rating if review_rating else None
72
73 review = (
74 models.Review.objects.filter(
75 user=request.user, book=book, content__isnull=False
76 )
77 .order_by("-published_date")
78 .first()
79 )
80 if review:
81 book.review_name = review.name
82 book.review_cw = review.content_warning
83 book.review_content = review.raw_content
84 writer.writerow([getattr(book, field, "") or "" for field in fields])
85
86 return HttpResponse(
87 csv_string.getvalue(),
88 content_type="text/csv",
89 headers={
90 "Content-Disposition": 'attachment; filename="bookwyrm-export.csv"'
91 },
92 )
93
94
95 # pylint: disable=no-self-use
96 @method_decorator(login_required, name="dispatch")
97 class ExportUser(View):
98 """Let users export user data to import into another Bookwyrm instance"""
99
100 def get(self, request):
101 """Request tar file"""
102
103 jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by(
104 "-created_date"
105 )
106 site = models.SiteSettings.objects.get()
107 hours = site.user_import_time_limit
108 allowed = (
109 jobs.first().created_date < timezone.now() - timedelta(hours=hours)
110 if jobs.first()
111 else True
112 )
113 next_available = (
114 jobs.first().created_date + timedelta(hours=hours) if not allowed else False
115 )
116 paginated = Paginator(jobs, PAGE_LENGTH)
117 page = paginated.get_page(request.GET.get("page"))
118 data = {
119 "jobs": page,
120 "next_available": next_available,
121 "page_range": paginated.get_elided_page_range(
122 page.number, on_each_side=2, on_ends=1
123 ),
124 }
125
126 return TemplateResponse(request, "preferences/export-user.html", data)
127
128 def post(self, request):
129 """Download the json file of a user's data"""
130
131 job = BookwyrmExportJob.objects.create(user=request.user)
132 job.start_job()
133
134 return redirect("prefs-user-export")
135
136
137 @method_decorator(login_required, name="dispatch")
138 class ExportArchive(View):
139 """Serve the archive file"""
140
141 def get(self, request, archive_id):
142 """download user export file"""
143 export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user)
144 return HttpResponse(
145 export.export_data,
146 content_type="application/gzip",
147 headers={
148 "Content-Disposition": 'attachment; filename="bookwyrm-account-export.tar.gz"' # pylint: disable=line-too-long
149 },
150 )
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/views/preferences/export.py b/bookwyrm/views/preferences/export.py
--- a/bookwyrm/views/preferences/export.py
+++ b/bookwyrm/views/preferences/export.py
@@ -17,7 +17,7 @@
from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob
from bookwyrm.settings import PAGE_LENGTH
-# pylint: disable=no-self-use
+# pylint: disable=no-self-use,too-many-locals
@method_decorator(login_required, name="dispatch")
class Export(View):
"""Let users export data"""
@@ -54,6 +54,7 @@
fields = (
["title", "author_text"]
+ deduplication_fields
+ + ["start_date", "finish_date", "stopped_date"]
+ ["rating", "review_name", "review_cw", "review_content"]
)
writer.writerow(fields)
@@ -70,6 +71,24 @@
book.rating = review_rating.rating if review_rating else None
+ readthrough = (
+ models.ReadThrough.objects.filter(user=request.user, book=book)
+ .order_by("-start_date", "-finish_date")
+ .first()
+ )
+ if readthrough:
+ book.start_date = (
+ readthrough.start_date.date() if readthrough.start_date else None
+ )
+ book.finish_date = (
+ readthrough.finish_date.date() if readthrough.finish_date else None
+ )
+ book.stopped_date = (
+ readthrough.stopped_date.date()
+ if readthrough.stopped_date
+ else None
+ )
+
review = (
models.Review.objects.filter(
user=request.user, book=book, content__isnull=False
|
{"golden_diff": "diff --git a/bookwyrm/views/preferences/export.py b/bookwyrm/views/preferences/export.py\n--- a/bookwyrm/views/preferences/export.py\n+++ b/bookwyrm/views/preferences/export.py\n@@ -17,7 +17,7 @@\n from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\n from bookwyrm.settings import PAGE_LENGTH\n \n-# pylint: disable=no-self-use\n+# pylint: disable=no-self-use,too-many-locals\n @method_decorator(login_required, name=\"dispatch\")\n class Export(View):\n \"\"\"Let users export data\"\"\"\n@@ -54,6 +54,7 @@\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n+ + [\"start_date\", \"finish_date\", \"stopped_date\"]\n + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n )\n writer.writerow(fields)\n@@ -70,6 +71,24 @@\n \n book.rating = review_rating.rating if review_rating else None\n \n+ readthrough = (\n+ models.ReadThrough.objects.filter(user=request.user, book=book)\n+ .order_by(\"-start_date\", \"-finish_date\")\n+ .first()\n+ )\n+ if readthrough:\n+ book.start_date = (\n+ readthrough.start_date.date() if readthrough.start_date else None\n+ )\n+ book.finish_date = (\n+ readthrough.finish_date.date() if readthrough.finish_date else None\n+ )\n+ book.stopped_date = (\n+ readthrough.stopped_date.date()\n+ if readthrough.stopped_date\n+ else None\n+ )\n+\n review = (\n models.Review.objects.filter(\n user=request.user, book=book, content__isnull=False\n", "issue": "CSV export does not include read date\n**Describe the bug**\r\nWhen exporting data into a CSV file, several fields are exported, but `read date` is not one of them, despite being exremelly valuable.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Profile'\r\n2. Click on 'Export to CSV'\r\n3. Download CSV file\r\n4. 
Open CSV file\r\n\r\n**Expected behavior**\r\nA column containing read date should be included among the current ones\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: KDE Neon\r\n - Browser Firefox, Chromium\r\n - Version \r\n\n", "before_files": [{"content": "\"\"\" Let users export their book data \"\"\"\nfrom datetime import timedelta\nimport csv\nimport io\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\n\nfrom bookwyrm import models\nfrom bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\nfrom bookwyrm.settings import PAGE_LENGTH\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Export(View):\n \"\"\"Let users export data\"\"\"\n\n def get(self, request):\n \"\"\"Request csv file\"\"\"\n return TemplateResponse(request, \"preferences/export.html\")\n\n def post(self, request):\n \"\"\"Download the csv file of a user's book data\"\"\"\n books = models.Edition.viewer_aware_objects(request.user)\n books_shelves = books.filter(Q(shelves__user=request.user)).distinct()\n books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct()\n books_review = books.filter(Q(review__user=request.user)).distinct()\n books_comment = books.filter(Q(comment__user=request.user)).distinct()\n books_quotation = books.filter(Q(quotation__user=request.user)).distinct()\n\n books = set(\n list(books_shelves)\n + list(books_readthrough)\n + list(books_review)\n + list(books_comment)\n + list(books_quotation)\n )\n\n csv_string = io.StringIO()\n writer = csv.writer(csv_string)\n\n deduplication_fields = [\n f.name\n for f in models.Edition._meta.get_fields() # pylint: disable=protected-access\n if getattr(f, \"deduplication_field\", False)\n ]\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n )\n writer.writerow(fields)\n\n for book in books:\n # I think this is more efficient than doing a subquery in the view? 
but idk\n review_rating = (\n models.Review.objects.filter(\n user=request.user, book=book, rating__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n\n book.rating = review_rating.rating if review_rating else None\n\n review = (\n models.Review.objects.filter(\n user=request.user, book=book, content__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n if review:\n book.review_name = review.name\n book.review_cw = review.content_warning\n book.review_content = review.raw_content\n writer.writerow([getattr(book, field, \"\") or \"\" for field in fields])\n\n return HttpResponse(\n csv_string.getvalue(),\n content_type=\"text/csv\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-export.csv\"'\n },\n )\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportUser(View):\n \"\"\"Let users export user data to import into another Bookwyrm instance\"\"\"\n\n def get(self, request):\n \"\"\"Request tar file\"\"\"\n\n jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n )\n site = models.SiteSettings.objects.get()\n hours = site.user_import_time_limit\n allowed = (\n jobs.first().created_date < timezone.now() - timedelta(hours=hours)\n if jobs.first()\n else True\n )\n next_available = (\n jobs.first().created_date + timedelta(hours=hours) if not allowed else False\n )\n paginated = Paginator(jobs, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"jobs\": page,\n \"next_available\": next_available,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"preferences/export-user.html\", data)\n\n def post(self, request):\n \"\"\"Download the json file of a user's data\"\"\"\n\n job = BookwyrmExportJob.objects.create(user=request.user)\n job.start_job()\n\n return redirect(\"prefs-user-export\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportArchive(View):\n \"\"\"Serve the archive file\"\"\"\n\n def get(self, request, archive_id):\n \"\"\"download user export file\"\"\"\n export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user)\n return HttpResponse(\n export.export_data,\n content_type=\"application/gzip\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-account-export.tar.gz\"' # pylint: disable=line-too-long\n },\n )\n", "path": "bookwyrm/views/preferences/export.py"}], "after_files": [{"content": "\"\"\" Let users export their book data \"\"\"\nfrom datetime import timedelta\nimport csv\nimport io\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\n\nfrom bookwyrm import models\nfrom bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\nfrom bookwyrm.settings import PAGE_LENGTH\n\n# pylint: disable=no-self-use,too-many-locals\n@method_decorator(login_required, name=\"dispatch\")\nclass Export(View):\n \"\"\"Let users export data\"\"\"\n\n def get(self, request):\n \"\"\"Request csv file\"\"\"\n return TemplateResponse(request, \"preferences/export.html\")\n\n def post(self, request):\n \"\"\"Download the csv file of a user's book 
data\"\"\"\n books = models.Edition.viewer_aware_objects(request.user)\n books_shelves = books.filter(Q(shelves__user=request.user)).distinct()\n books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct()\n books_review = books.filter(Q(review__user=request.user)).distinct()\n books_comment = books.filter(Q(comment__user=request.user)).distinct()\n books_quotation = books.filter(Q(quotation__user=request.user)).distinct()\n\n books = set(\n list(books_shelves)\n + list(books_readthrough)\n + list(books_review)\n + list(books_comment)\n + list(books_quotation)\n )\n\n csv_string = io.StringIO()\n writer = csv.writer(csv_string)\n\n deduplication_fields = [\n f.name\n for f in models.Edition._meta.get_fields() # pylint: disable=protected-access\n if getattr(f, \"deduplication_field\", False)\n ]\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n + [\"start_date\", \"finish_date\", \"stopped_date\"]\n + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n )\n writer.writerow(fields)\n\n for book in books:\n # I think this is more efficient than doing a subquery in the view? but idk\n review_rating = (\n models.Review.objects.filter(\n user=request.user, book=book, rating__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n\n book.rating = review_rating.rating if review_rating else None\n\n readthrough = (\n models.ReadThrough.objects.filter(user=request.user, book=book)\n .order_by(\"-start_date\", \"-finish_date\")\n .first()\n )\n if readthrough:\n book.start_date = (\n readthrough.start_date.date() if readthrough.start_date else None\n )\n book.finish_date = (\n readthrough.finish_date.date() if readthrough.finish_date else None\n )\n book.stopped_date = (\n readthrough.stopped_date.date()\n if readthrough.stopped_date\n else None\n )\n\n review = (\n models.Review.objects.filter(\n user=request.user, book=book, content__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n if review:\n book.review_name = review.name\n book.review_cw = review.content_warning\n book.review_content = review.raw_content\n writer.writerow([getattr(book, field, \"\") or \"\" for field in fields])\n\n return HttpResponse(\n csv_string.getvalue(),\n content_type=\"text/csv\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-export.csv\"'\n },\n )\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportUser(View):\n \"\"\"Let users export user data to import into another Bookwyrm instance\"\"\"\n\n def get(self, request):\n \"\"\"Request tar file\"\"\"\n\n jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n )\n site = models.SiteSettings.objects.get()\n hours = site.user_import_time_limit\n allowed = (\n jobs.first().created_date < timezone.now() - timedelta(hours=hours)\n if jobs.first()\n else True\n )\n next_available = (\n jobs.first().created_date + timedelta(hours=hours) if not allowed else False\n )\n paginated = Paginator(jobs, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"jobs\": page,\n \"next_available\": next_available,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"preferences/export-user.html\", data)\n\n def post(self, request):\n \"\"\"Download the json file of a user's data\"\"\"\n\n job = BookwyrmExportJob.objects.create(user=request.user)\n job.start_job()\n\n return 
redirect(\"prefs-user-export\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportArchive(View):\n \"\"\"Serve the archive file\"\"\"\n\n def get(self, request, archive_id):\n \"\"\"download user export file\"\"\"\n export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user)\n return HttpResponse(\n export.export_data,\n content_type=\"application/gzip\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-account-export.tar.gz\"' # pylint: disable=line-too-long\n },\n )\n", "path": "bookwyrm/views/preferences/export.py"}]}
| 1,808 | 380 |
gh_patches_debug_5570
|
rasdani/github-patches
|
git_diff
|
mindsdb__lightwood-40
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModuleNotFoundError: No module named '_lzma'
I've tried to test lightwood with [home rentals
example](https://github.com/mindsdb/lightwood/blob/master/docs/examples/home_rentals.py) but got ModuleNotFoundError: No module named '_lzma'.
Screenshot:
It looks like _lzma is a dependency to pandas, but it should be included with Python 3.x version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/__about__.py`
Content:
```
1 __title__ = 'lightwood'
2 __package_name__ = 'mindsdb'
3 __version__ = '0.9.0'
4 __description__ = "Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
5 __email__ = "[email protected]"
6 __author__ = 'MindsDB Inc'
7 __github__ = 'https://github.com/mindsdb/lightwood'
8 __pypi__ = 'https://pypi.org/project/lightwood'
9 __license__ = 'MIT'
10 __copyright__ = 'Copyright 2019- mindsdb'
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lightwood/__about__.py b/lightwood/__about__.py
--- a/lightwood/__about__.py
+++ b/lightwood/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'lightwood'
__package_name__ = 'mindsdb'
-__version__ = '0.9.0'
+__version__ = '0.9.1'
__description__ = "Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
|
{"golden_diff": "diff --git a/lightwood/__about__.py b/lightwood/__about__.py\n--- a/lightwood/__about__.py\n+++ b/lightwood/__about__.py\n@@ -1,6 +1,6 @@\n __title__ = 'lightwood'\n __package_name__ = 'mindsdb'\n-__version__ = '0.9.0'\n+__version__ = '0.9.1'\n __description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n __email__ = \"[email protected]\"\n __author__ = 'MindsDB Inc'\n", "issue": "ModuleNotFoundError: No module named '_lzma'\nI've tried to test lightwood with [home rentals\r\n example](https://github.com/mindsdb/lightwood/blob/master/docs/examples/home_rentals.py) but got ModuleNotFoundError: No module named '_lzma'.\r\n\r\nScreenshot:\r\n\r\n\r\nIt looks like _lzma is a dependency to pandas, but it should be included with Python 3.x version.\r\n\r\n\n", "before_files": [{"content": "__title__ = 'lightwood'\n__package_name__ = 'mindsdb'\n__version__ = '0.9.0'\n__description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/lightwood'\n__pypi__ = 'https://pypi.org/project/lightwood'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2019- mindsdb'\n", "path": "lightwood/__about__.py"}], "after_files": [{"content": "__title__ = 'lightwood'\n__package_name__ = 'mindsdb'\n__version__ = '0.9.1'\n__description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/lightwood'\n__pypi__ = 'https://pypi.org/project/lightwood'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2019- mindsdb'\n", "path": "lightwood/__about__.py"}]}
| 571 | 139 |
gh_patches_debug_19383
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-7387
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
datatablesview: show columns feature bug
## CKAN version
master, 2.10 (earlier?)
## Describe the bug
If any columns are unselected when creating a view, the view will not appear.
### Steps to reproduce
When creating or editing a datatablesview (not viewing an existing one) un-check some columns and save the view.
### Expected behavior
Those columns should be excluded but view should still work.
### Additional details
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext/datastore/helpers.py`
Content:
```
1 # encoding: utf-8
2 from __future__ import annotations
3
4 import json
5 import logging
6 from typing import (
7 Any, Iterable, Optional, Sequence, Union, cast, overload
8 )
9 from typing_extensions import Literal
10
11 import sqlparse
12 import six
13
14 import ckan.common as converters
15 import ckan.plugins.toolkit as tk
16 from ckan.types import Context
17
18
19 log = logging.getLogger(__name__)
20
21
22 def is_single_statement(sql: str):
23 '''Returns True if received SQL string contains at most one statement'''
24 return len(sqlparse.split(sql)) <= 1
25
26
27 def is_valid_field_name(name: str):
28 '''
29 Check that field name is valid:
30 * can't start or end with whitespace characters
31 * can't start with underscore
32 * can't contain double quote (")
33 * can't be empty
34 '''
35 return (name and name == name.strip() and
36 not name.startswith('_') and
37 '"' not in name)
38
39
40 def is_valid_table_name(name: str):
41 if '%' in name:
42 return False
43 return is_valid_field_name(name)
44
45
46 @overload
47 def get_list(input: Literal[None], strip_values: bool = ...) -> Literal[None]:
48 ...
49
50
51 @overload
52 def get_list(input: Union[str, "Sequence[Any]"],
53 strip_values: bool = ...) -> list[str]:
54 ...
55
56
57 def get_list(input: Any, strip_values: bool = True) -> Optional[list[str]]:
58 '''Transforms a string or list to a list'''
59 if input is None:
60 return
61 if input == '':
62 return []
63
64 converters_list = converters.aslist(input, ',', True)
65 if strip_values:
66 return [_strip(x) for x in converters_list]
67 else:
68 return converters_list
69
70
71 def validate_int(i: Any, non_negative: bool = False):
72 try:
73 i = int(i)
74 except ValueError:
75 return False
76 return i >= 0 or not non_negative
77
78
79 def _strip(s: Any):
80 if isinstance(s, str) and len(s) and s[0] == s[-1]:
81 return s.strip().strip('"')
82 return s
83
84
85 def should_fts_index_field_type(field_type: str):
86 return field_type.lower() in ['tsvector', 'text', 'number']
87
88
89 def get_table_and_function_names_from_sql(context: Context, sql: str):
90 '''Parses the output of EXPLAIN (FORMAT JSON) looking for table and
91 function names
92
93 It performs an EXPLAIN query against the provided SQL, and parses
94 the output recusively.
95
96 Note that this requires Postgres 9.x.
97
98 :param context: a CKAN context dict. It must contain a 'connection' key
99 with the current DB connection.
100 :type context: dict
101 :param sql: the SQL statement to parse for table and function names
102 :type sql: string
103
104 :rtype: a tuple with two list of strings, one for table and one for
105 function names
106 '''
107
108 queries = [sql]
109 table_names: list[str] = []
110 function_names: list[str] = []
111
112 while queries:
113 sql = queries.pop()
114
115 function_names.extend(_get_function_names_from_sql(sql))
116
117 result = context['connection'].execute(
118 'EXPLAIN (VERBOSE, FORMAT JSON) {0}'.format(
119 six.ensure_str(sql))).fetchone()
120
121 try:
122 query_plan = json.loads(result['QUERY PLAN'])
123 plan = query_plan[0]['Plan']
124
125 t, q, f = _parse_query_plan(plan)
126 table_names.extend(t)
127 queries.extend(q)
128
129 function_names = list(set(function_names) | set(f))
130
131 except ValueError:
132 log.error('Could not parse query plan')
133 raise
134
135 return table_names, function_names
136
137
138 def _parse_query_plan(
139 plan: dict[str, Any]) -> tuple[list[str], list[str], list[str]]:
140 '''
141 Given a Postgres Query Plan object (parsed from the output of an EXPLAIN
142 query), returns a tuple with three items:
143
144 * A list of tables involved
145 * A list of remaining queries to parse
146 * A list of function names involved
147 '''
148
149 table_names: list[str] = []
150 queries: list[str] = []
151 functions: list[str] = []
152
153 if plan.get('Relation Name'):
154 table_names.append(plan['Relation Name'])
155 if 'Function Name' in plan:
156 if plan['Function Name'].startswith(
157 'crosstab'):
158 try:
159 queries.append(_get_subquery_from_crosstab_call(
160 plan['Function Call']))
161 except ValueError:
162 table_names.append('_unknown_crosstab_sql')
163 else:
164 functions.append(plan['Function Name'])
165
166 if 'Plans' in plan:
167 for child_plan in plan['Plans']:
168 t, q, f = _parse_query_plan(child_plan)
169 table_names.extend(t)
170 queries.extend(q)
171 functions.extend(f)
172
173 return table_names, queries, functions
174
175
176 def _get_function_names_from_sql(sql: str):
177 function_names: list[str] = []
178
179 def _get_function_names(tokens: Iterable[Any]):
180 for token in tokens:
181 if isinstance(token, sqlparse.sql.Function):
182 function_name = cast(str, token.get_name())
183 if function_name not in function_names:
184 function_names.append(function_name)
185 if hasattr(token, 'tokens'):
186 _get_function_names(token.tokens)
187
188 parsed = sqlparse.parse(sql)[0]
189 _get_function_names(parsed.tokens)
190
191 return function_names
192
193
194 def _get_subquery_from_crosstab_call(ct: str):
195 """
196 Crosstabs are a useful feature some sites choose to enable on
197 their datastore databases. To support the sql parameter passed
198 safely we accept only the simple crosstab(text) form where text
199 is a literal SQL string, otherwise raise ValueError
200 """
201 if not ct.startswith("crosstab('") or not ct.endswith("'::text)"):
202 raise ValueError('only simple crosstab calls supported')
203 ct = ct[10:-8]
204 if "'" in ct.replace("''", ""):
205 raise ValueError('only escaped single quotes allowed in query')
206 return ct.replace("''", "'")
207
208
209 def datastore_dictionary(resource_id: str):
210 """
211 Return the data dictionary info for a resource
212 """
213 try:
214 return [
215 f for f in tk.get_action('datastore_search')(
216 {}, {
217 u'resource_id': resource_id,
218 u'limit': 0,
219 u'include_total': False})['fields']
220 if not f['id'].startswith(u'_')]
221 except (tk.ObjectNotFound, tk.NotAuthorized):
222 return []
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckanext/datastore/helpers.py b/ckanext/datastore/helpers.py
--- a/ckanext/datastore/helpers.py
+++ b/ckanext/datastore/helpers.py
@@ -206,9 +206,13 @@
return ct.replace("''", "'")
-def datastore_dictionary(resource_id: str):
+def datastore_dictionary(
+ resource_id: str, include_columns: Optional[list[str]] = None):
"""
- Return the data dictionary info for a resource
+ Return the data dictionary info for a resource, optionally filtering
+ columns returned.
+
+ include_columns is a list of column ids to include in the output
"""
try:
return [
@@ -217,6 +221,8 @@
u'resource_id': resource_id,
u'limit': 0,
u'include_total': False})['fields']
- if not f['id'].startswith(u'_')]
+ if not f['id'].startswith(u'_') and (
+ include_columns is None or f['id'] in include_columns)
+ ]
except (tk.ObjectNotFound, tk.NotAuthorized):
return []
|
{"golden_diff": "diff --git a/ckanext/datastore/helpers.py b/ckanext/datastore/helpers.py\n--- a/ckanext/datastore/helpers.py\n+++ b/ckanext/datastore/helpers.py\n@@ -206,9 +206,13 @@\n return ct.replace(\"''\", \"'\")\n \n \n-def datastore_dictionary(resource_id: str):\n+def datastore_dictionary(\n+ resource_id: str, include_columns: Optional[list[str]] = None):\n \"\"\"\n- Return the data dictionary info for a resource\n+ Return the data dictionary info for a resource, optionally filtering\n+ columns returned.\n+\n+ include_columns is a list of column ids to include in the output\n \"\"\"\n try:\n return [\n@@ -217,6 +221,8 @@\n u'resource_id': resource_id,\n u'limit': 0,\n u'include_total': False})['fields']\n- if not f['id'].startswith(u'_')]\n+ if not f['id'].startswith(u'_') and (\n+ include_columns is None or f['id'] in include_columns)\n+ ]\n except (tk.ObjectNotFound, tk.NotAuthorized):\n return []\n", "issue": "datatablesview: show columns feature bug\n## CKAN version\r\nmaster, 2.10 (earlier?)\r\n\r\n## Describe the bug\r\nIf any columns are unselected when creating a view, the view will not appear.\r\n\r\n### Steps to reproduce\r\nWhen creating or editing a datatablesview (not viewing an existing one) un-check some columns and save the view.\r\n\r\n### Expected behavior\r\nThose columns should be excluded but view should still work.\r\n\r\n### Additional details\r\n\r\n\r\n\n", "before_files": [{"content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport json\nimport logging\nfrom typing import (\n Any, Iterable, Optional, Sequence, Union, cast, overload\n)\nfrom typing_extensions import Literal\n\nimport sqlparse\nimport six\n\nimport ckan.common as converters\nimport ckan.plugins.toolkit as tk\nfrom ckan.types import Context\n\n\nlog = logging.getLogger(__name__)\n\n\ndef is_single_statement(sql: str):\n '''Returns True if received SQL string contains at most one statement'''\n return len(sqlparse.split(sql)) <= 1\n\n\ndef is_valid_field_name(name: str):\n '''\n Check that field name is valid:\n * can't start or end with whitespace characters\n * can't start with underscore\n * can't contain double quote (\")\n * can't be empty\n '''\n return (name and name == name.strip() and\n not name.startswith('_') and\n '\"' not in name)\n\n\ndef is_valid_table_name(name: str):\n if '%' in name:\n return False\n return is_valid_field_name(name)\n\n\n@overload\ndef get_list(input: Literal[None], strip_values: bool = ...) -> Literal[None]:\n ...\n\n\n@overload\ndef get_list(input: Union[str, \"Sequence[Any]\"],\n strip_values: bool = ...) 
-> list[str]:\n ...\n\n\ndef get_list(input: Any, strip_values: bool = True) -> Optional[list[str]]:\n '''Transforms a string or list to a list'''\n if input is None:\n return\n if input == '':\n return []\n\n converters_list = converters.aslist(input, ',', True)\n if strip_values:\n return [_strip(x) for x in converters_list]\n else:\n return converters_list\n\n\ndef validate_int(i: Any, non_negative: bool = False):\n try:\n i = int(i)\n except ValueError:\n return False\n return i >= 0 or not non_negative\n\n\ndef _strip(s: Any):\n if isinstance(s, str) and len(s) and s[0] == s[-1]:\n return s.strip().strip('\"')\n return s\n\n\ndef should_fts_index_field_type(field_type: str):\n return field_type.lower() in ['tsvector', 'text', 'number']\n\n\ndef get_table_and_function_names_from_sql(context: Context, sql: str):\n '''Parses the output of EXPLAIN (FORMAT JSON) looking for table and\n function names\n\n It performs an EXPLAIN query against the provided SQL, and parses\n the output recusively.\n\n Note that this requires Postgres 9.x.\n\n :param context: a CKAN context dict. It must contain a 'connection' key\n with the current DB connection.\n :type context: dict\n :param sql: the SQL statement to parse for table and function names\n :type sql: string\n\n :rtype: a tuple with two list of strings, one for table and one for\n function names\n '''\n\n queries = [sql]\n table_names: list[str] = []\n function_names: list[str] = []\n\n while queries:\n sql = queries.pop()\n\n function_names.extend(_get_function_names_from_sql(sql))\n\n result = context['connection'].execute(\n 'EXPLAIN (VERBOSE, FORMAT JSON) {0}'.format(\n six.ensure_str(sql))).fetchone()\n\n try:\n query_plan = json.loads(result['QUERY PLAN'])\n plan = query_plan[0]['Plan']\n\n t, q, f = _parse_query_plan(plan)\n table_names.extend(t)\n queries.extend(q)\n\n function_names = list(set(function_names) | set(f))\n\n except ValueError:\n log.error('Could not parse query plan')\n raise\n\n return table_names, function_names\n\n\ndef _parse_query_plan(\n plan: dict[str, Any]) -> tuple[list[str], list[str], list[str]]:\n '''\n Given a Postgres Query Plan object (parsed from the output of an EXPLAIN\n query), returns a tuple with three items:\n\n * A list of tables involved\n * A list of remaining queries to parse\n * A list of function names involved\n '''\n\n table_names: list[str] = []\n queries: list[str] = []\n functions: list[str] = []\n\n if plan.get('Relation Name'):\n table_names.append(plan['Relation Name'])\n if 'Function Name' in plan:\n if plan['Function Name'].startswith(\n 'crosstab'):\n try:\n queries.append(_get_subquery_from_crosstab_call(\n plan['Function Call']))\n except ValueError:\n table_names.append('_unknown_crosstab_sql')\n else:\n functions.append(plan['Function Name'])\n\n if 'Plans' in plan:\n for child_plan in plan['Plans']:\n t, q, f = _parse_query_plan(child_plan)\n table_names.extend(t)\n queries.extend(q)\n functions.extend(f)\n\n return table_names, queries, functions\n\n\ndef _get_function_names_from_sql(sql: str):\n function_names: list[str] = []\n\n def _get_function_names(tokens: Iterable[Any]):\n for token in tokens:\n if isinstance(token, sqlparse.sql.Function):\n function_name = cast(str, token.get_name())\n if function_name not in function_names:\n function_names.append(function_name)\n if hasattr(token, 'tokens'):\n _get_function_names(token.tokens)\n\n parsed = sqlparse.parse(sql)[0]\n _get_function_names(parsed.tokens)\n\n return function_names\n\n\ndef 
_get_subquery_from_crosstab_call(ct: str):\n \"\"\"\n Crosstabs are a useful feature some sites choose to enable on\n their datastore databases. To support the sql parameter passed\n safely we accept only the simple crosstab(text) form where text\n is a literal SQL string, otherwise raise ValueError\n \"\"\"\n if not ct.startswith(\"crosstab('\") or not ct.endswith(\"'::text)\"):\n raise ValueError('only simple crosstab calls supported')\n ct = ct[10:-8]\n if \"'\" in ct.replace(\"''\", \"\"):\n raise ValueError('only escaped single quotes allowed in query')\n return ct.replace(\"''\", \"'\")\n\n\ndef datastore_dictionary(resource_id: str):\n \"\"\"\n Return the data dictionary info for a resource\n \"\"\"\n try:\n return [\n f for f in tk.get_action('datastore_search')(\n {}, {\n u'resource_id': resource_id,\n u'limit': 0,\n u'include_total': False})['fields']\n if not f['id'].startswith(u'_')]\n except (tk.ObjectNotFound, tk.NotAuthorized):\n return []\n", "path": "ckanext/datastore/helpers.py"}], "after_files": [{"content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport json\nimport logging\nfrom typing import (\n Any, Iterable, Optional, Sequence, Union, cast, overload\n)\nfrom typing_extensions import Literal\n\nimport sqlparse\nimport six\n\nimport ckan.common as converters\nimport ckan.plugins.toolkit as tk\nfrom ckan.types import Context\n\n\nlog = logging.getLogger(__name__)\n\n\ndef is_single_statement(sql: str):\n '''Returns True if received SQL string contains at most one statement'''\n return len(sqlparse.split(sql)) <= 1\n\n\ndef is_valid_field_name(name: str):\n '''\n Check that field name is valid:\n * can't start or end with whitespace characters\n * can't start with underscore\n * can't contain double quote (\")\n * can't be empty\n '''\n return (name and name == name.strip() and\n not name.startswith('_') and\n '\"' not in name)\n\n\ndef is_valid_table_name(name: str):\n if '%' in name:\n return False\n return is_valid_field_name(name)\n\n\n@overload\ndef get_list(input: Literal[None], strip_values: bool = ...) -> Literal[None]:\n ...\n\n\n@overload\ndef get_list(input: Union[str, \"Sequence[Any]\"],\n strip_values: bool = ...) -> list[str]:\n ...\n\n\ndef get_list(input: Any, strip_values: bool = True) -> Optional[list[str]]:\n '''Transforms a string or list to a list'''\n if input is None:\n return\n if input == '':\n return []\n\n converters_list = converters.aslist(input, ',', True)\n if strip_values:\n return [_strip(x) for x in converters_list]\n else:\n return converters_list\n\n\ndef validate_int(i: Any, non_negative: bool = False):\n try:\n i = int(i)\n except ValueError:\n return False\n return i >= 0 or not non_negative\n\n\ndef _strip(s: Any):\n if isinstance(s, str) and len(s) and s[0] == s[-1]:\n return s.strip().strip('\"')\n return s\n\n\ndef should_fts_index_field_type(field_type: str):\n return field_type.lower() in ['tsvector', 'text', 'number']\n\n\ndef get_table_and_function_names_from_sql(context: Context, sql: str):\n '''Parses the output of EXPLAIN (FORMAT JSON) looking for table and\n function names\n\n It performs an EXPLAIN query against the provided SQL, and parses\n the output recusively.\n\n Note that this requires Postgres 9.x.\n\n :param context: a CKAN context dict. 
It must contain a 'connection' key\n with the current DB connection.\n :type context: dict\n :param sql: the SQL statement to parse for table and function names\n :type sql: string\n\n :rtype: a tuple with two list of strings, one for table and one for\n function names\n '''\n\n queries = [sql]\n table_names: list[str] = []\n function_names: list[str] = []\n\n while queries:\n sql = queries.pop()\n\n function_names.extend(_get_function_names_from_sql(sql))\n\n result = context['connection'].execute(\n 'EXPLAIN (VERBOSE, FORMAT JSON) {0}'.format(\n six.ensure_str(sql))).fetchone()\n\n try:\n query_plan = json.loads(result['QUERY PLAN'])\n plan = query_plan[0]['Plan']\n\n t, q, f = _parse_query_plan(plan)\n table_names.extend(t)\n queries.extend(q)\n\n function_names = list(set(function_names) | set(f))\n\n except ValueError:\n log.error('Could not parse query plan')\n raise\n\n return table_names, function_names\n\n\ndef _parse_query_plan(\n plan: dict[str, Any]) -> tuple[list[str], list[str], list[str]]:\n '''\n Given a Postgres Query Plan object (parsed from the output of an EXPLAIN\n query), returns a tuple with three items:\n\n * A list of tables involved\n * A list of remaining queries to parse\n * A list of function names involved\n '''\n\n table_names: list[str] = []\n queries: list[str] = []\n functions: list[str] = []\n\n if plan.get('Relation Name'):\n table_names.append(plan['Relation Name'])\n if 'Function Name' in plan:\n if plan['Function Name'].startswith(\n 'crosstab'):\n try:\n queries.append(_get_subquery_from_crosstab_call(\n plan['Function Call']))\n except ValueError:\n table_names.append('_unknown_crosstab_sql')\n else:\n functions.append(plan['Function Name'])\n\n if 'Plans' in plan:\n for child_plan in plan['Plans']:\n t, q, f = _parse_query_plan(child_plan)\n table_names.extend(t)\n queries.extend(q)\n functions.extend(f)\n\n return table_names, queries, functions\n\n\ndef _get_function_names_from_sql(sql: str):\n function_names: list[str] = []\n\n def _get_function_names(tokens: Iterable[Any]):\n for token in tokens:\n if isinstance(token, sqlparse.sql.Function):\n function_name = cast(str, token.get_name())\n if function_name not in function_names:\n function_names.append(function_name)\n if hasattr(token, 'tokens'):\n _get_function_names(token.tokens)\n\n parsed = sqlparse.parse(sql)[0]\n _get_function_names(parsed.tokens)\n\n return function_names\n\n\ndef _get_subquery_from_crosstab_call(ct: str):\n \"\"\"\n Crosstabs are a useful feature some sites choose to enable on\n their datastore databases. 
To support the sql parameter passed\n safely we accept only the simple crosstab(text) form where text\n is a literal SQL string, otherwise raise ValueError\n \"\"\"\n if not ct.startswith(\"crosstab('\") or not ct.endswith(\"'::text)\"):\n raise ValueError('only simple crosstab calls supported')\n ct = ct[10:-8]\n if \"'\" in ct.replace(\"''\", \"\"):\n raise ValueError('only escaped single quotes allowed in query')\n return ct.replace(\"''\", \"'\")\n\n\ndef datastore_dictionary(\n resource_id: str, include_columns: Optional[list[str]] = None):\n \"\"\"\n Return the data dictionary info for a resource, optionally filtering\n columns returned.\n\n include_columns is a list of column ids to include in the output\n \"\"\"\n try:\n return [\n f for f in tk.get_action('datastore_search')(\n {}, {\n u'resource_id': resource_id,\n u'limit': 0,\n u'include_total': False})['fields']\n if not f['id'].startswith(u'_') and (\n include_columns is None or f['id'] in include_columns)\n ]\n except (tk.ObjectNotFound, tk.NotAuthorized):\n return []\n", "path": "ckanext/datastore/helpers.py"}]}
| 2,383 | 256 |
gh_patches_debug_30214
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-agent-1709
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[etcd] `/stats/leader` from a follower returns a 403
The `/stats/leader` endpoint is only available on the leader etcd host.
On the followers, it returns a 403: it is illegitimately logged as an exception in the agent and triggers a CRITICAL service check.
`#0[ERROR]: 'Http status code 403 on url http://localhost:4002/v2/stats/leader`
Full stacktrace:
```
2015-06-17 16:41:46 UTC | ERROR | dd.collector | checks.etcd(__init__.py:678) | Check 'etcd' instance #0 failed
Traceback (most recent call last):
File "/opt/datadog-agent/agent/checks/__init__.py", line 661, in run
self.check(copy.deepcopy(instance))
File "/opt/datadog-agent/agent/checks.d/etcd.py", line 114, in check
leader_response = self._get_leader_metrics(url, timeout)
File "/opt/datadog-agent/agent/checks.d/etcd.py", line 143, in _get_leader_metrics
return self._get_json(url + "/v2/stats/leader", timeout)
File "/opt/datadog-agent/agent/checks.d/etcd.py", line 159, in _get_json
raise Exception("Http status code {0} on url {1}".format(r.status_code, url))
Exception: Http status code 403 on url http://localhost:4002/v2/stats/leader
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checks.d/etcd.py`
Content:
```
1 # project
2 from checks import AgentCheck
3 from util import headers
4
5 # 3rd party
6 import requests
7
8
9 class Etcd(AgentCheck):
10
11 DEFAULT_TIMEOUT = 5
12
13 SERVICE_CHECK_NAME = 'etcd.can_connect'
14
15 STORE_RATES = {
16 'getsSuccess': 'etcd.store.gets.success',
17 'getsFail': 'etcd.store.gets.fail',
18 'setsSuccess': 'etcd.store.sets.success',
19 'setsFail': 'etcd.store.sets.fail',
20 'deleteSuccess': 'etcd.store.delete.success',
21 'deleteFail': 'etcd.store.delete.fail',
22 'updateSuccess': 'etcd.store.update.success',
23 'updateFail': 'etcd.store.update.fail',
24 'createSuccess': 'etcd.store.create.success',
25 'createFail': 'etcd.store.create.fail',
26 'compareAndSwapSuccess': 'etcd.store.compareandswap.success',
27 'compareAndSwapFail': 'etcd.store.compareandswap.fail',
28 'compareAndDeleteSuccess': 'etcd.store.compareanddelete.success',
29 'compareAndDeleteFail': 'etcd.store.compareanddelete.fail',
30 'expireCount': 'etcd.store.expire.count'
31 }
32
33 STORE_GAUGES = {
34 'watchers': 'etcd.store.watchers'
35 }
36
37 SELF_GAUGES = {
38 'sendPkgRate': 'etcd.self.send.pkgrate',
39 'sendBandwidthRate': 'etcd.self.send.bandwidthrate',
40 'recvPkgRate': 'etcd.self.recv.pkgrate',
41 'recvBandwidthRate': 'etcd.self.recv.bandwidthrate'
42 }
43
44 SELF_RATES = {
45 'recvAppendRequestCnt': 'etcd.self.recv.appendrequest.count',
46 'sendAppendRequestCnt': 'etcd.self.send.appendrequest.count'
47 }
48
49 LEADER_COUNTS = {
50 # Rates
51 'fail': 'etcd.leader.counts.fail',
52 'success': 'etcd.leader.counts.success',
53 }
54
55 LEADER_LATENCY = {
56 # Gauges
57 'current': 'etcd.leader.latency.current',
58 'average': 'etcd.leader.latency.avg',
59 'minimum': 'etcd.leader.latency.min',
60 'maximum': 'etcd.leader.latency.max',
61 'standardDeviation': 'etcd.leader.latency.stddev',
62 }
63
64 def check(self, instance):
65 if 'url' not in instance:
66 raise Exception('etcd instance missing "url" value.')
67
68 # Load values from the instance config
69 url = instance['url']
70 instance_tags = instance.get('tags', [])
71 # Append the instance's URL in case there are more than one, that
72 # way they can tell the difference!
73 instance_tags.append("url:{0}".format(url))
74 timeout = float(instance.get('timeout', self.DEFAULT_TIMEOUT))
75 is_leader = False
76
77 # Gather self metrics
78 self_response = self._get_self_metrics(url, timeout)
79 if self_response is not None:
80 if self_response['state'] == 'StateLeader':
81 is_leader = True
82 instance_tags.append('etcd_state:leader')
83 else:
84 instance_tags.append('etcd_state:follower')
85
86 for key in self.SELF_RATES:
87 if key in self_response:
88 self.rate(self.SELF_RATES[key], self_response[key], tags=instance_tags)
89 else:
90 self.log.warn("Missing key {0} in stats.".format(key))
91
92 for key in self.SELF_GAUGES:
93 if key in self_response:
94 self.gauge(self.SELF_GAUGES[key], self_response[key], tags=instance_tags)
95 else:
96 self.log.warn("Missing key {0} in stats.".format(key))
97
98 # Gather store metrics
99 store_response = self._get_store_metrics(url, timeout)
100 if store_response is not None:
101 for key in self.STORE_RATES:
102 if key in store_response:
103 self.rate(self.STORE_RATES[key], store_response[key], tags=instance_tags)
104 else:
105 self.log.warn("Missing key {0} in stats.".format(key))
106
107 for key in self.STORE_GAUGES:
108 if key in store_response:
109 self.gauge(self.STORE_GAUGES[key], store_response[key], tags=instance_tags)
110 else:
111 self.log.warn("Missing key {0} in stats.".format(key))
112
113 # Gather leader metrics
114 leader_response = self._get_leader_metrics(url, timeout)
115 if leader_response is not None and is_leader \
116 and len(leader_response.get("followers", {})) > 0:
117 # Get the followers
118 followers = leader_response.get("followers")
119 for fol in followers:
120 # counts
121 for key in self.LEADER_COUNTS:
122 self.rate(self.LEADER_COUNTS[key],
123 followers[fol].get("counts").get(key),
124 tags=instance_tags + ['follower:{0}'.format(fol)])
125 # latency
126 for key in self.LEADER_LATENCY:
127 self.gauge(self.LEADER_LATENCY[key],
128 followers[fol].get("latency").get(key),
129 tags=instance_tags + ['follower:{0}'.format(fol)])
130
131 # Service check
132 if self_response is not None and store_response is not None:
133 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
134 tags=["url:{0}".format(url)])
135
136 def _get_self_metrics(self, url, timeout):
137 return self._get_json(url + "/v2/stats/self", timeout)
138
139 def _get_store_metrics(self, url, timeout):
140 return self._get_json(url + "/v2/stats/store", timeout)
141
142 def _get_leader_metrics(self, url, timeout):
143 return self._get_json(url + "/v2/stats/leader", timeout)
144
145 def _get_json(self, url, timeout):
146 try:
147 r = requests.get(url, timeout=timeout, headers=headers(self.agentConfig))
148 except requests.exceptions.Timeout:
149 # If there's a timeout
150 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
151 message="Timeout when hitting %s" % url,
152 tags=["url:{0}".format(url)])
153 raise
154
155 if r.status_code != 200:
156 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
157 message="Got %s when hitting %s" % (r.status_code, url),
158 tags=["url:{0}".format(url)])
159 raise Exception("Http status code {0} on url {1}".format(r.status_code, url))
160
161 return r.json()
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checks.d/etcd.py b/checks.d/etcd.py
--- a/checks.d/etcd.py
+++ b/checks.d/etcd.py
@@ -111,22 +111,22 @@
self.log.warn("Missing key {0} in stats.".format(key))
# Gather leader metrics
- leader_response = self._get_leader_metrics(url, timeout)
- if leader_response is not None and is_leader \
- and len(leader_response.get("followers", {})) > 0:
- # Get the followers
- followers = leader_response.get("followers")
- for fol in followers:
- # counts
- for key in self.LEADER_COUNTS:
- self.rate(self.LEADER_COUNTS[key],
- followers[fol].get("counts").get(key),
- tags=instance_tags + ['follower:{0}'.format(fol)])
- # latency
- for key in self.LEADER_LATENCY:
- self.gauge(self.LEADER_LATENCY[key],
- followers[fol].get("latency").get(key),
- tags=instance_tags + ['follower:{0}'.format(fol)])
+ if is_leader:
+ leader_response = self._get_leader_metrics(url, timeout)
+ if leader_response is not None and len(leader_response.get("followers", {})) > 0:
+ # Get the followers
+ followers = leader_response.get("followers")
+ for fol in followers:
+ # counts
+ for key in self.LEADER_COUNTS:
+ self.rate(self.LEADER_COUNTS[key],
+ followers[fol].get("counts").get(key),
+ tags=instance_tags + ['follower:{0}'.format(fol)])
+ # latency
+ for key in self.LEADER_LATENCY:
+ self.gauge(self.LEADER_LATENCY[key],
+ followers[fol].get("latency").get(key),
+ tags=instance_tags + ['follower:{0}'.format(fol)])
# Service check
if self_response is not None and store_response is not None:
|
{"golden_diff": "diff --git a/checks.d/etcd.py b/checks.d/etcd.py\n--- a/checks.d/etcd.py\n+++ b/checks.d/etcd.py\n@@ -111,22 +111,22 @@\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n \n # Gather leader metrics\n- leader_response = self._get_leader_metrics(url, timeout)\n- if leader_response is not None and is_leader \\\n- and len(leader_response.get(\"followers\", {})) > 0:\n- # Get the followers\n- followers = leader_response.get(\"followers\")\n- for fol in followers:\n- # counts\n- for key in self.LEADER_COUNTS:\n- self.rate(self.LEADER_COUNTS[key],\n- followers[fol].get(\"counts\").get(key),\n- tags=instance_tags + ['follower:{0}'.format(fol)])\n- # latency\n- for key in self.LEADER_LATENCY:\n- self.gauge(self.LEADER_LATENCY[key],\n- followers[fol].get(\"latency\").get(key),\n- tags=instance_tags + ['follower:{0}'.format(fol)])\n+ if is_leader:\n+ leader_response = self._get_leader_metrics(url, timeout)\n+ if leader_response is not None and len(leader_response.get(\"followers\", {})) > 0:\n+ # Get the followers\n+ followers = leader_response.get(\"followers\")\n+ for fol in followers:\n+ # counts\n+ for key in self.LEADER_COUNTS:\n+ self.rate(self.LEADER_COUNTS[key],\n+ followers[fol].get(\"counts\").get(key),\n+ tags=instance_tags + ['follower:{0}'.format(fol)])\n+ # latency\n+ for key in self.LEADER_LATENCY:\n+ self.gauge(self.LEADER_LATENCY[key],\n+ followers[fol].get(\"latency\").get(key),\n+ tags=instance_tags + ['follower:{0}'.format(fol)])\n \n # Service check\n if self_response is not None and store_response is not None:\n", "issue": "[etcd] `/stats/leader` from a follower returns a 403\nThe `/stats/leader` endpoint is only available on the leader etcd host.\nOn the followers, it returns a 403: it is illegitimately logged as an exception in the agent and triggers a CRITICAL service check.\n\n`#0[ERROR]: 'Http status code 403 on url http://localhost:4002/v2/stats/leader`\n\nFull stacktrace: \n\n```\n2015-06-17 16:41:46 UTC | ERROR | dd.collector | checks.etcd(__init__.py:678) | Check 'etcd' instance #0 failed\nTraceback (most recent call last):\n File \"/opt/datadog-agent/agent/checks/__init__.py\", line 661, in run\n self.check(copy.deepcopy(instance))\n File \"/opt/datadog-agent/agent/checks.d/etcd.py\", line 114, in check\n leader_response = self._get_leader_metrics(url, timeout)\n File \"/opt/datadog-agent/agent/checks.d/etcd.py\", line 143, in _get_leader_metrics\n return self._get_json(url + \"/v2/stats/leader\", timeout)\n File \"/opt/datadog-agent/agent/checks.d/etcd.py\", line 159, in _get_json\n raise Exception(\"Http status code {0} on url {1}\".format(r.status_code, url))\nException: Http status code 403 on url http://localhost:4002/v2/stats/leader\n```\n\n", "before_files": [{"content": "# project\nfrom checks import AgentCheck\nfrom util import headers\n\n# 3rd party\nimport requests\n\n\nclass Etcd(AgentCheck):\n\n DEFAULT_TIMEOUT = 5\n\n SERVICE_CHECK_NAME = 'etcd.can_connect'\n\n STORE_RATES = {\n 'getsSuccess': 'etcd.store.gets.success',\n 'getsFail': 'etcd.store.gets.fail',\n 'setsSuccess': 'etcd.store.sets.success',\n 'setsFail': 'etcd.store.sets.fail',\n 'deleteSuccess': 'etcd.store.delete.success',\n 'deleteFail': 'etcd.store.delete.fail',\n 'updateSuccess': 'etcd.store.update.success',\n 'updateFail': 'etcd.store.update.fail',\n 'createSuccess': 'etcd.store.create.success',\n 'createFail': 'etcd.store.create.fail',\n 'compareAndSwapSuccess': 'etcd.store.compareandswap.success',\n 'compareAndSwapFail': 'etcd.store.compareandswap.fail',\n 
'compareAndDeleteSuccess': 'etcd.store.compareanddelete.success',\n 'compareAndDeleteFail': 'etcd.store.compareanddelete.fail',\n 'expireCount': 'etcd.store.expire.count'\n }\n\n STORE_GAUGES = {\n 'watchers': 'etcd.store.watchers'\n }\n\n SELF_GAUGES = {\n 'sendPkgRate': 'etcd.self.send.pkgrate',\n 'sendBandwidthRate': 'etcd.self.send.bandwidthrate',\n 'recvPkgRate': 'etcd.self.recv.pkgrate',\n 'recvBandwidthRate': 'etcd.self.recv.bandwidthrate'\n }\n\n SELF_RATES = {\n 'recvAppendRequestCnt': 'etcd.self.recv.appendrequest.count',\n 'sendAppendRequestCnt': 'etcd.self.send.appendrequest.count'\n }\n\n LEADER_COUNTS = {\n # Rates\n 'fail': 'etcd.leader.counts.fail',\n 'success': 'etcd.leader.counts.success',\n }\n\n LEADER_LATENCY = {\n # Gauges\n 'current': 'etcd.leader.latency.current',\n 'average': 'etcd.leader.latency.avg',\n 'minimum': 'etcd.leader.latency.min',\n 'maximum': 'etcd.leader.latency.max',\n 'standardDeviation': 'etcd.leader.latency.stddev',\n }\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('etcd instance missing \"url\" value.')\n\n # Load values from the instance config\n url = instance['url']\n instance_tags = instance.get('tags', [])\n # Append the instance's URL in case there are more than one, that\n # way they can tell the difference!\n instance_tags.append(\"url:{0}\".format(url))\n timeout = float(instance.get('timeout', self.DEFAULT_TIMEOUT))\n is_leader = False\n\n # Gather self metrics\n self_response = self._get_self_metrics(url, timeout)\n if self_response is not None:\n if self_response['state'] == 'StateLeader':\n is_leader = True\n instance_tags.append('etcd_state:leader')\n else:\n instance_tags.append('etcd_state:follower')\n\n for key in self.SELF_RATES:\n if key in self_response:\n self.rate(self.SELF_RATES[key], self_response[key], tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n for key in self.SELF_GAUGES:\n if key in self_response:\n self.gauge(self.SELF_GAUGES[key], self_response[key], tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n # Gather store metrics\n store_response = self._get_store_metrics(url, timeout)\n if store_response is not None:\n for key in self.STORE_RATES:\n if key in store_response:\n self.rate(self.STORE_RATES[key], store_response[key], tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n for key in self.STORE_GAUGES:\n if key in store_response:\n self.gauge(self.STORE_GAUGES[key], store_response[key], tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n # Gather leader metrics\n leader_response = self._get_leader_metrics(url, timeout)\n if leader_response is not None and is_leader \\\n and len(leader_response.get(\"followers\", {})) > 0:\n # Get the followers\n followers = leader_response.get(\"followers\")\n for fol in followers:\n # counts\n for key in self.LEADER_COUNTS:\n self.rate(self.LEADER_COUNTS[key],\n followers[fol].get(\"counts\").get(key),\n tags=instance_tags + ['follower:{0}'.format(fol)])\n # latency\n for key in self.LEADER_LATENCY:\n self.gauge(self.LEADER_LATENCY[key],\n followers[fol].get(\"latency\").get(key),\n tags=instance_tags + ['follower:{0}'.format(fol)])\n\n # Service check\n if self_response is not None and store_response is not None:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags=[\"url:{0}\".format(url)])\n\n def _get_self_metrics(self, url, timeout):\n return self._get_json(url + 
\"/v2/stats/self\", timeout)\n\n def _get_store_metrics(self, url, timeout):\n return self._get_json(url + \"/v2/stats/store\", timeout)\n\n def _get_leader_metrics(self, url, timeout):\n return self._get_json(url + \"/v2/stats/leader\", timeout)\n\n def _get_json(self, url, timeout):\n try:\n r = requests.get(url, timeout=timeout, headers=headers(self.agentConfig))\n except requests.exceptions.Timeout:\n # If there's a timeout\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message=\"Timeout when hitting %s\" % url,\n tags=[\"url:{0}\".format(url)])\n raise\n\n if r.status_code != 200:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message=\"Got %s when hitting %s\" % (r.status_code, url),\n tags=[\"url:{0}\".format(url)])\n raise Exception(\"Http status code {0} on url {1}\".format(r.status_code, url))\n\n return r.json()\n", "path": "checks.d/etcd.py"}], "after_files": [{"content": "# project\nfrom checks import AgentCheck\nfrom util import headers\n\n# 3rd party\nimport requests\n\n\nclass Etcd(AgentCheck):\n\n DEFAULT_TIMEOUT = 5\n\n SERVICE_CHECK_NAME = 'etcd.can_connect'\n\n STORE_RATES = {\n 'getsSuccess': 'etcd.store.gets.success',\n 'getsFail': 'etcd.store.gets.fail',\n 'setsSuccess': 'etcd.store.sets.success',\n 'setsFail': 'etcd.store.sets.fail',\n 'deleteSuccess': 'etcd.store.delete.success',\n 'deleteFail': 'etcd.store.delete.fail',\n 'updateSuccess': 'etcd.store.update.success',\n 'updateFail': 'etcd.store.update.fail',\n 'createSuccess': 'etcd.store.create.success',\n 'createFail': 'etcd.store.create.fail',\n 'compareAndSwapSuccess': 'etcd.store.compareandswap.success',\n 'compareAndSwapFail': 'etcd.store.compareandswap.fail',\n 'compareAndDeleteSuccess': 'etcd.store.compareanddelete.success',\n 'compareAndDeleteFail': 'etcd.store.compareanddelete.fail',\n 'expireCount': 'etcd.store.expire.count'\n }\n\n STORE_GAUGES = {\n 'watchers': 'etcd.store.watchers'\n }\n\n SELF_GAUGES = {\n 'sendPkgRate': 'etcd.self.send.pkgrate',\n 'sendBandwidthRate': 'etcd.self.send.bandwidthrate',\n 'recvPkgRate': 'etcd.self.recv.pkgrate',\n 'recvBandwidthRate': 'etcd.self.recv.bandwidthrate'\n }\n\n SELF_RATES = {\n 'recvAppendRequestCnt': 'etcd.self.recv.appendrequest.count',\n 'sendAppendRequestCnt': 'etcd.self.send.appendrequest.count'\n }\n\n LEADER_COUNTS = {\n # Rates\n 'fail': 'etcd.leader.counts.fail',\n 'success': 'etcd.leader.counts.success',\n }\n\n LEADER_LATENCY = {\n # Gauges\n 'current': 'etcd.leader.latency.current',\n 'average': 'etcd.leader.latency.avg',\n 'minimum': 'etcd.leader.latency.min',\n 'maximum': 'etcd.leader.latency.max',\n 'standardDeviation': 'etcd.leader.latency.stddev',\n }\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('etcd instance missing \"url\" value.')\n\n # Load values from the instance config\n url = instance['url']\n instance_tags = instance.get('tags', [])\n # Append the instance's URL in case there are more than one, that\n # way they can tell the difference!\n instance_tags.append(\"url:{0}\".format(url))\n timeout = float(instance.get('timeout', self.DEFAULT_TIMEOUT))\n is_leader = False\n\n # Gather self metrics\n self_response = self._get_self_metrics(url, timeout)\n if self_response is not None:\n if self_response['state'] == 'StateLeader':\n is_leader = True\n instance_tags.append('etcd_state:leader')\n else:\n instance_tags.append('etcd_state:follower')\n\n for key in self.SELF_RATES:\n if key in self_response:\n self.rate(self.SELF_RATES[key], self_response[key], 
tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n for key in self.SELF_GAUGES:\n if key in self_response:\n self.gauge(self.SELF_GAUGES[key], self_response[key], tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n # Gather store metrics\n store_response = self._get_store_metrics(url, timeout)\n if store_response is not None:\n for key in self.STORE_RATES:\n if key in store_response:\n self.rate(self.STORE_RATES[key], store_response[key], tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n for key in self.STORE_GAUGES:\n if key in store_response:\n self.gauge(self.STORE_GAUGES[key], store_response[key], tags=instance_tags)\n else:\n self.log.warn(\"Missing key {0} in stats.\".format(key))\n\n # Gather leader metrics\n if is_leader:\n leader_response = self._get_leader_metrics(url, timeout)\n if leader_response is not None and len(leader_response.get(\"followers\", {})) > 0:\n # Get the followers\n followers = leader_response.get(\"followers\")\n for fol in followers:\n # counts\n for key in self.LEADER_COUNTS:\n self.rate(self.LEADER_COUNTS[key],\n followers[fol].get(\"counts\").get(key),\n tags=instance_tags + ['follower:{0}'.format(fol)])\n # latency\n for key in self.LEADER_LATENCY:\n self.gauge(self.LEADER_LATENCY[key],\n followers[fol].get(\"latency\").get(key),\n tags=instance_tags + ['follower:{0}'.format(fol)])\n\n # Service check\n if self_response is not None and store_response is not None:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags=[\"url:{0}\".format(url)])\n\n def _get_self_metrics(self, url, timeout):\n return self._get_json(url + \"/v2/stats/self\", timeout)\n\n def _get_store_metrics(self, url, timeout):\n return self._get_json(url + \"/v2/stats/store\", timeout)\n\n def _get_leader_metrics(self, url, timeout):\n return self._get_json(url + \"/v2/stats/leader\", timeout)\n\n def _get_json(self, url, timeout):\n try:\n r = requests.get(url, timeout=timeout, headers=headers(self.agentConfig))\n except requests.exceptions.Timeout:\n # If there's a timeout\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message=\"Timeout when hitting %s\" % url,\n tags=[\"url:{0}\".format(url)])\n raise\n\n if r.status_code != 200:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message=\"Got %s when hitting %s\" % (r.status_code, url),\n tags=[\"url:{0}\".format(url)])\n raise Exception(\"Http status code {0} on url {1}\".format(r.status_code, url))\n\n return r.json()\n", "path": "checks.d/etcd.py"}]}
| 2,448 | 461 |
gh_patches_debug_713
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-1826
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Agate type inference is too clever
### Describe the bug
We’re trying to set a value from a {% call statement %}; within the call, one line is SELECT 0 AS my_value, and dbt then treats it as a boolean (false) in the returned values.
The same happens if we try SELECT 1 AS my_value, but as soon as we do SELECT 2 AS my_value the value is treated as a number (as it should be).
### Steps To Reproduce
Create a call statement that selects 0 or 1; false and true, respectively, will be returned.
### Expected behavior
0, or 1 to be returned, as integers.
### Screenshots and log output
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [ ] redshift
- [x] bigquery
- [ ] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
```
installed version: 0.15.0-a1
latest version: 0.14.2
Your version of dbt is ahead of the latest release!
```
FYI, we run a fork, but that shouldn't have affected anything here.
**The operating system you're using:**
Mojave
**The output of `python --version`:**
Python 3.7.1
### Additional context
We'd love a quick fix for this, even if it's ugly!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/clients/agate_helper.py`
Content:
```
1 from codecs import BOM_UTF8
2
3 import agate
4 import json
5
6
7 BOM = BOM_UTF8.decode('utf-8') # '\ufeff'
8
9 DEFAULT_TYPE_TESTER = agate.TypeTester(types=[
10 agate.data_types.Number(null_values=('null', '')),
11 agate.data_types.TimeDelta(null_values=('null', '')),
12 agate.data_types.Date(null_values=('null', '')),
13 agate.data_types.DateTime(null_values=('null', '')),
14 agate.data_types.Boolean(true_values=('true',),
15 false_values=('false',),
16 null_values=('null', '')),
17 agate.data_types.Text(null_values=('null', ''))
18 ])
19
20
21 def table_from_data(data, column_names):
22 "Convert list of dictionaries into an Agate table"
23
24 # The agate table is generated from a list of dicts, so the column order
25 # from `data` is not preserved. We can use `select` to reorder the columns
26 #
27 # If there is no data, create an empty table with the specified columns
28
29 if len(data) == 0:
30 return agate.Table([], column_names=column_names)
31 else:
32 table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)
33 return table.select(column_names)
34
35
36 def table_from_data_flat(data, column_names):
37 "Convert list of dictionaries into an Agate table"
38
39 rows = []
40 for _row in data:
41 row = []
42 for value in list(_row.values()):
43 if isinstance(value, (dict, list, tuple)):
44 row.append(json.dumps(value))
45 else:
46 row.append(value)
47 rows.append(row)
48
49 return agate.Table(rows, column_names)
50
51
52 def empty_table():
53 "Returns an empty Agate table. To be used in place of None"
54
55 return agate.Table(rows=[])
56
57
58 def as_matrix(table):
59 "Return an agate table as a matrix of data sans columns"
60
61 return [r.values() for r in table.rows.values()]
62
63
64 def from_csv(abspath):
65 with open(abspath, encoding='utf-8') as fp:
66 if fp.read(1) != BOM:
67 fp.seek(0)
68 return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py
--- a/core/dbt/clients/agate_helper.py
+++ b/core/dbt/clients/agate_helper.py
@@ -46,7 +46,7 @@
row.append(value)
rows.append(row)
- return agate.Table(rows, column_names)
+ return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)
def empty_table():
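
Why the one-line change above fixes the report: `agate.Table` falls back to its own type inference when no `column_types` are given, and that inference can read a column containing only 0/1 as booleans; passing dbt's `DEFAULT_TYPE_TESTER` keeps the values numeric. A minimal standalone sketch of the difference (illustrative only, not dbt code; `TYPE_TESTER` is a trimmed-down stand-in, and the exact inferred type depends on the installed agate version):

```python
import agate

# Trimmed-down stand-in for dbt's DEFAULT_TYPE_TESTER: no Boolean type,
# so 0/1 stay numeric.
TYPE_TESTER = agate.TypeTester(types=[
    agate.data_types.Number(null_values=('null', '')),
    agate.data_types.Text(null_values=('null', '')),
])

rows = [[0], [1]]

inferred = agate.Table(rows, ['my_value'])                        # default inference
explicit = agate.Table(rows, ['my_value'], column_types=TYPE_TESTER)

print(inferred.columns['my_value'].values())   # may print (False, True)
print(explicit.columns['my_value'].values())   # prints (Decimal('0'), Decimal('1'))
```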
|
{"golden_diff": "diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py\n--- a/core/dbt/clients/agate_helper.py\n+++ b/core/dbt/clients/agate_helper.py\n@@ -46,7 +46,7 @@\n row.append(value)\n rows.append(row)\n \n- return agate.Table(rows, column_names)\n+ return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)\n \n \n def empty_table():\n", "issue": "Agate type inference is too clever\n### Describe the bug\r\nWe\u2019re trying to set a value from a {% call statement %} and within the call, one line is SELECT 0 AS my_value...and it then treats it as a boolean (false) in the returned values. \r\n\r\nThe same happens if we try SELECT 1 AS my_value, but as soon as we do SELECT 2 AS my_value it treats it like a number (as it should).\r\n\r\n### Steps To Reproduce\r\nCreate a call statement that selects 0, or 1. false, and true respectively will be returned.\r\n\r\n### Expected behavior\r\n0, or 1 to be returned, as integers.\r\n\r\n### Screenshots and log output\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [ ] redshift\r\n- [x] bigquery\r\n- [ ] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\ninstalled version: 0.15.0-a1\r\n latest version: 0.14.2\r\nYour version of dbt is ahead of the latest release!\r\n```\r\n\r\nFYI, we run a fork, but that shouldn't have affected anything here.\r\n\r\n**The operating system you're using:**\r\n\r\nMojave\r\n\r\n**The output of `python --version`:**\r\n\r\nPython 3.7.1\r\n\r\n### Additional context\r\nWe'd love a quick fix for this, even if it's ugly!\r\n\n", "before_files": [{"content": "from codecs import BOM_UTF8\n\nimport agate\nimport json\n\n\nBOM = BOM_UTF8.decode('utf-8') # '\\ufeff'\n\nDEFAULT_TYPE_TESTER = agate.TypeTester(types=[\n agate.data_types.Number(null_values=('null', '')),\n agate.data_types.TimeDelta(null_values=('null', '')),\n agate.data_types.Date(null_values=('null', '')),\n agate.data_types.DateTime(null_values=('null', '')),\n agate.data_types.Boolean(true_values=('true',),\n false_values=('false',),\n null_values=('null', '')),\n agate.data_types.Text(null_values=('null', ''))\n])\n\n\ndef table_from_data(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n # The agate table is generated from a list of dicts, so the column order\n # from `data` is not preserved. We can use `select` to reorder the columns\n #\n # If there is no data, create an empty table with the specified columns\n\n if len(data) == 0:\n return agate.Table([], column_names=column_names)\n else:\n table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)\n return table.select(column_names)\n\n\ndef table_from_data_flat(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n rows = []\n for _row in data:\n row = []\n for value in list(_row.values()):\n if isinstance(value, (dict, list, tuple)):\n row.append(json.dumps(value))\n else:\n row.append(value)\n rows.append(row)\n\n return agate.Table(rows, column_names)\n\n\ndef empty_table():\n \"Returns an empty Agate table. 
To be used in place of None\"\n\n return agate.Table(rows=[])\n\n\ndef as_matrix(table):\n \"Return an agate table as a matrix of data sans columns\"\n\n return [r.values() for r in table.rows.values()]\n\n\ndef from_csv(abspath):\n with open(abspath, encoding='utf-8') as fp:\n if fp.read(1) != BOM:\n fp.seek(0)\n return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)\n", "path": "core/dbt/clients/agate_helper.py"}], "after_files": [{"content": "from codecs import BOM_UTF8\n\nimport agate\nimport json\n\n\nBOM = BOM_UTF8.decode('utf-8') # '\\ufeff'\n\nDEFAULT_TYPE_TESTER = agate.TypeTester(types=[\n agate.data_types.Number(null_values=('null', '')),\n agate.data_types.TimeDelta(null_values=('null', '')),\n agate.data_types.Date(null_values=('null', '')),\n agate.data_types.DateTime(null_values=('null', '')),\n agate.data_types.Boolean(true_values=('true',),\n false_values=('false',),\n null_values=('null', '')),\n agate.data_types.Text(null_values=('null', ''))\n])\n\n\ndef table_from_data(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n # The agate table is generated from a list of dicts, so the column order\n # from `data` is not preserved. We can use `select` to reorder the columns\n #\n # If there is no data, create an empty table with the specified columns\n\n if len(data) == 0:\n return agate.Table([], column_names=column_names)\n else:\n table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)\n return table.select(column_names)\n\n\ndef table_from_data_flat(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n rows = []\n for _row in data:\n row = []\n for value in list(_row.values()):\n if isinstance(value, (dict, list, tuple)):\n row.append(json.dumps(value))\n else:\n row.append(value)\n rows.append(row)\n\n return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)\n\n\ndef empty_table():\n \"Returns an empty Agate table. To be used in place of None\"\n\n return agate.Table(rows=[])\n\n\ndef as_matrix(table):\n \"Return an agate table as a matrix of data sans columns\"\n\n return [r.values() for r in table.rows.values()]\n\n\ndef from_csv(abspath):\n with open(abspath, encoding='utf-8') as fp:\n if fp.read(1) != BOM:\n fp.seek(0)\n return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)\n", "path": "core/dbt/clients/agate_helper.py"}]}
| 1,205 | 106 |
gh_patches_debug_1031
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-3857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[beta][v20] Signing up, logging in, or searching with an emoji causes a 500
Server: Beta
Version: v20-RC3/d3fd8af
System: Mac OS X
Browser: 52.0.2743.116 (64-bit)
---
1. Go to the registration page and enter a username such as: 👚 test
2. Fill in the other fields.
3. Submit the form.
4. Observe a 500 error.
Note: you can reproduce the same error by trying to log in with the same username or by searching for a member's username.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/misc.py`
Content:
```
1 # coding: utf-8
2 import hashlib
3 import re
4
5 THUMB_MAX_WIDTH = 80
6 THUMB_MAX_HEIGHT = 80
7
8 MEDIUM_MAX_WIDTH = 200
9 MEDIUM_MAX_HEIGHT = 200
10
11
12 def compute_hash(filenames):
13 """returns a md5 hexdigest of group of files to check if they have change"""
14 md5_hash = hashlib.md5()
15 for filename in filenames:
16 if filename:
17 file_handle = open(filename, 'rb')
18 must_continue = True
19 while must_continue:
20 read_bytes = file_handle.read(8096)
21 if not read_bytes:
22 must_continue = False
23 else:
24 md5_hash.update(read_bytes)
25 return md5_hash.hexdigest()
26
27
28 def content_has_changed(filenames, md5):
29 return md5 != compute_hash(filenames)
30
31
32 def has_changed(instance, field, manager='objects'):
33 """Returns true if a field has changed in a model May be used in a
34 model.save() method."""
35 if not instance.pk:
36 return True
37 manager = getattr(instance.__class__, manager)
38 old = getattr(manager.get(pk=instance.pk), field)
39 return not getattr(instance, field) == old
40
41
42 def convert_camel_to_underscore(camel_case):
43 """
44 Converts a name in camel case to underscore.
45 """
46 s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_case)
47 return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
48
49
50 def contains_utf8mb4(s):
51 """
52 This string contains at least one character of more than 3 bytes
53 """
54 if not isinstance(s, unicode):
55 s = unicode(s, 'utf-8')
56 return not all(len(c.encode('utf-8')) <= 3 for c in s)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/utils/misc.py b/zds/utils/misc.py
--- a/zds/utils/misc.py
+++ b/zds/utils/misc.py
@@ -53,4 +53,5 @@
"""
if not isinstance(s, unicode):
s = unicode(s, 'utf-8')
- return not all(len(c.encode('utf-8')) <= 3 for c in s)
+ re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
+ return s != re_pattern.sub(u'\uFFFD', s)
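
Background on the patch above: MySQL's plain `utf8` charset only stores characters up to three UTF-8 bytes, and the characters that need four bytes are exactly those outside the Basic Multilingual Plane (such as the 👚 emoji, U+1F45A). On narrow Python 2 builds such characters are held as surrogate pairs, so the old per-character byte-length test could miss them, which is the likely cause of the 500. A Python 3 sketch of the replacement check (illustrative only; the project code above is Python 2, and `RE_NON_BMP` is just a local name):

```python
import re

# Anything outside these two ranges lies beyond the BMP and needs four bytes
# in UTF-8, i.e. requires MySQL's utf8mb4 charset.
RE_NON_BMP = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)

def contains_utf8mb4(s: str) -> bool:
    # True if replacing non-BMP characters changed the string.
    return s != RE_NON_BMP.sub(u'\uFFFD', s)

print(contains_utf8mb4(u'test'))             # False
print(contains_utf8mb4(u'\U0001F45A test'))  # True (the username from the report)
```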
|
{"golden_diff": "diff --git a/zds/utils/misc.py b/zds/utils/misc.py\n--- a/zds/utils/misc.py\n+++ b/zds/utils/misc.py\n@@ -53,4 +53,5 @@\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n- return not all(len(c.encode('utf-8')) <= 3 for c in s)\n+ re_pattern = re.compile(u'[^\\u0000-\\uD7FF\\uE000-\\uFFFF]', re.UNICODE)\n+ return s != re_pattern.sub(u'\\uFFFD', s)\n", "issue": "[beta][v20] S'inscrire/se connecter/chercher avec un emoji provoque une 500\nServeur : Beta\nVersion : v20-RC3/d3fd8af\nSyst\u00e8me : Mac OS X\nNavigateur : 52.0.2743.116 (64-bit)\n\n---\n1. Rendez-vous \u00e0 la page d'inscription et renseigner un pseudo du type : \ud83d\udc5a test\n2. Remplissez les autres champs.\n3. Soumettez le formulaire.\n4. Constatez une erreur 500.\n\nNote : Vous pouvez reproduire la m\u00eame erreur en tentant de vous connecter avec le m\u00eame pseudo ou en faisant une recherche sur le pseudo d'un membre.\n\n", "before_files": [{"content": "# coding: utf-8\nimport hashlib\nimport re\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, 'rb')\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager='objects'):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n Converts a name in camel case to underscore.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n return not all(len(c.encode('utf-8')) <= 3 for c in s)\n", "path": "zds/utils/misc.py"}], "after_files": [{"content": "# coding: utf-8\nimport hashlib\nimport re\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, 'rb')\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager='objects'):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n 
Converts a name in camel case to underscore.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n re_pattern = re.compile(u'[^\\u0000-\\uD7FF\\uE000-\\uFFFF]', re.UNICODE)\n return s != re_pattern.sub(u'\\uFFFD', s)\n", "path": "zds/utils/misc.py"}]}
| 947 | 134 |
gh_patches_debug_10873
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-6268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xml parsing heuristics crash if a reference has anchor "STD", "FYI", "RFC", or "BCP"
### Describe the issue
I'm trying to submit a draft. On my first attempt, I realized that I tagged the wrong commit and missed some important changes from a co-author. So when I received the email, I cancelled the submission.
I have since tried twice (with the right commit). Both have gotten to the point where I receive an email and both have failed when I requested that the draft be published.
For my second retry attempt, the failed submission was apparently still in progress, so that failed. I then cancelled the first retry and tried a third time. That has also failed. I will leave things in that state for now.
https://datatracker.ietf.org/submit/status/136389/ (original, cancelled)
https://datatracker.ietf.org/submit/status/136390/ (retry 1, failed, also cancelled)
https://datatracker.ietf.org/submit/status/136403/ (retry 3, failed)
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/utils/xmldraft.py`
Content:
```
1 # Copyright The IETF Trust 2022, All Rights Reserved
2 # -*- coding: utf-8 -*-
3 import datetime
4 import io
5 import re
6 import xml2rfc
7
8 import debug # pyflakes: ignore
9
10 from contextlib import ExitStack
11 from xml2rfc.util.date import augment_date, extract_date
12 from ietf.utils.timezone import date_today
13
14 from .draft import Draft
15
16
17 class XMLDraft(Draft):
18 """Draft from XML source
19
20 Not all methods from the superclass are implemented yet.
21 """
22 def __init__(self, xml_file):
23 """Initialize XMLDraft instance
24
25 :parameter xml_file: path to file containing XML source
26 """
27 super().__init__()
28 # cast xml_file to str so, e.g., this will work with a Path
29 self.xmltree, self.xml_version = self.parse_xml(str(xml_file))
30 self.xmlroot = self.xmltree.getroot()
31 self.filename, self.revision = self._parse_docname()
32
33 @staticmethod
34 def parse_xml(filename):
35 """Parse XML draft
36
37 Converts to xml2rfc v3 schema, then returns the root of the v3 tree and the original
38 xml version.
39 """
40 orig_write_out = xml2rfc.log.write_out
41 orig_write_err = xml2rfc.log.write_err
42 parser_out = io.StringIO()
43 parser_err = io.StringIO()
44
45 with ExitStack() as stack:
46 @stack.callback
47 def cleanup(): # called when context exited, even if there's an exception
48 xml2rfc.log.write_out = orig_write_out
49 xml2rfc.log.write_err = orig_write_err
50
51 xml2rfc.log.write_out = parser_out
52 xml2rfc.log.write_err = parser_err
53
54 parser = xml2rfc.XmlRfcParser(filename, quiet=True)
55 try:
56 tree = parser.parse()
57 except Exception as e:
58 raise XMLParseError(parser_out.getvalue(), parser_err.getvalue()) from e
59
60 xml_version = tree.getroot().get('version', '2')
61 if xml_version == '2':
62 v2v3 = xml2rfc.V2v3XmlWriter(tree)
63 tree.tree = v2v3.convert2to3()
64 return tree, xml_version
65
66 def _document_name(self, ref):
67 """Get document name from reference."""
68 series = ["rfc", "bcp", "fyi", "std"]
69 # handle xinclude first
70 # FIXME: this assumes the xinclude is a bibxml href; if it isn't, there can
71 # still be false negatives. it would be better to expand the xinclude and parse
72 # its seriesInfo.
73 if ref.tag.endswith("}include"):
74 name = re.search(
75 rf"reference\.({'|'.join(series).upper()})\.(\d{{4}})\.xml",
76 ref.attrib["href"],
77 )
78 if name:
79 return f"{name.group(1)}{int(name.group(2))}".lower()
80 name = re.search(
81 r"reference\.I-D\.(?:draft-)?(.*)\.xml", ref.attrib["href"]
82 )
83 if name:
84 return f"draft-{name.group(1)}"
85 # can't extract the name, give up
86 return ""
87
88 # check the anchor next
89 anchor = ref.get("anchor").lower() # always give back lowercase
90 label = anchor.rstrip("0123456789") # remove trailing digits
91 if label in series:
92 number = int(anchor[len(label) :])
93 return f"{label}{number}"
94
95 # if we couldn't find a match so far, try the seriesInfo
96 series_query = " or ".join(f"@name='{x.upper()}'" for x in series)
97 for info in ref.xpath(
98 f"./seriesInfo[{series_query} or @name='Internet-Draft']"
99 ):
100 if not info.attrib["value"]:
101 continue
102 if info.attrib["name"] == "Internet-Draft":
103 return info.attrib["value"]
104 else:
105 return f'{info.attrib["name"].lower()}{info.attrib["value"]}'
106 return ""
107
108 def _reference_section_type(self, section_name):
109 """Determine reference type from name of references section"""
110 if section_name:
111 section_name = section_name.lower()
112 if 'normative' in section_name:
113 return self.REF_TYPE_NORMATIVE
114 elif 'informative' in section_name:
115 return self.REF_TYPE_INFORMATIVE
116 return self.REF_TYPE_UNKNOWN
117
118 def _reference_section_name(self, section_elt):
119 section_name = section_elt.findtext('name')
120 if section_name is None and 'title' in section_elt.keys():
121 section_name = section_elt.get('title') # fall back to title if we have it
122 return section_name
123
124 def _parse_docname(self):
125 docname = self.xmlroot.attrib.get('docName')
126 revmatch = re.match(
127 r'^(?P<filename>.+?)(?:-(?P<rev>[0-9][0-9]))?$',
128 docname,
129
130 )
131 if revmatch is None:
132 raise ValueError('Unable to parse docName')
133 # If a group had no match it is None
134 return revmatch.group('filename'), revmatch.group('rev')
135
136 def get_title(self):
137 return self.xmlroot.findtext('front/title').strip()
138
139 @staticmethod
140 def parse_creation_date(date_elt):
141 if date_elt is None:
142 return None
143 today = date_today()
144 # ths mimics handling of date elements in the xml2rfc text/html writers
145 year, month, day = extract_date(date_elt, today)
146 year, month, day = augment_date(year, month, day, today)
147 if not day:
148 # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use
149 # today's date if it is consistent with the rest of the date. Otherwise,
150 # arbitrariy (and consistent with the text parser) assume the 15th.
151 if year == today.year and month == today.month:
152 day = today.day
153 else:
154 day = 15
155 return datetime.date(year, month, day)
156
157 def get_creation_date(self):
158 return self.parse_creation_date(self.xmlroot.find("front/date"))
159
160 # todo fix the implementation of XMLDraft.get_abstract()
161 #
162 # This code was pulled from ietf.submit.forms where it existed for some time.
163 # It does not work, at least with modern xml2rfc. This assumes that the abstract
164 # is simply text in the front/abstract node, but the XML schema wraps the actual
165 # abstract text in <t> elements (and allows <dl>, <ol>, and <ul> as well). As a
166 # result, this method normally returns an empty string, which is later replaced by
167 # the abstract parsed from the rendered text. For now, I a commenting this out
168 # and making it explicit that the abstract always comes from the text format.
169 #
170 # def get_abstract(self):
171 # """Extract the abstract"""
172 # abstract = self.xmlroot.findtext('front/abstract')
173 # return abstract.strip() if abstract else ''
174
175 def get_author_list(self):
176 """Get detailed author list
177
178 Returns a list of dicts with the following keys:
179 name, first_name, middle_initial, last_name,
180 name_suffix, email, country, affiliation
181 Values will be None if not available
182 """
183 result = []
184 empty_author = {
185 k: None for k in [
186 'name', 'first_name', 'middle_initial', 'last_name',
187 'name_suffix', 'email', 'country', 'affiliation',
188 ]
189 }
190
191 for author in self.xmlroot.findall('front/author'):
192 info = {
193 'name': author.attrib.get('fullname'),
194 'email': author.findtext('address/email'),
195 'affiliation': author.findtext('organization'),
196 }
197 elem = author.find('address/postal/country')
198 if elem is not None:
199 ascii_country = elem.get('ascii', None)
200 info['country'] = ascii_country if ascii_country else elem.text
201 for item in info:
202 if info[item]:
203 info[item] = info[item].strip()
204 result.append(empty_author | info) # merge, preferring info
205 return result
206
207 def get_refs(self):
208 """Extract references from the draft"""
209 refs = {}
210 # accept nested <references> sections
211 for section in self.xmlroot.findall("back//references"):
212 ref_type = self._reference_section_type(
213 self._reference_section_name(section)
214 )
215 for ref in (
216 section.findall("./reference")
217 + section.findall("./referencegroup")
218 + section.findall(
219 "./xi:include", {"xi": "http://www.w3.org/2001/XInclude"}
220 )
221 ):
222 name = self._document_name(ref)
223 if name:
224 refs[name] = ref_type
225 return refs
226
227
228 class XMLParseError(Exception):
229 """An error occurred while parsing"""
230 def __init__(self, out: str, err: str, *args):
231 super().__init__(*args)
232 self._out = out
233 self._err = err
234
235 def parser_msgs(self):
236 return self._out.splitlines() + self._err.splitlines()
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ietf/utils/xmldraft.py b/ietf/utils/xmldraft.py
--- a/ietf/utils/xmldraft.py
+++ b/ietf/utils/xmldraft.py
@@ -88,8 +88,9 @@
# check the anchor next
anchor = ref.get("anchor").lower() # always give back lowercase
label = anchor.rstrip("0123456789") # remove trailing digits
- if label in series:
- number = int(anchor[len(label) :])
+ maybe_number = anchor[len(label) :]
+ if label in series and maybe_number.isdigit():
+ number = int(maybe_number)
return f"{label}{number}"
# if we couldn't find a match so far, try the seriesInfo
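
The failure mode the added `isdigit()` guard closes: a reference whose anchor is just a series name (for example `anchor="RFC"`) leaves an empty string after the trailing digits are stripped, and `int('')` raises `ValueError`, matching the crash described in the issue title. A hypothetical, self-contained reduction of the anchor-parsing step (names are illustrative, not datatracker code):

```python
SERIES = ["rfc", "bcp", "fyi", "std"]

def name_from_anchor(anchor: str) -> str:
    anchor = anchor.lower()
    label = anchor.rstrip("0123456789")              # drop trailing digits
    maybe_number = anchor[len(label):]
    if label in SERIES and maybe_number.isdigit():   # the patched guard
        return f"{label}{int(maybe_number)}"
    return ""  # the real code falls through to the <seriesInfo> lookup

print(name_from_anchor("RFC9000"))  # rfc9000
print(name_from_anchor("RFC"))      # "" instead of ValueError: invalid literal for int()
```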
|
{"golden_diff": "diff --git a/ietf/utils/xmldraft.py b/ietf/utils/xmldraft.py\n--- a/ietf/utils/xmldraft.py\n+++ b/ietf/utils/xmldraft.py\n@@ -88,8 +88,9 @@\n # check the anchor next\n anchor = ref.get(\"anchor\").lower() # always give back lowercase\n label = anchor.rstrip(\"0123456789\") # remove trailing digits\n- if label in series:\n- number = int(anchor[len(label) :])\n+ maybe_number = anchor[len(label) :]\n+ if label in series and maybe_number.isdigit():\n+ number = int(maybe_number)\n return f\"{label}{number}\"\n \n # if we couldn't find a match so far, try the seriesInfo\n", "issue": "xml parsing heuristics crash if a reference has anchor \"STD\", \"FYI\", \"RFC\", or \"BCP\"\n### Describe the issue\n\nI'm trying to submit a draft. On my first attempt, I realized that I tagged the wrong commit and missed some important changes from a co-author. So when I received the email, I cancelled the submission.\r\n\r\nI have since tried twice (with the right commit). Both have gotten to the point where I receive an email and both have failed when I requested that the draft be published.\r\n\r\nFor my second retry attempt, the failed submission was apparently still in progress, so that failed. I then cancelled the first retry and tried a third time. That has also failed. I will leave things in that state for now.\r\n\r\nhttps://datatracker.ietf.org/submit/status/136389/ (original, cancelled)\r\nhttps://datatracker.ietf.org/submit/status/136390/ (retry 1, failed, also cancelled)\r\nhttps://datatracker.ietf.org/submit/status/136403/ (retry 3, failed)\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2022, All Rights Reserved\n# -*- coding: utf-8 -*-\nimport datetime\nimport io\nimport re\nimport xml2rfc\n\nimport debug # pyflakes: ignore\n\nfrom contextlib import ExitStack\nfrom xml2rfc.util.date import augment_date, extract_date\nfrom ietf.utils.timezone import date_today\n\nfrom .draft import Draft\n\n\nclass XMLDraft(Draft):\n \"\"\"Draft from XML source\n\n Not all methods from the superclass are implemented yet.\n \"\"\"\n def __init__(self, xml_file):\n \"\"\"Initialize XMLDraft instance\n\n :parameter xml_file: path to file containing XML source\n \"\"\"\n super().__init__()\n # cast xml_file to str so, e.g., this will work with a Path\n self.xmltree, self.xml_version = self.parse_xml(str(xml_file))\n self.xmlroot = self.xmltree.getroot()\n self.filename, self.revision = self._parse_docname()\n\n @staticmethod\n def parse_xml(filename):\n \"\"\"Parse XML draft\n\n Converts to xml2rfc v3 schema, then returns the root of the v3 tree and the original\n xml version.\n \"\"\"\n orig_write_out = xml2rfc.log.write_out\n orig_write_err = xml2rfc.log.write_err\n parser_out = io.StringIO()\n parser_err = io.StringIO()\n\n with ExitStack() as stack:\n @stack.callback\n def cleanup(): # called when context exited, even if there's an exception\n xml2rfc.log.write_out = orig_write_out\n xml2rfc.log.write_err = orig_write_err\n\n xml2rfc.log.write_out = parser_out\n xml2rfc.log.write_err = parser_err\n\n parser = xml2rfc.XmlRfcParser(filename, quiet=True)\n try:\n tree = parser.parse()\n except Exception as e:\n raise XMLParseError(parser_out.getvalue(), parser_err.getvalue()) from e\n\n xml_version = tree.getroot().get('version', '2')\n if xml_version == '2':\n v2v3 = xml2rfc.V2v3XmlWriter(tree)\n tree.tree = v2v3.convert2to3()\n return 
tree, xml_version\n\n def _document_name(self, ref):\n \"\"\"Get document name from reference.\"\"\"\n series = [\"rfc\", \"bcp\", \"fyi\", \"std\"]\n # handle xinclude first\n # FIXME: this assumes the xinclude is a bibxml href; if it isn't, there can\n # still be false negatives. it would be better to expand the xinclude and parse\n # its seriesInfo.\n if ref.tag.endswith(\"}include\"):\n name = re.search(\n rf\"reference\\.({'|'.join(series).upper()})\\.(\\d{{4}})\\.xml\",\n ref.attrib[\"href\"],\n )\n if name:\n return f\"{name.group(1)}{int(name.group(2))}\".lower()\n name = re.search(\n r\"reference\\.I-D\\.(?:draft-)?(.*)\\.xml\", ref.attrib[\"href\"]\n )\n if name:\n return f\"draft-{name.group(1)}\"\n # can't extract the name, give up\n return \"\"\n\n # check the anchor next\n anchor = ref.get(\"anchor\").lower() # always give back lowercase\n label = anchor.rstrip(\"0123456789\") # remove trailing digits\n if label in series:\n number = int(anchor[len(label) :])\n return f\"{label}{number}\"\n\n # if we couldn't find a match so far, try the seriesInfo\n series_query = \" or \".join(f\"@name='{x.upper()}'\" for x in series)\n for info in ref.xpath(\n f\"./seriesInfo[{series_query} or @name='Internet-Draft']\"\n ):\n if not info.attrib[\"value\"]:\n continue\n if info.attrib[\"name\"] == \"Internet-Draft\":\n return info.attrib[\"value\"]\n else:\n return f'{info.attrib[\"name\"].lower()}{info.attrib[\"value\"]}'\n return \"\"\n\n def _reference_section_type(self, section_name):\n \"\"\"Determine reference type from name of references section\"\"\"\n if section_name:\n section_name = section_name.lower()\n if 'normative' in section_name:\n return self.REF_TYPE_NORMATIVE\n elif 'informative' in section_name:\n return self.REF_TYPE_INFORMATIVE\n return self.REF_TYPE_UNKNOWN\n\n def _reference_section_name(self, section_elt):\n section_name = section_elt.findtext('name')\n if section_name is None and 'title' in section_elt.keys():\n section_name = section_elt.get('title') # fall back to title if we have it\n return section_name\n\n def _parse_docname(self):\n docname = self.xmlroot.attrib.get('docName')\n revmatch = re.match(\n r'^(?P<filename>.+?)(?:-(?P<rev>[0-9][0-9]))?$',\n docname,\n\n )\n if revmatch is None:\n raise ValueError('Unable to parse docName')\n # If a group had no match it is None\n return revmatch.group('filename'), revmatch.group('rev')\n\n def get_title(self):\n return self.xmlroot.findtext('front/title').strip()\n\n @staticmethod\n def parse_creation_date(date_elt):\n if date_elt is None:\n return None\n today = date_today()\n # ths mimics handling of date elements in the xml2rfc text/html writers\n year, month, day = extract_date(date_elt, today)\n year, month, day = augment_date(year, month, day, today)\n if not day:\n # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use\n # today's date if it is consistent with the rest of the date. Otherwise,\n # arbitrariy (and consistent with the text parser) assume the 15th.\n if year == today.year and month == today.month:\n day = today.day\n else:\n day = 15\n return datetime.date(year, month, day)\n\n def get_creation_date(self):\n return self.parse_creation_date(self.xmlroot.find(\"front/date\"))\n\n # todo fix the implementation of XMLDraft.get_abstract()\n #\n # This code was pulled from ietf.submit.forms where it existed for some time.\n # It does not work, at least with modern xml2rfc. 
This assumes that the abstract\n # is simply text in the front/abstract node, but the XML schema wraps the actual\n # abstract text in <t> elements (and allows <dl>, <ol>, and <ul> as well). As a\n # result, this method normally returns an empty string, which is later replaced by\n # the abstract parsed from the rendered text. For now, I a commenting this out\n # and making it explicit that the abstract always comes from the text format.\n #\n # def get_abstract(self):\n # \"\"\"Extract the abstract\"\"\"\n # abstract = self.xmlroot.findtext('front/abstract')\n # return abstract.strip() if abstract else ''\n\n def get_author_list(self):\n \"\"\"Get detailed author list\n\n Returns a list of dicts with the following keys:\n name, first_name, middle_initial, last_name,\n name_suffix, email, country, affiliation\n Values will be None if not available\n \"\"\"\n result = []\n empty_author = {\n k: None for k in [\n 'name', 'first_name', 'middle_initial', 'last_name',\n 'name_suffix', 'email', 'country', 'affiliation',\n ]\n }\n\n for author in self.xmlroot.findall('front/author'):\n info = {\n 'name': author.attrib.get('fullname'),\n 'email': author.findtext('address/email'),\n 'affiliation': author.findtext('organization'),\n }\n elem = author.find('address/postal/country')\n if elem is not None:\n ascii_country = elem.get('ascii', None)\n info['country'] = ascii_country if ascii_country else elem.text\n for item in info:\n if info[item]:\n info[item] = info[item].strip()\n result.append(empty_author | info) # merge, preferring info\n return result\n\n def get_refs(self):\n \"\"\"Extract references from the draft\"\"\"\n refs = {}\n # accept nested <references> sections\n for section in self.xmlroot.findall(\"back//references\"):\n ref_type = self._reference_section_type(\n self._reference_section_name(section)\n )\n for ref in (\n section.findall(\"./reference\")\n + section.findall(\"./referencegroup\")\n + section.findall(\n \"./xi:include\", {\"xi\": \"http://www.w3.org/2001/XInclude\"}\n )\n ):\n name = self._document_name(ref)\n if name:\n refs[name] = ref_type\n return refs\n\n\nclass XMLParseError(Exception):\n \"\"\"An error occurred while parsing\"\"\"\n def __init__(self, out: str, err: str, *args):\n super().__init__(*args)\n self._out = out\n self._err = err\n\n def parser_msgs(self):\n return self._out.splitlines() + self._err.splitlines()\n", "path": "ietf/utils/xmldraft.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2022, All Rights Reserved\n# -*- coding: utf-8 -*-\nimport datetime\nimport io\nimport re\nimport xml2rfc\n\nimport debug # pyflakes: ignore\n\nfrom contextlib import ExitStack\nfrom xml2rfc.util.date import augment_date, extract_date\nfrom ietf.utils.timezone import date_today\n\nfrom .draft import Draft\n\n\nclass XMLDraft(Draft):\n \"\"\"Draft from XML source\n\n Not all methods from the superclass are implemented yet.\n \"\"\"\n def __init__(self, xml_file):\n \"\"\"Initialize XMLDraft instance\n\n :parameter xml_file: path to file containing XML source\n \"\"\"\n super().__init__()\n # cast xml_file to str so, e.g., this will work with a Path\n self.xmltree, self.xml_version = self.parse_xml(str(xml_file))\n self.xmlroot = self.xmltree.getroot()\n self.filename, self.revision = self._parse_docname()\n\n @staticmethod\n def parse_xml(filename):\n \"\"\"Parse XML draft\n\n Converts to xml2rfc v3 schema, then returns the root of the v3 tree and the original\n xml version.\n \"\"\"\n orig_write_out = xml2rfc.log.write_out\n orig_write_err = 
xml2rfc.log.write_err\n parser_out = io.StringIO()\n parser_err = io.StringIO()\n\n with ExitStack() as stack:\n @stack.callback\n def cleanup(): # called when context exited, even if there's an exception\n xml2rfc.log.write_out = orig_write_out\n xml2rfc.log.write_err = orig_write_err\n\n xml2rfc.log.write_out = parser_out\n xml2rfc.log.write_err = parser_err\n\n parser = xml2rfc.XmlRfcParser(filename, quiet=True)\n try:\n tree = parser.parse()\n except Exception as e:\n raise XMLParseError(parser_out.getvalue(), parser_err.getvalue()) from e\n\n xml_version = tree.getroot().get('version', '2')\n if xml_version == '2':\n v2v3 = xml2rfc.V2v3XmlWriter(tree)\n tree.tree = v2v3.convert2to3()\n return tree, xml_version\n\n def _document_name(self, ref):\n \"\"\"Get document name from reference.\"\"\"\n series = [\"rfc\", \"bcp\", \"fyi\", \"std\"]\n # handle xinclude first\n # FIXME: this assumes the xinclude is a bibxml href; if it isn't, there can\n # still be false negatives. it would be better to expand the xinclude and parse\n # its seriesInfo.\n if ref.tag.endswith(\"}include\"):\n name = re.search(\n rf\"reference\\.({'|'.join(series).upper()})\\.(\\d{{4}})\\.xml\",\n ref.attrib[\"href\"],\n )\n if name:\n return f\"{name.group(1)}{int(name.group(2))}\".lower()\n name = re.search(\n r\"reference\\.I-D\\.(?:draft-)?(.*)\\.xml\", ref.attrib[\"href\"]\n )\n if name:\n return f\"draft-{name.group(1)}\"\n # can't extract the name, give up\n return \"\"\n\n # check the anchor next\n anchor = ref.get(\"anchor\").lower() # always give back lowercase\n label = anchor.rstrip(\"0123456789\") # remove trailing digits\n maybe_number = anchor[len(label) :]\n if label in series and maybe_number.isdigit():\n number = int(maybe_number)\n return f\"{label}{number}\"\n\n # if we couldn't find a match so far, try the seriesInfo\n series_query = \" or \".join(f\"@name='{x.upper()}'\" for x in series)\n for info in ref.xpath(\n f\"./seriesInfo[{series_query} or @name='Internet-Draft']\"\n ):\n if not info.attrib[\"value\"]:\n continue\n if info.attrib[\"name\"] == \"Internet-Draft\":\n return info.attrib[\"value\"]\n else:\n return f'{info.attrib[\"name\"].lower()}{info.attrib[\"value\"]}'\n return \"\"\n\n def _reference_section_type(self, section_name):\n \"\"\"Determine reference type from name of references section\"\"\"\n if section_name:\n section_name = section_name.lower()\n if 'normative' in section_name:\n return self.REF_TYPE_NORMATIVE\n elif 'informative' in section_name:\n return self.REF_TYPE_INFORMATIVE\n return self.REF_TYPE_UNKNOWN\n\n def _reference_section_name(self, section_elt):\n section_name = section_elt.findtext('name')\n if section_name is None and 'title' in section_elt.keys():\n section_name = section_elt.get('title') # fall back to title if we have it\n return section_name\n\n def _parse_docname(self):\n docname = self.xmlroot.attrib.get('docName')\n revmatch = re.match(\n r'^(?P<filename>.+?)(?:-(?P<rev>[0-9][0-9]))?$',\n docname,\n\n )\n if revmatch is None:\n raise ValueError('Unable to parse docName')\n # If a group had no match it is None\n return revmatch.group('filename'), revmatch.group('rev')\n\n def get_title(self):\n return self.xmlroot.findtext('front/title').strip()\n\n @staticmethod\n def parse_creation_date(date_elt):\n if date_elt is None:\n return None\n today = date_today()\n # ths mimics handling of date elements in the xml2rfc text/html writers\n year, month, day = extract_date(date_elt, today)\n year, month, day = augment_date(year, month, day, today)\n if 
not day:\n # Must choose a day for a datetime.date. Per RFC 7991 sect 2.17, we use\n # today's date if it is consistent with the rest of the date. Otherwise,\n # arbitrariy (and consistent with the text parser) assume the 15th.\n if year == today.year and month == today.month:\n day = today.day\n else:\n day = 15\n return datetime.date(year, month, day)\n\n def get_creation_date(self):\n return self.parse_creation_date(self.xmlroot.find(\"front/date\"))\n\n # todo fix the implementation of XMLDraft.get_abstract()\n #\n # This code was pulled from ietf.submit.forms where it existed for some time.\n # It does not work, at least with modern xml2rfc. This assumes that the abstract\n # is simply text in the front/abstract node, but the XML schema wraps the actual\n # abstract text in <t> elements (and allows <dl>, <ol>, and <ul> as well). As a\n # result, this method normally returns an empty string, which is later replaced by\n # the abstract parsed from the rendered text. For now, I a commenting this out\n # and making it explicit that the abstract always comes from the text format.\n #\n # def get_abstract(self):\n # \"\"\"Extract the abstract\"\"\"\n # abstract = self.xmlroot.findtext('front/abstract')\n # return abstract.strip() if abstract else ''\n\n def get_author_list(self):\n \"\"\"Get detailed author list\n\n Returns a list of dicts with the following keys:\n name, first_name, middle_initial, last_name,\n name_suffix, email, country, affiliation\n Values will be None if not available\n \"\"\"\n result = []\n empty_author = {\n k: None for k in [\n 'name', 'first_name', 'middle_initial', 'last_name',\n 'name_suffix', 'email', 'country', 'affiliation',\n ]\n }\n\n for author in self.xmlroot.findall('front/author'):\n info = {\n 'name': author.attrib.get('fullname'),\n 'email': author.findtext('address/email'),\n 'affiliation': author.findtext('organization'),\n }\n elem = author.find('address/postal/country')\n if elem is not None:\n ascii_country = elem.get('ascii', None)\n info['country'] = ascii_country if ascii_country else elem.text\n for item in info:\n if info[item]:\n info[item] = info[item].strip()\n result.append(empty_author | info) # merge, preferring info\n return result\n\n def get_refs(self):\n \"\"\"Extract references from the draft\"\"\"\n refs = {}\n # accept nested <references> sections\n for section in self.xmlroot.findall(\"back//references\"):\n ref_type = self._reference_section_type(\n self._reference_section_name(section)\n )\n for ref in (\n section.findall(\"./reference\")\n + section.findall(\"./referencegroup\")\n + section.findall(\n \"./xi:include\", {\"xi\": \"http://www.w3.org/2001/XInclude\"}\n )\n ):\n name = self._document_name(ref)\n if name:\n refs[name] = ref_type\n return refs\n\n\nclass XMLParseError(Exception):\n \"\"\"An error occurred while parsing\"\"\"\n def __init__(self, out: str, err: str, *args):\n super().__init__(*args)\n self._out = out\n self._err = err\n\n def parser_msgs(self):\n return self._out.splitlines() + self._err.splitlines()\n", "path": "ietf/utils/xmldraft.py"}]}
| 3,213 | 177 |
gh_patches_debug_39012
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-6031
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Orthogonal initializer dimensions error
Orthogonal Initializer throws an error whenever the first dimension is larger than the second. This could be fixed by transposing the array rather than throwing an error.
e.g. 1. ` dense = L.Linear(64,128, initialW=initializers.Orthogonal())`

e.g. 2 `initializers.generate_array(initializers.Orthogonal(), (20,10), numpy, 'f')`

System Info:
Chainer: 5.0.0rc1
NumPy: 1.14.2
CuPy:
CuPy Version : 5.0.0rc1
CUDA Root : /usr/local/cuda
CUDA Build Version : 8000
CUDA Driver Version : 9020
CUDA Runtime Version : 8000
cuDNN Build Version : 7102
cuDNN Version : 7102
NCCL Build Version : 2213
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/initializers/orthogonal.py`
Content:
```
1 import numpy
2
3 from chainer import backend
4 from chainer import initializer
5 from chainer import utils
6
7
8 # Original code forked from MIT licensed keras project
9 # https://github.com/fchollet/keras/blob/master/keras/initializations.py
10
11 class Orthogonal(initializer.Initializer):
12 """Initializes array with an orthogonal system.
13
14 This initializer first makes a matrix of the same shape as the
15 array to be initialized whose elements are drawn independently from
16 standard Gaussian distribution.
17 Next, it applies QR decomposition to (the transpose of) the matrix.
18 To make the decomposition (almost surely) unique, we require the diagonal
19 of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,
20 https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).
21 Then, it initializes the array with the (semi-)orthogonal matrix Q.
22 Finally, the array is multiplied by the constant ``scale``.
23
24 If the ``ndim`` of the input array is more than 2, we consider the array
25 to be a matrix by concatenating all axes except the first one.
26
27 The number of vectors consisting of the orthogonal system
28 (i.e. first element of the shape of the array) must be equal to or smaller
29 than the dimension of each vector (i.e. second element of the shape of
30 the array).
31
32 Attributes:
33 scale (float): A constant to be multiplied by.
34 dtype: Data type specifier.
35
36 Reference: Saxe et al., https://arxiv.org/abs/1312.6120
37
38 """
39
40 def __init__(self, scale=1.1, dtype=None):
41 self.scale = scale
42 super(Orthogonal, self).__init__(dtype)
43
44 # TODO(Kenta Oono)
45 # How do we treat overcomplete base-system case?
46 def __call__(self, array):
47 if self.dtype is not None:
48 assert array.dtype == self.dtype
49 xp = backend.get_array_module(array)
50 if not array.shape: # 0-dim case
51 array[...] = self.scale * (2 * numpy.random.randint(2) - 1)
52 elif not array.size:
53 raise ValueError('Array to be initialized must be non-empty.')
54 else:
55 # numpy.prod returns float value when the argument is empty.
56 flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))
57 if flat_shape[0] > flat_shape[1]:
58 raise ValueError('Cannot make orthogonal system because'
59 ' # of vectors ({}) is larger than'
60 ' that of dimensions ({})'.format(
61 flat_shape[0], flat_shape[1]))
62 a = numpy.random.normal(size=flat_shape)
63 # cupy.linalg.qr requires cusolver in CUDA 8+
64 q, r = numpy.linalg.qr(a.T)
65 q *= numpy.copysign(self.scale, numpy.diag(r))
66 array[...] = xp.asarray(q.T.reshape(array.shape))
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/initializers/orthogonal.py b/chainer/initializers/orthogonal.py
--- a/chainer/initializers/orthogonal.py
+++ b/chainer/initializers/orthogonal.py
@@ -5,6 +5,14 @@
from chainer import utils
+_orthogonal_constraints = { # (assert emb., assert proj.)
+ 'auto': (False, False),
+ 'projection': (False, True),
+ 'embedding': (True, False),
+ 'basis': (True, True),
+}
+
+
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
@@ -32,13 +40,24 @@
Attributes:
scale (float): A constant to be multiplied by.
dtype: Data type specifier.
+ mode (str): Assertion on the initialized shape.
+ ``'auto'`` (default), ``'projection'`` (before v7),
+ ``'embedding'``, or ``'basis'``.
Reference: Saxe et al., https://arxiv.org/abs/1312.6120
"""
- def __init__(self, scale=1.1, dtype=None):
+ def __init__(self, scale=1.1, dtype=None, mode='auto'):
self.scale = scale
+ self.mode = mode
+ try:
+ self._checks = _orthogonal_constraints[mode]
+ except KeyError:
+ raise ValueError(
+ 'Invalid mode: {}. Choose from {}.'.format(
+ repr(mode),
+ ', '.join(repr(m) for m in _orthogonal_constraints)))
super(Orthogonal, self).__init__(dtype)
# TODO(Kenta Oono)
@@ -53,14 +72,22 @@
raise ValueError('Array to be initialized must be non-empty.')
else:
# numpy.prod returns float value when the argument is empty.
- flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))
- if flat_shape[0] > flat_shape[1]:
- raise ValueError('Cannot make orthogonal system because'
- ' # of vectors ({}) is larger than'
- ' that of dimensions ({})'.format(
- flat_shape[0], flat_shape[1]))
- a = numpy.random.normal(size=flat_shape)
+ out_dim = len(array)
+ in_dim = utils.size_of_shape(array.shape[1:])
+ if (in_dim > out_dim and self._checks[0]) or (
+ in_dim < out_dim and self._checks[1]):
+ raise ValueError(
+ 'Cannot make orthogonal {}.'
+ 'shape = {}, interpreted as '
+ '{}-dim input and {}-dim output.'.format(
+ self.mode, array.shape, in_dim, out_dim))
+ transpose = in_dim > out_dim
+ a = numpy.random.normal(size=(out_dim, in_dim))
+ if transpose:
+ a = a.T
# cupy.linalg.qr requires cusolver in CUDA 8+
- q, r = numpy.linalg.qr(a.T)
+ q, r = numpy.linalg.qr(a)
q *= numpy.copysign(self.scale, numpy.diag(r))
- array[...] = xp.asarray(q.T.reshape(array.shape))
+ if transpose:
+ q = q.T
+ array[...] = xp.asarray(q.reshape(array.shape))
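
A NumPy-only sketch mirroring the patched logic (illustrative, not Chainer code): `numpy.linalg.qr` yields orthonormal columns, so a tall weight such as the `(128, 64)` one from `L.Linear(64, 128)` can now be decomposed directly, while a wide weight is transposed before the decomposition and transposed back afterwards, instead of raising an error:

```python
import numpy as np

def orthogonal(shape, scale=1.1):
    out_dim = shape[0]
    in_dim = int(np.prod(shape[1:]))
    transpose = in_dim > out_dim           # wide weight: orthogonalise rows instead
    a = np.random.normal(size=(out_dim, in_dim))
    if transpose:
        a = a.T
    q, r = np.linalg.qr(a)                 # q has orthonormal columns
    q *= np.copysign(scale, np.diag(r))    # make the decomposition unique, apply scale
    if transpose:
        q = q.T
    return q.reshape(shape)

w = orthogonal((128, 64))                  # previously raised ValueError
print(np.allclose(w.T @ w, 1.1 ** 2 * np.eye(64)))  # True: columns are orthogonal
```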
|
{"golden_diff": "diff --git a/chainer/initializers/orthogonal.py b/chainer/initializers/orthogonal.py\n--- a/chainer/initializers/orthogonal.py\n+++ b/chainer/initializers/orthogonal.py\n@@ -5,6 +5,14 @@\n from chainer import utils\n \n \n+_orthogonal_constraints = { # (assert emb., assert proj.)\n+ 'auto': (False, False),\n+ 'projection': (False, True),\n+ 'embedding': (True, False),\n+ 'basis': (True, True),\n+}\n+\n+\n # Original code forked from MIT licensed keras project\n # https://github.com/fchollet/keras/blob/master/keras/initializations.py\n \n@@ -32,13 +40,24 @@\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n+ mode (str): Assertion on the initialized shape.\n+ ``'auto'`` (default), ``'projection'`` (before v7),\n+ ``'embedding'``, or ``'basis'``.\n \n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n \n \"\"\"\n \n- def __init__(self, scale=1.1, dtype=None):\n+ def __init__(self, scale=1.1, dtype=None, mode='auto'):\n self.scale = scale\n+ self.mode = mode\n+ try:\n+ self._checks = _orthogonal_constraints[mode]\n+ except KeyError:\n+ raise ValueError(\n+ 'Invalid mode: {}. Choose from {}.'.format(\n+ repr(mode),\n+ ', '.join(repr(m) for m in _orthogonal_constraints)))\n super(Orthogonal, self).__init__(dtype)\n \n # TODO(Kenta Oono)\n@@ -53,14 +72,22 @@\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n- flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))\n- if flat_shape[0] > flat_shape[1]:\n- raise ValueError('Cannot make orthogonal system because'\n- ' # of vectors ({}) is larger than'\n- ' that of dimensions ({})'.format(\n- flat_shape[0], flat_shape[1]))\n- a = numpy.random.normal(size=flat_shape)\n+ out_dim = len(array)\n+ in_dim = utils.size_of_shape(array.shape[1:])\n+ if (in_dim > out_dim and self._checks[0]) or (\n+ in_dim < out_dim and self._checks[1]):\n+ raise ValueError(\n+ 'Cannot make orthogonal {}.'\n+ 'shape = {}, interpreted as '\n+ '{}-dim input and {}-dim output.'.format(\n+ self.mode, array.shape, in_dim, out_dim))\n+ transpose = in_dim > out_dim\n+ a = numpy.random.normal(size=(out_dim, in_dim))\n+ if transpose:\n+ a = a.T\n # cupy.linalg.qr requires cusolver in CUDA 8+\n- q, r = numpy.linalg.qr(a.T)\n+ q, r = numpy.linalg.qr(a)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n- array[...] = xp.asarray(q.T.reshape(array.shape))\n+ if transpose:\n+ q = q.T\n+ array[...] = xp.asarray(q.reshape(array.shape))\n", "issue": "Orthogonal initializer dimensions error\nOrthogonal Initializer throws an error whenever the first dimension is larger than the second. This could be fixed by transposing the array rather than throwing an error.\r\n\r\n\r\ne.g. 1. ` dense = L.Linear(64,128, initialW=initializers.Orthogonal())`\r\n\r\n\r\n\r\n\r\ne.g. 
2 `initializers.generate_array(initializers.Orthogonal(), (20,10), numpy, 'f')`\r\n\r\n\r\n\r\n\r\nSystem Info:\r\nChainer: 5.0.0rc1\r\nNumPy: 1.14.2\r\nCuPy:\r\n CuPy Version : 5.0.0rc1\r\n CUDA Root : /usr/local/cuda\r\n CUDA Build Version : 8000\r\n CUDA Driver Version : 9020\r\n CUDA Runtime Version : 8000\r\n cuDNN Build Version : 7102\r\n cuDNN Version : 7102\r\n NCCL Build Version : 2213\r\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import backend\nfrom chainer import initializer\nfrom chainer import utils\n\n\n# Original code forked from MIT licensed keras project\n# https://github.com/fchollet/keras/blob/master/keras/initializations.py\n\nclass Orthogonal(initializer.Initializer):\n \"\"\"Initializes array with an orthogonal system.\n\n This initializer first makes a matrix of the same shape as the\n array to be initialized whose elements are drawn independently from\n standard Gaussian distribution.\n Next, it applies QR decomposition to (the transpose of) the matrix.\n To make the decomposition (almost surely) unique, we require the diagonal\n of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,\n https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).\n Then, it initializes the array with the (semi-)orthogonal matrix Q.\n Finally, the array is multiplied by the constant ``scale``.\n\n If the ``ndim`` of the input array is more than 2, we consider the array\n to be a matrix by concatenating all axes except the first one.\n\n The number of vectors consisting of the orthogonal system\n (i.e. first element of the shape of the array) must be equal to or smaller\n than the dimension of each vector (i.e. second element of the shape of\n the array).\n\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n\n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n\n \"\"\"\n\n def __init__(self, scale=1.1, dtype=None):\n self.scale = scale\n super(Orthogonal, self).__init__(dtype)\n\n # TODO(Kenta Oono)\n # How do we treat overcomplete base-system case?\n def __call__(self, array):\n if self.dtype is not None:\n assert array.dtype == self.dtype\n xp = backend.get_array_module(array)\n if not array.shape: # 0-dim case\n array[...] = self.scale * (2 * numpy.random.randint(2) - 1)\n elif not array.size:\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))\n if flat_shape[0] > flat_shape[1]:\n raise ValueError('Cannot make orthogonal system because'\n ' # of vectors ({}) is larger than'\n ' that of dimensions ({})'.format(\n flat_shape[0], flat_shape[1]))\n a = numpy.random.normal(size=flat_shape)\n # cupy.linalg.qr requires cusolver in CUDA 8+\n q, r = numpy.linalg.qr(a.T)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n array[...] 
= xp.asarray(q.T.reshape(array.shape))\n", "path": "chainer/initializers/orthogonal.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import backend\nfrom chainer import initializer\nfrom chainer import utils\n\n\n_orthogonal_constraints = { # (assert emb., assert proj.)\n 'auto': (False, False),\n 'projection': (False, True),\n 'embedding': (True, False),\n 'basis': (True, True),\n}\n\n\n# Original code forked from MIT licensed keras project\n# https://github.com/fchollet/keras/blob/master/keras/initializations.py\n\nclass Orthogonal(initializer.Initializer):\n \"\"\"Initializes array with an orthogonal system.\n\n This initializer first makes a matrix of the same shape as the\n array to be initialized whose elements are drawn independently from\n standard Gaussian distribution.\n Next, it applies QR decomposition to (the transpose of) the matrix.\n To make the decomposition (almost surely) unique, we require the diagonal\n of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,\n https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).\n Then, it initializes the array with the (semi-)orthogonal matrix Q.\n Finally, the array is multiplied by the constant ``scale``.\n\n If the ``ndim`` of the input array is more than 2, we consider the array\n to be a matrix by concatenating all axes except the first one.\n\n The number of vectors consisting of the orthogonal system\n (i.e. first element of the shape of the array) must be equal to or smaller\n than the dimension of each vector (i.e. second element of the shape of\n the array).\n\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n mode (str): Assertion on the initialized shape.\n ``'auto'`` (default), ``'projection'`` (before v7),\n ``'embedding'``, or ``'basis'``.\n\n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n\n \"\"\"\n\n def __init__(self, scale=1.1, dtype=None, mode='auto'):\n self.scale = scale\n self.mode = mode\n try:\n self._checks = _orthogonal_constraints[mode]\n except KeyError:\n raise ValueError(\n 'Invalid mode: {}. Choose from {}.'.format(\n repr(mode),\n ', '.join(repr(m) for m in _orthogonal_constraints)))\n super(Orthogonal, self).__init__(dtype)\n\n # TODO(Kenta Oono)\n # How do we treat overcomplete base-system case?\n def __call__(self, array):\n if self.dtype is not None:\n assert array.dtype == self.dtype\n xp = backend.get_array_module(array)\n if not array.shape: # 0-dim case\n array[...] = self.scale * (2 * numpy.random.randint(2) - 1)\n elif not array.size:\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n out_dim = len(array)\n in_dim = utils.size_of_shape(array.shape[1:])\n if (in_dim > out_dim and self._checks[0]) or (\n in_dim < out_dim and self._checks[1]):\n raise ValueError(\n 'Cannot make orthogonal {}.'\n 'shape = {}, interpreted as '\n '{}-dim input and {}-dim output.'.format(\n self.mode, array.shape, in_dim, out_dim))\n transpose = in_dim > out_dim\n a = numpy.random.normal(size=(out_dim, in_dim))\n if transpose:\n a = a.T\n # cupy.linalg.qr requires cusolver in CUDA 8+\n q, r = numpy.linalg.qr(a)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n if transpose:\n q = q.T\n array[...] = xp.asarray(q.reshape(array.shape))\n", "path": "chainer/initializers/orthogonal.py"}]}
| 1,402 | 773 |
gh_patches_debug_2430
|
rasdani/github-patches
|
git_diff
|
pypa__pip-1390
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip doesn't detect a venv created virtual environment as a virtual environment
The venv integration in Python 3.4 fails if PIP_REQUIREVIRTUALENV is set (http://bugs.python.org/issue19734)
I'm currently working around this by forcibly clearing the setting in the test, but the PIP_REQUIREVIRTUALENV check should pass when sys.prefix and sys.base_prefix are different.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/locations.py`
Content:
```
1 """Locations where we look for configs, install stuff, etc"""
2
3 import sys
4 import site
5 import os
6 import tempfile
7 from distutils.command.install import install, SCHEME_KEYS
8 import getpass
9 from pip.backwardcompat import get_python_lib, get_path_uid, user_site
10 import pip.exceptions
11
12
13 DELETE_MARKER_MESSAGE = '''\
14 This file is placed here by pip to indicate the source was put
15 here by pip.
16
17 Once this package is successfully installed this source code will be
18 deleted (unless you remove this file).
19 '''
20 PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
21
22 def write_delete_marker_file(directory):
23 """
24 Write the pip delete marker file into this directory.
25 """
26 filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
27 marker_fp = open(filepath, 'w')
28 marker_fp.write(DELETE_MARKER_MESSAGE)
29 marker_fp.close()
30
31
32 def running_under_virtualenv():
33 """
34 Return True if we're running inside a virtualenv, False otherwise.
35
36 """
37 return hasattr(sys, 'real_prefix')
38
39
40 def virtualenv_no_global():
41 """
42 Return True if in a venv and no system site packages.
43 """
44 #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file
45 site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
46 no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
47 if running_under_virtualenv() and os.path.isfile(no_global_file):
48 return True
49
50 def __get_username():
51 """ Returns the effective username of the current process. """
52 if sys.platform == 'win32':
53 return getpass.getuser()
54 import pwd
55 return pwd.getpwuid(os.geteuid()).pw_name
56
57 def _get_build_prefix():
58 """ Returns a safe build_prefix """
59 path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' %
60 __get_username())
61 if sys.platform == 'win32':
62 """ on windows(tested on 7) temp dirs are isolated """
63 return path
64 try:
65 os.mkdir(path)
66 write_delete_marker_file(path)
67 except OSError:
68 file_uid = None
69 try:
70 # raises OSError for symlinks
71 # https://github.com/pypa/pip/pull/935#discussion_r5307003
72 file_uid = get_path_uid(path)
73 except OSError:
74 file_uid = None
75
76 if file_uid != os.geteuid():
77 msg = "The temporary folder for building (%s) is either not owned by you, or is a symlink." \
78 % path
79 print (msg)
80 print("pip will not work until the temporary folder is " + \
81 "either deleted or is a real directory owned by your user account.")
82 raise pip.exceptions.InstallationError(msg)
83 return path
84
85 if running_under_virtualenv():
86 build_prefix = os.path.join(sys.prefix, 'build')
87 src_prefix = os.path.join(sys.prefix, 'src')
88 else:
89 # Note: intentionally NOT using mkdtemp
90 # See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp
91 build_prefix = _get_build_prefix()
92
93 ## FIXME: keep src in cwd for now (it is not a temporary folder)
94 try:
95 src_prefix = os.path.join(os.getcwd(), 'src')
96 except OSError:
97 # In case the current working directory has been renamed or deleted
98 sys.exit("The folder you are executing pip from can no longer be found.")
99
100 # under Mac OS X + virtualenv sys.prefix is not properly resolved
101 # it is something like /path/to/python/bin/..
102 # Note: using realpath due to tmp dirs on OSX being symlinks
103 build_prefix = os.path.abspath(os.path.realpath(build_prefix))
104 src_prefix = os.path.abspath(src_prefix)
105
106 # FIXME doesn't account for venv linked to global site-packages
107
108 site_packages = get_python_lib()
109 user_dir = os.path.expanduser('~')
110 if sys.platform == 'win32':
111 bin_py = os.path.join(sys.prefix, 'Scripts')
112 bin_user = os.path.join(user_site, 'Scripts') if user_site else None
113 # buildout uses 'bin' on Windows too?
114 if not os.path.exists(bin_py):
115 bin_py = os.path.join(sys.prefix, 'bin')
116 bin_user = os.path.join(user_site, 'bin') if user_site else None
117 default_storage_dir = os.path.join(user_dir, 'pip')
118 default_config_file = os.path.join(default_storage_dir, 'pip.ini')
119 default_log_file = os.path.join(default_storage_dir, 'pip.log')
120 else:
121 bin_py = os.path.join(sys.prefix, 'bin')
122 bin_user = os.path.join(user_site, 'bin') if user_site else None
123 default_storage_dir = os.path.join(user_dir, '.pip')
124 default_config_file = os.path.join(default_storage_dir, 'pip.conf')
125 default_log_file = os.path.join(default_storage_dir, 'pip.log')
126
127 # Forcing to use /usr/local/bin for standard Mac OS X framework installs
128 # Also log to ~/Library/Logs/ for use with the Console.app log viewer
129 if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
130 bin_py = '/usr/local/bin'
131 default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log')
132
133
134 def distutils_scheme(dist_name, user=False, home=None, root=None):
135 """
136 Return a distutils install scheme
137 """
138 from distutils.dist import Distribution
139
140 scheme = {}
141 d = Distribution({'name': dist_name})
142 d.parse_config_files()
143 i = d.get_command_obj('install', create=True)
144 # NOTE: setting user or home has the side-effect of creating the home dir or
145 # user base for installations during finalize_options()
146 # ideally, we'd prefer a scheme class that has no side-effects.
147 i.user = user or i.user
148 i.home = home or i.home
149 i.root = root or i.root
150 i.finalize_options()
151 for key in SCHEME_KEYS:
152 scheme[key] = getattr(i, 'install_'+key)
153
154 if running_under_virtualenv():
155 scheme['headers'] = os.path.join(sys.prefix,
156 'include',
157 'site',
158 'python' + sys.version[:3],
159 dist_name)
160
161 if root is not None:
162 scheme["headers"] = os.path.join(
163 root,
164 os.path.abspath(scheme["headers"])[1:],
165 )
166
167 return scheme
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pip/locations.py b/pip/locations.py
--- a/pip/locations.py
+++ b/pip/locations.py
@@ -34,7 +34,12 @@
Return True if we're running inside a virtualenv, False otherwise.
"""
- return hasattr(sys, 'real_prefix')
+ if hasattr(sys, 'real_prefix'):
+ return True
+ elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
+ return True
+
+ return False
def virtualenv_no_global():
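As a quick sanity check, the patched predicate can be exercised on its own: under a PEP 405 venv, `sys.prefix` differs from `sys.base_prefix`, while a classic virtualenv still sets `sys.real_prefix`. A minimal sketch mirroring the diff (not pip's full module):

```python
# Standalone version of the venv-aware check introduced by the diff above.
import sys

def running_under_virtualenv():
    if hasattr(sys, 'real_prefix'):            # classic virtualenv
        return True
    # venv (Python 3.3+): base_prefix points back at the base installation
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)

print(running_under_virtualenv())              # True inside a venv or virtualenv
```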
|
{"golden_diff": "diff --git a/pip/locations.py b/pip/locations.py\n--- a/pip/locations.py\n+++ b/pip/locations.py\n@@ -34,7 +34,12 @@\n Return True if we're running inside a virtualenv, False otherwise.\n \n \"\"\"\n- return hasattr(sys, 'real_prefix')\n+ if hasattr(sys, 'real_prefix'):\n+ return True\n+ elif sys.prefix != getattr(sys, \"base_prefix\", sys.prefix):\n+ return True\n+\n+ return False\n \n \n def virtualenv_no_global():\n", "issue": "pip doesn't detect a venv created virtual environment as a virtual environment\nThe venv integration in Python 3.4 fails if PIP_REQUIREVIRTUALENV is set (http://bugs.python.org/issue19734)\n\nI'm currently working around this by forcibly clearing the setting in the test, but the PIP_REQUIREVIRTUALENV check should pass when sys.prefix and sys.base_prefix are different.\n\n", "before_files": [{"content": "\"\"\"Locations where we look for configs, install stuff, etc\"\"\"\n\nimport sys\nimport site\nimport os\nimport tempfile\nfrom distutils.command.install import install, SCHEME_KEYS\nimport getpass\nfrom pip.backwardcompat import get_python_lib, get_path_uid, user_site\nimport pip.exceptions\n\n\nDELETE_MARKER_MESSAGE = '''\\\nThis file is placed here by pip to indicate the source was put\nhere by pip.\n\nOnce this package is successfully installed this source code will be\ndeleted (unless you remove this file).\n'''\nPIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'\n\ndef write_delete_marker_file(directory):\n \"\"\"\n Write the pip delete marker file into this directory.\n \"\"\"\n filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)\n marker_fp = open(filepath, 'w')\n marker_fp.write(DELETE_MARKER_MESSAGE)\n marker_fp.close()\n\n\ndef running_under_virtualenv():\n \"\"\"\n Return True if we're running inside a virtualenv, False otherwise.\n\n \"\"\"\n return hasattr(sys, 'real_prefix')\n\n\ndef virtualenv_no_global():\n \"\"\"\n Return True if in a venv and no system site packages.\n \"\"\"\n #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file\n site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))\n no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')\n if running_under_virtualenv() and os.path.isfile(no_global_file):\n return True\n\ndef __get_username():\n \"\"\" Returns the effective username of the current process. 
\"\"\"\n if sys.platform == 'win32':\n return getpass.getuser()\n import pwd\n return pwd.getpwuid(os.geteuid()).pw_name\n\ndef _get_build_prefix():\n \"\"\" Returns a safe build_prefix \"\"\"\n path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' %\n __get_username())\n if sys.platform == 'win32':\n \"\"\" on windows(tested on 7) temp dirs are isolated \"\"\"\n return path\n try:\n os.mkdir(path)\n write_delete_marker_file(path)\n except OSError:\n file_uid = None\n try:\n # raises OSError for symlinks\n # https://github.com/pypa/pip/pull/935#discussion_r5307003\n file_uid = get_path_uid(path)\n except OSError:\n file_uid = None\n\n if file_uid != os.geteuid():\n msg = \"The temporary folder for building (%s) is either not owned by you, or is a symlink.\" \\\n % path\n print (msg)\n print(\"pip will not work until the temporary folder is \" + \\\n \"either deleted or is a real directory owned by your user account.\")\n raise pip.exceptions.InstallationError(msg)\n return path\n\nif running_under_virtualenv():\n build_prefix = os.path.join(sys.prefix, 'build')\n src_prefix = os.path.join(sys.prefix, 'src')\nelse:\n # Note: intentionally NOT using mkdtemp\n # See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp\n build_prefix = _get_build_prefix()\n\n ## FIXME: keep src in cwd for now (it is not a temporary folder)\n try:\n src_prefix = os.path.join(os.getcwd(), 'src')\n except OSError:\n # In case the current working directory has been renamed or deleted\n sys.exit(\"The folder you are executing pip from can no longer be found.\")\n\n# under Mac OS X + virtualenv sys.prefix is not properly resolved\n# it is something like /path/to/python/bin/..\n# Note: using realpath due to tmp dirs on OSX being symlinks\nbuild_prefix = os.path.abspath(os.path.realpath(build_prefix))\nsrc_prefix = os.path.abspath(src_prefix)\n\n# FIXME doesn't account for venv linked to global site-packages\n\nsite_packages = get_python_lib()\nuser_dir = os.path.expanduser('~')\nif sys.platform == 'win32':\n bin_py = os.path.join(sys.prefix, 'Scripts')\n bin_user = os.path.join(user_site, 'Scripts') if user_site else None\n # buildout uses 'bin' on Windows too?\n if not os.path.exists(bin_py):\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, 'pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.ini')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\nelse:\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, '.pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.conf')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\n\n # Forcing to use /usr/local/bin for standard Mac OS X framework installs\n # Also log to ~/Library/Logs/ for use with the Console.app log viewer\n if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':\n bin_py = '/usr/local/bin'\n default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log')\n\n\ndef distutils_scheme(dist_name, user=False, home=None, root=None):\n \"\"\"\n Return a distutils install scheme\n \"\"\"\n from distutils.dist import Distribution\n\n scheme = {}\n d = Distribution({'name': dist_name})\n d.parse_config_files()\n i = d.get_command_obj('install', create=True)\n # NOTE: setting user or home has the side-effect of creating the home dir or\n # user base for 
installations during finalize_options()\n # ideally, we'd prefer a scheme class that has no side-effects.\n i.user = user or i.user\n i.home = home or i.home\n i.root = root or i.root\n i.finalize_options()\n for key in SCHEME_KEYS:\n scheme[key] = getattr(i, 'install_'+key)\n\n if running_under_virtualenv():\n scheme['headers'] = os.path.join(sys.prefix,\n 'include',\n 'site',\n 'python' + sys.version[:3],\n dist_name)\n\n if root is not None:\n scheme[\"headers\"] = os.path.join(\n root,\n os.path.abspath(scheme[\"headers\"])[1:],\n )\n\n return scheme\n", "path": "pip/locations.py"}], "after_files": [{"content": "\"\"\"Locations where we look for configs, install stuff, etc\"\"\"\n\nimport sys\nimport site\nimport os\nimport tempfile\nfrom distutils.command.install import install, SCHEME_KEYS\nimport getpass\nfrom pip.backwardcompat import get_python_lib, get_path_uid, user_site\nimport pip.exceptions\n\n\nDELETE_MARKER_MESSAGE = '''\\\nThis file is placed here by pip to indicate the source was put\nhere by pip.\n\nOnce this package is successfully installed this source code will be\ndeleted (unless you remove this file).\n'''\nPIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'\n\ndef write_delete_marker_file(directory):\n \"\"\"\n Write the pip delete marker file into this directory.\n \"\"\"\n filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)\n marker_fp = open(filepath, 'w')\n marker_fp.write(DELETE_MARKER_MESSAGE)\n marker_fp.close()\n\n\ndef running_under_virtualenv():\n \"\"\"\n Return True if we're running inside a virtualenv, False otherwise.\n\n \"\"\"\n if hasattr(sys, 'real_prefix'):\n return True\n elif sys.prefix != getattr(sys, \"base_prefix\", sys.prefix):\n return True\n\n return False\n\n\ndef virtualenv_no_global():\n \"\"\"\n Return True if in a venv and no system site packages.\n \"\"\"\n #this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file\n site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))\n no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')\n if running_under_virtualenv() and os.path.isfile(no_global_file):\n return True\n\ndef __get_username():\n \"\"\" Returns the effective username of the current process. 
\"\"\"\n if sys.platform == 'win32':\n return getpass.getuser()\n import pwd\n return pwd.getpwuid(os.geteuid()).pw_name\n\ndef _get_build_prefix():\n \"\"\" Returns a safe build_prefix \"\"\"\n path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' %\n __get_username())\n if sys.platform == 'win32':\n \"\"\" on windows(tested on 7) temp dirs are isolated \"\"\"\n return path\n try:\n os.mkdir(path)\n write_delete_marker_file(path)\n except OSError:\n file_uid = None\n try:\n # raises OSError for symlinks\n # https://github.com/pypa/pip/pull/935#discussion_r5307003\n file_uid = get_path_uid(path)\n except OSError:\n file_uid = None\n\n if file_uid != os.geteuid():\n msg = \"The temporary folder for building (%s) is either not owned by you, or is a symlink.\" \\\n % path\n print (msg)\n print(\"pip will not work until the temporary folder is \" + \\\n \"either deleted or is a real directory owned by your user account.\")\n raise pip.exceptions.InstallationError(msg)\n return path\n\nif running_under_virtualenv():\n build_prefix = os.path.join(sys.prefix, 'build')\n src_prefix = os.path.join(sys.prefix, 'src')\nelse:\n # Note: intentionally NOT using mkdtemp\n # See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp\n build_prefix = _get_build_prefix()\n\n ## FIXME: keep src in cwd for now (it is not a temporary folder)\n try:\n src_prefix = os.path.join(os.getcwd(), 'src')\n except OSError:\n # In case the current working directory has been renamed or deleted\n sys.exit(\"The folder you are executing pip from can no longer be found.\")\n\n# under Mac OS X + virtualenv sys.prefix is not properly resolved\n# it is something like /path/to/python/bin/..\n# Note: using realpath due to tmp dirs on OSX being symlinks\nbuild_prefix = os.path.abspath(os.path.realpath(build_prefix))\nsrc_prefix = os.path.abspath(src_prefix)\n\n# FIXME doesn't account for venv linked to global site-packages\n\nsite_packages = get_python_lib()\nuser_dir = os.path.expanduser('~')\nif sys.platform == 'win32':\n bin_py = os.path.join(sys.prefix, 'Scripts')\n bin_user = os.path.join(user_site, 'Scripts') if user_site else None\n # buildout uses 'bin' on Windows too?\n if not os.path.exists(bin_py):\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, 'pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.ini')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\nelse:\n bin_py = os.path.join(sys.prefix, 'bin')\n bin_user = os.path.join(user_site, 'bin') if user_site else None\n default_storage_dir = os.path.join(user_dir, '.pip')\n default_config_file = os.path.join(default_storage_dir, 'pip.conf')\n default_log_file = os.path.join(default_storage_dir, 'pip.log')\n\n # Forcing to use /usr/local/bin for standard Mac OS X framework installs\n # Also log to ~/Library/Logs/ for use with the Console.app log viewer\n if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':\n bin_py = '/usr/local/bin'\n default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log')\n\n\ndef distutils_scheme(dist_name, user=False, home=None, root=None):\n \"\"\"\n Return a distutils install scheme\n \"\"\"\n from distutils.dist import Distribution\n\n scheme = {}\n d = Distribution({'name': dist_name})\n d.parse_config_files()\n i = d.get_command_obj('install', create=True)\n # NOTE: setting user or home has the side-effect of creating the home dir or\n # user base for 
installations during finalize_options()\n # ideally, we'd prefer a scheme class that has no side-effects.\n i.user = user or i.user\n i.home = home or i.home\n i.root = root or i.root\n i.finalize_options()\n for key in SCHEME_KEYS:\n scheme[key] = getattr(i, 'install_'+key)\n\n if running_under_virtualenv():\n scheme['headers'] = os.path.join(sys.prefix,\n 'include',\n 'site',\n 'python' + sys.version[:3],\n dist_name)\n\n if root is not None:\n scheme[\"headers\"] = os.path.join(\n root,\n os.path.abspath(scheme[\"headers\"])[1:],\n )\n\n return scheme\n", "path": "pip/locations.py"}]}
| 2,169 | 122 |
gh_patches_debug_22931
|
rasdani/github-patches
|
git_diff
|
GPflow__GPflow-1071
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
print summary fails for sum kernel
System information:
* Python version: 3.7
* TensorFlow installed from pip tf2 nightly
* TensorFlow version: '2.0.0-dev20190930'
* GPflow installed from (source or binary): python setup.py develop
* GPflow version: awav/gpflow-2.0
_Current behavior:_
MWE:
```
k = gpflow.kernels.SquaredExponential() + gpflow.kernels.SquaredExponential()
print_summary(k)
Output:
name class transform trainable shape dtype value
------ ------- ----------- ----------- ------- ------- -------
```
```
_Expected behavior:_ it should print the values of the summed kernel's parameters.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpflow/utilities/utilities.py`
Content:
```
1 import re
2 from functools import lru_cache
3 from typing import Callable, Dict, List, Optional, Union
4
5 import numpy as np
6 import tensorflow as tf
7 from tabulate import tabulate
8 from tensorflow.python.training.tracking.data_structures import ListWrapper, _DictWrapper
9
10 from ..base import Parameter
11 from ..config import summary_fmt
12
13 __all__ = [
14 "set_trainable",
15 "multiple_assign",
16 "training_loop",
17 "print_summary",
18 ]
19
20
21 def set_trainable(model: tf.Module, flag: bool = False):
22 """
23 Set trainable flag for all `tf.Variable`s and `gpflow.Parameter`s in a module.
24 """
25 for variable in model.trainable_variables:
26 variable._trainable = flag
27
28
29 def multiple_assign(input: tf.Module, vars_dict: Dict[str, tf.Tensor]):
30 """
31 Multiple assign takes a dictionary with new values. Dictionary keys are paths to the
32 `tf.Variable`s or `gpflow.Parameters` of the input module.
33
34 :param input: `tf.Module`.
35 :param vars_dict: a dictionary with keys of the form "module.path.to.variable" and new value tensors.
36 """
37 reference_var_dict = leaf_components(input)
38 for path, value in vars_dict.items():
39 reference_var_dict[path].assign(value)
40
41
42 def training_loop(closure: Callable[..., tf.Tensor],
43 optimizer: Optional[tf.optimizers.Optimizer] = None,
44 var_list: List[tf.Variable] = None,
45 maxiter=1e3,
46 jit=False):
47 """
48 Simple generic training loop. At each iteration uses a GradientTape to compute
49 the gradients of a loss function with respect to a set of variables.
50
51 :param closure: Callable that constructs a loss function based on data and model being trained
52 :param optimizer: tf.optimizers or tf.keras.optimizers that updates variables by applying the
53 corresponding loss gradients. Adam is a default optimizer with default settings.
54 :param var_list: List of model variables to be learnt during training
55 :param maxiter: Maximum number of
56 :return:
57 """
58
59 optimizer = tf.optimizers.Adam() if optimizer is None else optimizer
60
61 def optimization_step():
62 with tf.GradientTape() as tape:
63 tape.watch(var_list)
64 loss = closure()
65 grads = tape.gradient(loss, var_list)
66 optimizer.apply_gradients(zip(grads, var_list))
67
68 if jit:
69 optimization_step = tf.function(optimization_step)
70
71 for _ in range(int(maxiter)):
72 optimization_step()
73
74
75 def print_summary(module: tf.Module, fmt: str = None):
76 """
77 Prints a summary of the parameters and variables contained in a tf.Module.
78 """
79
80 fmt = fmt if fmt is not None else summary_fmt()
81 column_names = ['name', 'class', 'transform', 'trainable', 'shape', 'dtype', 'value']
82
83 def get_name(v):
84 return v.__class__.__name__
85
86 def get_transform(v):
87 if hasattr(v, "transform") and v.transform is not None:
88 return v.transform.__class__.__name__
89 return None
90
91 merged_leaf_components = _merge_leaf_components(leaf_components(module))
92
93 column_values = [[
94 path,
95 get_name(variable),
96 get_transform(variable),
97 variable.trainable,
98 variable.shape,
99 variable.dtype.name,
100 _str_tensor_value(variable.numpy())
101 ] for path, variable in merged_leaf_components.items()]
102
103 if fmt == "notebook":
104 from IPython.core.display import display, HTML
105 tab = tabulate(column_values, headers=column_names, tablefmt="html")
106 display(HTML(tab))
107 else:
108 print(tabulate(column_values, headers=column_names, tablefmt=fmt))
109
110
111 def leaf_components(input: tf.Module):
112 return _get_leaf_components(input)
113
114
115 def _merge_leaf_components(
116 input: Dict[str, Union[tf.Tensor, Parameter]]) -> Dict[str, Union[tf.Tensor, Parameter]]:
117 if len(set(input.values())) == len(input):
118 return input
119 tmp_dict = dict()
120 for key, item in input.items():
121 if item in tmp_dict:
122 tmp_dict[item] = f"{tmp_dict[item]}\n{key}"
123 else:
124 tmp_dict[item] = key
125 return {key: item for item, key in tmp_dict.items()}
126
127
128 def _get_leaf_components(input: tf.Module, prefix: Optional[str] = None):
129 """
130 Returns a list of tuples each corresponding to a gpflow.Parameter or tf.Variable in the each
131 submodules of a given tf.Module. Each tuple consists of an specific Parameter (or Variable) and
132 its relative path inside the module, which is constructed recursively by adding a prefix with
133 the path to the current module. Designed to be used as a helper for the method 'print_summary'.
134
135 :param module: tf.Module including keras.Model, keras.layers.Layer and gpflow.Module.
136 :param prefix: string containing the relative path to module, by default set to None.
137 :return:
138 """
139 if not isinstance(input, tf.Module):
140 raise TypeError("Input object expected to have `tf.Module` type")
141
142 prefix = input.__class__.__name__ if prefix is None else prefix
143 var_dict = dict()
144
145 for key, submodule in vars(input).items():
146 if key in tf.Module._TF_MODULE_IGNORED_PROPERTIES:
147 continue
148 elif isinstance(submodule, Parameter) or isinstance(submodule, tf.Variable):
149 var_dict[f"{prefix}.{key}"] = submodule
150 elif isinstance(submodule, tf.Module):
151 submodule_var = _get_leaf_components(submodule, prefix=f"{prefix}.{key}")
152 var_dict.update(submodule_var)
153 elif isinstance(submodule, ListWrapper):
154 submodule_name = input.__class__.__name__
155 for term_idx, subterm in enumerate(submodule):
156 subterm_key = f"{submodule_name}_{key}[{term_idx}]"
157 if isinstance(subterm, Parameter):
158 subterm_var = _get_leaf_components(subterm, prefix=f"{prefix}.{subterm_key}")
159 var_dict.update(subterm_var)
160 elif isinstance(submodule, _DictWrapper):
161 submodule_name = input.__class__.__name__
162 for term_key, subterm in submodule.items():
163 subterm_key = f"{submodule_name}_{key}[{term_key}]"
164 subterm_var = _get_leaf_components(subterm, prefix=f"{prefix}.{subterm_key}")
165 var_dict.update(subterm_var)
166 return var_dict
167
168
169 @lru_cache()
170 def _first_three_elements_regexp():
171 num_re = r"[+\-]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)?"
172 pat_re = rf"^(?:(\[+)\s*)?({num_re})(?:\s+({num_re})(?:\s+({num_re}))?)?.*?"
173 return re.compile(pat_re)
174
175
176 def _str_tensor_value(value: np.ndarray):
177 value_str = str(value)
178 if value.size <= 3:
179 return value_str
180
181 max_chars = 500
182 value_str = value_str[:max_chars]
183 regexp = _first_three_elements_regexp()
184 match = regexp.match(value_str)
185 assert match is not None
186 brackets, elem1, elem2, elem3 = match.groups()
187
188 out = f"{elem1}"
189 if elem2 is not None:
190 out = f"{out}{f', {elem2}'}"
191 if elem3 is not None:
192 out = f"{out}{f', {elem3}'}"
193 if brackets is not None:
194 out = f"{brackets}{out}..."
195
196 return out
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gpflow/utilities/utilities.py b/gpflow/utilities/utilities.py
--- a/gpflow/utilities/utilities.py
+++ b/gpflow/utilities/utilities.py
@@ -100,6 +100,7 @@
_str_tensor_value(variable.numpy())
] for path, variable in merged_leaf_components.items()]
+
if fmt == "notebook":
from IPython.core.display import display, HTML
tab = tabulate(column_values, headers=column_names, tablefmt="html")
@@ -154,7 +155,7 @@
submodule_name = input.__class__.__name__
for term_idx, subterm in enumerate(submodule):
subterm_key = f"{submodule_name}_{key}[{term_idx}]"
- if isinstance(subterm, Parameter):
+ if isinstance(subterm, tf.Module):
subterm_var = _get_leaf_components(subterm, prefix=f"{prefix}.{subterm_key}")
var_dict.update(subterm_var)
elif isinstance(submodule, _DictWrapper):
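The one-line widening from `Parameter` to `tf.Module` matters because a summed kernel keeps its summands in a tracked list (`ListWrapper`), and each summand is a kernel, i.e. a `tf.Module` rather than a bare `Parameter`, so the old check skipped the whole list. A quick way to see the effect with the fix applied (illustrative usage; the exact rows depend on the GPflow version):

```python
# Re-running the MWE from the issue against the patched utilities
# (illustrative only; assumes a GPflow 2.x install containing this fix).
import gpflow
from gpflow.utilities import print_summary

k = gpflow.kernels.SquaredExponential() + gpflow.kernels.SquaredExponential()
print_summary(k)  # now lists the variance and lengthscale parameters of both summands
```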
|
{"golden_diff": "diff --git a/gpflow/utilities/utilities.py b/gpflow/utilities/utilities.py\n--- a/gpflow/utilities/utilities.py\n+++ b/gpflow/utilities/utilities.py\n@@ -100,6 +100,7 @@\n _str_tensor_value(variable.numpy())\n ] for path, variable in merged_leaf_components.items()]\n \n+\n if fmt == \"notebook\":\n from IPython.core.display import display, HTML\n tab = tabulate(column_values, headers=column_names, tablefmt=\"html\")\n@@ -154,7 +155,7 @@\n submodule_name = input.__class__.__name__\n for term_idx, subterm in enumerate(submodule):\n subterm_key = f\"{submodule_name}_{key}[{term_idx}]\"\n- if isinstance(subterm, Parameter):\n+ if isinstance(subterm, tf.Module):\n subterm_var = _get_leaf_components(subterm, prefix=f\"{prefix}.{subterm_key}\")\n var_dict.update(subterm_var)\n elif isinstance(submodule, _DictWrapper):\n", "issue": "print summary fails for sum kernel\n\r\nSystem information:\r\n\r\n* Python version: 3.7\r\n* TensorFlow installed from pip tf2 nightly\r\n* TensorFlow version (use command below):'2.0.0-dev20190930'\r\n* GPflow installed from (source or binary): python setup.py develop\r\n* GPflow version: awav/gpflow-2.0\r\n\r\n\r\n\r\n... _Describe the current behavior_\r\nMWE:\r\n```\r\nk = gpflow.kernels.SquaredExponential() + gpflow.kernels.SquaredExponential()\r\nprint_summary(k)\r\n\r\nOutput:\r\nname class transform trainable shape dtype value\r\n------ ------- ----------- ----------- ------- ------- -------\r\n```\r\n... _Describe the expected behavior_\r\nshould print the values of the summed kernel's parameters.\r\n\n", "before_files": [{"content": "import re\nfrom functools import lru_cache\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport numpy as np\nimport tensorflow as tf\nfrom tabulate import tabulate\nfrom tensorflow.python.training.tracking.data_structures import ListWrapper, _DictWrapper\n\nfrom ..base import Parameter\nfrom ..config import summary_fmt\n\n__all__ = [\n \"set_trainable\",\n \"multiple_assign\",\n \"training_loop\",\n \"print_summary\",\n]\n\n\ndef set_trainable(model: tf.Module, flag: bool = False):\n \"\"\"\n Set trainable flag for all `tf.Variable`s and `gpflow.Parameter`s in a module.\n \"\"\"\n for variable in model.trainable_variables:\n variable._trainable = flag\n\n\ndef multiple_assign(input: tf.Module, vars_dict: Dict[str, tf.Tensor]):\n \"\"\"\n Multiple assign takes a dictionary with new values. Dictionary keys are paths to the\n `tf.Variable`s or `gpflow.Parameters` of the input module.\n\n :param input: `tf.Module`.\n :param vars_dict: a dictionary with keys of the form \"module.path.to.variable\" and new value tensors.\n \"\"\"\n reference_var_dict = leaf_components(input)\n for path, value in vars_dict.items():\n reference_var_dict[path].assign(value)\n\n\ndef training_loop(closure: Callable[..., tf.Tensor],\n optimizer: Optional[tf.optimizers.Optimizer] = None,\n var_list: List[tf.Variable] = None,\n maxiter=1e3,\n jit=False):\n \"\"\"\n Simple generic training loop. At each iteration uses a GradientTape to compute\n the gradients of a loss function with respect to a set of variables.\n\n :param closure: Callable that constructs a loss function based on data and model being trained\n :param optimizer: tf.optimizers or tf.keras.optimizers that updates variables by applying the\n corresponding loss gradients. 
Adam is a default optimizer with default settings.\n :param var_list: List of model variables to be learnt during training\n :param maxiter: Maximum number of\n :return:\n \"\"\"\n\n optimizer = tf.optimizers.Adam() if optimizer is None else optimizer\n\n def optimization_step():\n with tf.GradientTape() as tape:\n tape.watch(var_list)\n loss = closure()\n grads = tape.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n\n if jit:\n optimization_step = tf.function(optimization_step)\n\n for _ in range(int(maxiter)):\n optimization_step()\n\n\ndef print_summary(module: tf.Module, fmt: str = None):\n \"\"\"\n Prints a summary of the parameters and variables contained in a tf.Module.\n \"\"\"\n\n fmt = fmt if fmt is not None else summary_fmt()\n column_names = ['name', 'class', 'transform', 'trainable', 'shape', 'dtype', 'value']\n\n def get_name(v):\n return v.__class__.__name__\n\n def get_transform(v):\n if hasattr(v, \"transform\") and v.transform is not None:\n return v.transform.__class__.__name__\n return None\n\n merged_leaf_components = _merge_leaf_components(leaf_components(module))\n\n column_values = [[\n path,\n get_name(variable),\n get_transform(variable),\n variable.trainable,\n variable.shape,\n variable.dtype.name,\n _str_tensor_value(variable.numpy())\n ] for path, variable in merged_leaf_components.items()]\n\n if fmt == \"notebook\":\n from IPython.core.display import display, HTML\n tab = tabulate(column_values, headers=column_names, tablefmt=\"html\")\n display(HTML(tab))\n else:\n print(tabulate(column_values, headers=column_names, tablefmt=fmt))\n\n\ndef leaf_components(input: tf.Module):\n return _get_leaf_components(input)\n\n\ndef _merge_leaf_components(\n input: Dict[str, Union[tf.Tensor, Parameter]]) -> Dict[str, Union[tf.Tensor, Parameter]]:\n if len(set(input.values())) == len(input):\n return input\n tmp_dict = dict()\n for key, item in input.items():\n if item in tmp_dict:\n tmp_dict[item] = f\"{tmp_dict[item]}\\n{key}\"\n else:\n tmp_dict[item] = key\n return {key: item for item, key in tmp_dict.items()}\n\n\ndef _get_leaf_components(input: tf.Module, prefix: Optional[str] = None):\n \"\"\"\n Returns a list of tuples each corresponding to a gpflow.Parameter or tf.Variable in the each\n submodules of a given tf.Module. Each tuple consists of an specific Parameter (or Variable) and\n its relative path inside the module, which is constructed recursively by adding a prefix with\n the path to the current module. 
Designed to be used as a helper for the method 'print_summary'.\n\n :param module: tf.Module including keras.Model, keras.layers.Layer and gpflow.Module.\n :param prefix: string containing the relative path to module, by default set to None.\n :return:\n \"\"\"\n if not isinstance(input, tf.Module):\n raise TypeError(\"Input object expected to have `tf.Module` type\")\n\n prefix = input.__class__.__name__ if prefix is None else prefix\n var_dict = dict()\n\n for key, submodule in vars(input).items():\n if key in tf.Module._TF_MODULE_IGNORED_PROPERTIES:\n continue\n elif isinstance(submodule, Parameter) or isinstance(submodule, tf.Variable):\n var_dict[f\"{prefix}.{key}\"] = submodule\n elif isinstance(submodule, tf.Module):\n submodule_var = _get_leaf_components(submodule, prefix=f\"{prefix}.{key}\")\n var_dict.update(submodule_var)\n elif isinstance(submodule, ListWrapper):\n submodule_name = input.__class__.__name__\n for term_idx, subterm in enumerate(submodule):\n subterm_key = f\"{submodule_name}_{key}[{term_idx}]\"\n if isinstance(subterm, Parameter):\n subterm_var = _get_leaf_components(subterm, prefix=f\"{prefix}.{subterm_key}\")\n var_dict.update(subterm_var)\n elif isinstance(submodule, _DictWrapper):\n submodule_name = input.__class__.__name__\n for term_key, subterm in submodule.items():\n subterm_key = f\"{submodule_name}_{key}[{term_key}]\"\n subterm_var = _get_leaf_components(subterm, prefix=f\"{prefix}.{subterm_key}\")\n var_dict.update(subterm_var)\n return var_dict\n\n\n@lru_cache()\ndef _first_three_elements_regexp():\n num_re = r\"[+\\-]?(?:0|[1-9]\\d*)(?:\\.\\d*)?(?:[eE][+\\-]?\\d+)?\"\n pat_re = rf\"^(?:(\\[+)\\s*)?({num_re})(?:\\s+({num_re})(?:\\s+({num_re}))?)?.*?\"\n return re.compile(pat_re)\n\n\ndef _str_tensor_value(value: np.ndarray):\n value_str = str(value)\n if value.size <= 3:\n return value_str\n\n max_chars = 500\n value_str = value_str[:max_chars]\n regexp = _first_three_elements_regexp()\n match = regexp.match(value_str)\n assert match is not None\n brackets, elem1, elem2, elem3 = match.groups()\n\n out = f\"{elem1}\"\n if elem2 is not None:\n out = f\"{out}{f', {elem2}'}\"\n if elem3 is not None:\n out = f\"{out}{f', {elem3}'}\"\n if brackets is not None:\n out = f\"{brackets}{out}...\"\n\n return out\n", "path": "gpflow/utilities/utilities.py"}], "after_files": [{"content": "import re\nfrom functools import lru_cache\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport numpy as np\nimport tensorflow as tf\nfrom tabulate import tabulate\nfrom tensorflow.python.training.tracking.data_structures import ListWrapper, _DictWrapper\n\nfrom ..base import Parameter\nfrom ..config import summary_fmt\n\n__all__ = [\n \"set_trainable\",\n \"multiple_assign\",\n \"training_loop\",\n \"print_summary\",\n]\n\n\ndef set_trainable(model: tf.Module, flag: bool = False):\n \"\"\"\n Set trainable flag for all `tf.Variable`s and `gpflow.Parameter`s in a module.\n \"\"\"\n for variable in model.trainable_variables:\n variable._trainable = flag\n\n\ndef multiple_assign(input: tf.Module, vars_dict: Dict[str, tf.Tensor]):\n \"\"\"\n Multiple assign takes a dictionary with new values. 
Dictionary keys are paths to the\n `tf.Variable`s or `gpflow.Parameters` of the input module.\n\n :param input: `tf.Module`.\n :param vars_dict: a dictionary with keys of the form \"module.path.to.variable\" and new value tensors.\n \"\"\"\n reference_var_dict = leaf_components(input)\n for path, value in vars_dict.items():\n reference_var_dict[path].assign(value)\n\n\ndef training_loop(closure: Callable[..., tf.Tensor],\n optimizer: Optional[tf.optimizers.Optimizer] = None,\n var_list: List[tf.Variable] = None,\n maxiter=1e3,\n jit=False):\n \"\"\"\n Simple generic training loop. At each iteration uses a GradientTape to compute\n the gradients of a loss function with respect to a set of variables.\n\n :param closure: Callable that constructs a loss function based on data and model being trained\n :param optimizer: tf.optimizers or tf.keras.optimizers that updates variables by applying the\n corresponding loss gradients. Adam is a default optimizer with default settings.\n :param var_list: List of model variables to be learnt during training\n :param maxiter: Maximum number of\n :return:\n \"\"\"\n\n optimizer = tf.optimizers.Adam() if optimizer is None else optimizer\n\n def optimization_step():\n with tf.GradientTape() as tape:\n tape.watch(var_list)\n loss = closure()\n grads = tape.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n\n if jit:\n optimization_step = tf.function(optimization_step)\n\n for _ in range(int(maxiter)):\n optimization_step()\n\n\ndef print_summary(module: tf.Module, fmt: str = None):\n \"\"\"\n Prints a summary of the parameters and variables contained in a tf.Module.\n \"\"\"\n\n fmt = fmt if fmt is not None else summary_fmt()\n column_names = ['name', 'class', 'transform', 'trainable', 'shape', 'dtype', 'value']\n\n def get_name(v):\n return v.__class__.__name__\n\n def get_transform(v):\n if hasattr(v, \"transform\") and v.transform is not None:\n return v.transform.__class__.__name__\n return None\n\n merged_leaf_components = _merge_leaf_components(leaf_components(module))\n\n column_values = [[\n path,\n get_name(variable),\n get_transform(variable),\n variable.trainable,\n variable.shape,\n variable.dtype.name,\n _str_tensor_value(variable.numpy())\n ] for path, variable in merged_leaf_components.items()]\n\n\n if fmt == \"notebook\":\n from IPython.core.display import display, HTML\n tab = tabulate(column_values, headers=column_names, tablefmt=\"html\")\n display(HTML(tab))\n else:\n print(tabulate(column_values, headers=column_names, tablefmt=fmt))\n\n\ndef leaf_components(input: tf.Module):\n return _get_leaf_components(input)\n\n\ndef _merge_leaf_components(\n input: Dict[str, Union[tf.Tensor, Parameter]]) -> Dict[str, Union[tf.Tensor, Parameter]]:\n if len(set(input.values())) == len(input):\n return input\n tmp_dict = dict()\n for key, item in input.items():\n if item in tmp_dict:\n tmp_dict[item] = f\"{tmp_dict[item]}\\n{key}\"\n else:\n tmp_dict[item] = key\n return {key: item for item, key in tmp_dict.items()}\n\n\ndef _get_leaf_components(input: tf.Module, prefix: Optional[str] = None):\n \"\"\"\n Returns a list of tuples each corresponding to a gpflow.Parameter or tf.Variable in the each\n submodules of a given tf.Module. Each tuple consists of an specific Parameter (or Variable) and\n its relative path inside the module, which is constructed recursively by adding a prefix with\n the path to the current module. 
Designed to be used as a helper for the method 'print_summary'.\n\n :param module: tf.Module including keras.Model, keras.layers.Layer and gpflow.Module.\n :param prefix: string containing the relative path to module, by default set to None.\n :return:\n \"\"\"\n if not isinstance(input, tf.Module):\n raise TypeError(\"Input object expected to have `tf.Module` type\")\n\n prefix = input.__class__.__name__ if prefix is None else prefix\n var_dict = dict()\n\n for key, submodule in vars(input).items():\n if key in tf.Module._TF_MODULE_IGNORED_PROPERTIES:\n continue\n elif isinstance(submodule, Parameter) or isinstance(submodule, tf.Variable):\n var_dict[f\"{prefix}.{key}\"] = submodule\n elif isinstance(submodule, tf.Module):\n submodule_var = _get_leaf_components(submodule, prefix=f\"{prefix}.{key}\")\n var_dict.update(submodule_var)\n elif isinstance(submodule, ListWrapper):\n submodule_name = input.__class__.__name__\n for term_idx, subterm in enumerate(submodule):\n subterm_key = f\"{submodule_name}_{key}[{term_idx}]\"\n if isinstance(subterm, tf.Module):\n subterm_var = _get_leaf_components(subterm, prefix=f\"{prefix}.{subterm_key}\")\n var_dict.update(subterm_var)\n elif isinstance(submodule, _DictWrapper):\n submodule_name = input.__class__.__name__\n for term_key, subterm in submodule.items():\n subterm_key = f\"{submodule_name}_{key}[{term_key}]\"\n subterm_var = _get_leaf_components(subterm, prefix=f\"{prefix}.{subterm_key}\")\n var_dict.update(subterm_var)\n return var_dict\n\n\n@lru_cache()\ndef _first_three_elements_regexp():\n num_re = r\"[+\\-]?(?:0|[1-9]\\d*)(?:\\.\\d*)?(?:[eE][+\\-]?\\d+)?\"\n pat_re = rf\"^(?:(\\[+)\\s*)?({num_re})(?:\\s+({num_re})(?:\\s+({num_re}))?)?.*?\"\n return re.compile(pat_re)\n\n\ndef _str_tensor_value(value: np.ndarray):\n value_str = str(value)\n if value.size <= 3:\n return value_str\n\n max_chars = 500\n value_str = value_str[:max_chars]\n regexp = _first_three_elements_regexp()\n match = regexp.match(value_str)\n assert match is not None\n brackets, elem1, elem2, elem3 = match.groups()\n\n out = f\"{elem1}\"\n if elem2 is not None:\n out = f\"{out}{f', {elem2}'}\"\n if elem3 is not None:\n out = f\"{out}{f', {elem3}'}\"\n if brackets is not None:\n out = f\"{brackets}{out}...\"\n\n return out\n", "path": "gpflow/utilities/utilities.py"}]}
| 2,571 | 229 |
gh_patches_debug_41470
|
rasdani/github-patches
|
git_diff
|
PennyLaneAI__pennylane-1761
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add batching support to the AmplitudeEmbedding template
### Feature details
A recent PR, #1710, added support for creating PennyLane QNodes with *batches* of input parameters via [`@qml.batch_params`](https://pennylane.readthedocs.io/en/latest/code/api/pennylane.batch_params.html). Under the hood, a separate circuit per batch dimension is created, and as a result this approach is both hardware and simulator compatible:
```python
dev = qml.device("default.qubit", wires=3)
@qml.batch_params
@qml.beta.qnode(dev)
def circuit(x, weights):
qml.RX(x, wires=0)
qml.RY(0.2, wires=1)
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.expval(qml.Hadamard(0))
def cost_fn(x, weights):
return np.sum(circuit(x, weights))
batch_size = 3
x = np.linspace(0.1, 0.5, batch_size)
weights = np.random.random((batch_size, 10, 3, 3))
>>> circuit(x, weights)
[-0.30773348 0.23135516 0.13086565]
>>> cost_fn(x, weights)
-0.8581269507766536
>>> qml.grad(cost_fn)(x, weights)[0]
[ 0.23235464 0.00928953 -0.30083487]
```
One such template that would be great to support in batch mode is [`qml.templates.AmplitudeEmbedding`](https://pennylane.readthedocs.io/en/stable/code/api/pennylane.templates.embeddings.AmplitudeEmbedding.html). In order to add support, this template needs to be modified to recognize the first dimension of the input parameter as a _batch_ dimension.
### Implementation
We would like to support the following example:
```python
dev = qml.device("default.qubit", wires=3)
@qml.batch_params
@qml.beta.qnode(dev)
def circuit(data, weights):
qml.templates.AmplitudeEmbedding(data, wires=[0, 1, 2])
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.expval(qml.PauliX(0))
batch_size = 3
# create a batched input statevector
data = np.random.random((batch_size, 8))
data /= np.linalg.norm(data, axis=1).reshape(-1, 1) # normalize
weights = np.random.random((batch_size, 10, 3, 3))
circuit(data, weights)
```
This can be done by modifying the `AmplitudeEmbedding` source code as needed. In addition, a test should be added to `tests/transforms/test_batch_params.py`.
Note that some hardware devices do not support `AmplitudeEmbedding` directly and instead decompose it via the [`qml.templates.MottonenStatePreparation`](https://pennylane.readthedocs.io/en/stable/code/api/pennylane.templates.state_preparations.MottonenStatePreparation.html) template, so adding batch support to that template as well would be a bonus.
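As a rough starting point, the preprocessing could treat a two-dimensional input as a batch of state vectors and apply padding and normalization along the trailing axis. A minimal NumPy-only sketch of that shape handling (illustrative; `preprocess` is a hypothetical helper, not PennyLane API, and the real `_preprocess` would use `qml.math` to stay framework-agnostic):

```python
# Hypothetical batch-aware preprocessing sketch, assuming plain NumPy.
import numpy as np

def preprocess(features, n_wires, pad_with=None, normalize=False):
    features = np.atleast_2d(np.asarray(features, dtype=complex))   # (batch, dim)
    dim = 2 ** n_wires
    if pad_with is not None and features.shape[1] < dim:
        pad = np.full((features.shape[0], dim - features.shape[1]), pad_with, dtype=complex)
        features = np.concatenate([features, pad], axis=1)
    if features.shape[1] != dim:
        raise ValueError(f"Expected {dim} amplitudes per sample; got {features.shape[1]}.")
    norms = np.linalg.norm(features, axis=1, keepdims=True)
    if normalize or pad_with is not None:
        features = features / norms
    elif not np.allclose(norms, 1.0, atol=1e-10):
        raise ValueError("Features must be normalized per sample.")
    return features

batch = np.random.random((3, 5))
print(preprocess(batch, n_wires=3, pad_with=0.0).shape)   # (3, 8)
```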
### How important would you say this feature is?
2: Somewhat important. Needed this quarter.
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/templates/embeddings/amplitude.py`
Content:
```
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 Contains the AmplitudeEmbedding template.
16 """
17 # pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
18 import warnings
19 import numpy as np
20
21 import pennylane as qml
22 from pennylane.operation import Operation, AnyWires
23 from pennylane.ops import QubitStateVector
24 from pennylane.wires import Wires
25
26 # tolerance for normalization
27 TOLERANCE = 1e-10
28
29
30 class AmplitudeEmbedding(Operation):
31 r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
32
33 By setting ``pad_with`` to a real or complex number, ``features`` is automatically padded to dimension
34 :math:`2^n` where :math:`n` is the number of qubits used in the embedding.
35
36 To represent a valid quantum state vector, the L2-norm of ``features`` must be one.
37 The argument ``normalize`` can be set to ``True`` to automatically normalize the features.
38
39 If both automatic padding and normalization are used, padding is executed *before* normalizing.
40
41 .. note::
42
43 On some devices, ``AmplitudeEmbedding`` must be the first operation of a quantum circuit.
44
45 .. warning::
46
47 At the moment, the ``features`` argument is **not differentiable** when using the template, and
48 gradients with respect to the features cannot be computed by PennyLane.
49
50 Args:
51 features (tensor_like): input tensor of dimension ``(2^n,)``, or less if `pad_with` is specified
52 wires (Iterable): wires that the template acts on
53 pad_with (float or complex): if not None, the input is padded with this constant to size :math:`2^n`
54 normalize (bool): whether to automatically normalize the features
55 pad (float or complex): same as `pad`, to be deprecated
56
57 Example:
58
59 Amplitude embedding encodes a normalized :math:`2^n`-dimensional feature vector into the state
60 of :math:`n` qubits:
61
62 .. code-block:: python
63
64 import pennylane as qml
65 from pennylane.templates import AmplitudeEmbedding
66
67 dev = qml.device('default.qubit', wires=2)
68
69 @qml.qnode(dev)
70 def circuit(f=None):
71 AmplitudeEmbedding(features=f, wires=range(2))
72 return qml.expval(qml.PauliZ(0))
73
74 circuit(f=[1/2, 1/2, 1/2, 1/2])
75
76 The final state of the device is - up to a global phase - equivalent to the input passed to the circuit:
77
78 >>> dev.state
79 [0.5+0.j 0.5+0.j 0.5+0.j 0.5+0.j]
80
81 **Differentiating with respect to the features**
82
83 Due to non-trivial classical processing to construct the state preparation circuit,
84 the features argument is in general **not differentiable**.
85
86 **Normalization**
87
88 The template will raise an error if the feature input is not normalized.
89 One can set ``normalize=True`` to automatically normalize it:
90
91 .. code-block:: python
92
93 @qml.qnode(dev)
94 def circuit(f=None):
95 AmplitudeEmbedding(features=f, wires=range(2), normalize=True)
96 return qml.expval(qml.PauliZ(0))
97
98 circuit(f=[15, 15, 15, 15])
99
100 >>> dev.state
101 [0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j]
102
103 **Padding**
104
105 If the dimension of the feature vector is smaller than the number of amplitudes,
106 one can automatically pad it with a constant for the missing dimensions using the ``pad_with`` option:
107
108 .. code-block:: python
109
110 from math import sqrt
111
112 @qml.qnode(dev)
113 def circuit(f=None):
114 AmplitudeEmbedding(features=f, wires=range(2), pad_with=0.)
115 return qml.expval(qml.PauliZ(0))
116
117 circuit(f=[1/sqrt(2), 1/sqrt(2)])
118
119 >>> dev.state
120 [0.70710678 + 0.j, 0.70710678 + 0.j, 0.0 + 0.j, 0.0 + 0.j]
121
122 """
123
124 num_params = 1
125 num_wires = AnyWires
126 par_domain = "A"
127 grad_method = None
128
129 def __init__(
130 self, features, wires, pad_with=None, normalize=False, pad=None, do_queue=True, id=None
131 ):
132
133 # pad is replaced with the more verbose pad_with
134 if pad is not None:
135 warnings.warn(
136 "The pad argument will be replaced by the pad_with option in future versions of PennyLane.",
137 UserWarning,
138 )
139 if pad_with is None:
140 pad_with = pad
141
142 wires = Wires(wires)
143 self.pad_with = pad_with
144 self.normalize = normalize
145
146 features = self._preprocess(features, wires, pad_with, normalize)
147 super().__init__(features, wires=wires, do_queue=do_queue, id=id)
148
149 def adjoint(self): # pylint: disable=arguments-differ
150 return qml.adjoint(qml.templates.MottonenStatePreparation)(
151 self.parameters[0], wires=self.wires
152 )
153
154 def expand(self):
155
156 with qml.tape.QuantumTape() as tape:
157 QubitStateVector(self.parameters[0], wires=self.wires)
158
159 return tape
160
161 @staticmethod
162 def _preprocess(features, wires, pad_with, normalize):
163 """Validate and pre-process inputs as follows:
164
165 * Check that the features tensor is one-dimensional.
166 * If pad_with is None, check that the first dimension of the features tensor
167 has length :math:`2^n` where :math:`n` is the number of qubits. Else check that the
168 first dimension of the features tensor is not larger than :math:`2^n` and pad features with value if necessary.
169 * If normalize is false, check that first dimension of features is normalised to one. Else, normalise the
170 features tensor.
171 """
172
173 shape = qml.math.shape(features)
174
175 # check shape
176 if len(shape) != 1:
177 raise ValueError(f"Features must be a one-dimensional tensor; got shape {shape}.")
178
179 n_features = shape[0]
180 if pad_with is None and n_features != 2 ** len(wires):
181 raise ValueError(
182 f"Features must be of length {2 ** len(wires)}; got length {n_features}. "
183 f"Use the 'pad' argument for automated padding."
184 )
185
186 if pad_with is not None and n_features > 2 ** len(wires):
187 raise ValueError(
188 f"Features must be of length {2 ** len(wires)} or "
189 f"smaller to be padded; got length {n_features}."
190 )
191
192 # pad
193 if pad_with is not None and n_features < 2 ** len(wires):
194 padding = [pad_with] * (2 ** len(wires) - n_features)
195 features = qml.math.concatenate([features, padding], axis=0)
196
197 # normalize
198 norm = qml.math.sum(qml.math.abs(features) ** 2)
199
200 if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):
201 if normalize or pad_with:
202 features = features / np.sqrt(norm)
203 else:
204 raise ValueError(
205 f"Features must be a vector of length 1.0; got length {norm}."
206 "Use 'normalize=True' to automatically normalize."
207 )
208
209 features = qml.math.cast(features, np.complex128)
210 return features
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pennylane/templates/embeddings/amplitude.py b/pennylane/templates/embeddings/amplitude.py
--- a/pennylane/templates/embeddings/amplitude.py
+++ b/pennylane/templates/embeddings/amplitude.py
@@ -162,6 +162,7 @@
def _preprocess(features, wires, pad_with, normalize):
"""Validate and pre-process inputs as follows:
+ * If features is batched, the processing that follows is applied to each feature set in the batch.
* Check that the features tensor is one-dimensional.
* If pad_with is None, check that the first dimension of the features tensor
has length :math:`2^n` where :math:`n` is the number of qubits. Else check that the
@@ -170,41 +171,49 @@
features tensor.
"""
- shape = qml.math.shape(features)
+ # check if features is batched
+ batched = len(qml.math.shape(features)) > 1
- # check shape
- if len(shape) != 1:
- raise ValueError(f"Features must be a one-dimensional tensor; got shape {shape}.")
+ features_batch = features if batched else [features]
- n_features = shape[0]
- if pad_with is None and n_features != 2 ** len(wires):
- raise ValueError(
- f"Features must be of length {2 ** len(wires)}; got length {n_features}. "
- f"Use the 'pad' argument for automated padding."
- )
-
- if pad_with is not None and n_features > 2 ** len(wires):
- raise ValueError(
- f"Features must be of length {2 ** len(wires)} or "
- f"smaller to be padded; got length {n_features}."
- )
+ # apply pre-processing to each features tensor in the batch
+ for i, feature_set in enumerate(features_batch):
+ shape = qml.math.shape(feature_set)
- # pad
- if pad_with is not None and n_features < 2 ** len(wires):
- padding = [pad_with] * (2 ** len(wires) - n_features)
- features = qml.math.concatenate([features, padding], axis=0)
+ # check shape
+ if len(shape) != 1:
+ raise ValueError(f"Features must be a one-dimensional tensor; got shape {shape}.")
- # normalize
- norm = qml.math.sum(qml.math.abs(features) ** 2)
+ n_features = shape[0]
+ if pad_with is None and n_features != 2 ** len(wires):
+ raise ValueError(
+ f"Features must be of length {2 ** len(wires)}; got length {n_features}. "
+ f"Use the 'pad' argument for automated padding."
+ )
- if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):
- if normalize or pad_with:
- features = features / np.sqrt(norm)
- else:
+ if pad_with is not None and n_features > 2 ** len(wires):
raise ValueError(
- f"Features must be a vector of length 1.0; got length {norm}."
- "Use 'normalize=True' to automatically normalize."
+ f"Features must be of length {2 ** len(wires)} or "
+ f"smaller to be padded; got length {n_features}."
)
- features = qml.math.cast(features, np.complex128)
- return features
+ # pad
+ if pad_with is not None and n_features < 2 ** len(wires):
+ padding = [pad_with] * (2 ** len(wires) - n_features)
+ feature_set = qml.math.concatenate([feature_set, padding], axis=0)
+
+ # normalize
+ norm = qml.math.sum(qml.math.abs(feature_set) ** 2)
+
+ if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):
+ if normalize or pad_with:
+ feature_set = feature_set / np.sqrt(norm)
+ else:
+ raise ValueError(
+ f"Features must be a vector of norm 1.0; got norm {norm}."
+ "Use 'normalize=True' to automatically normalize."
+ )
+
+ features_batch[i] = qml.math.cast(feature_set, np.complex128)
+
+ return features_batch if batched else features_batch[0]
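For reference, here is a minimal usage sketch of the batched behaviour this patch is meant to enable, adapted from the example in the issue itself; the batch size, wire count, and measured observable are illustrative only, and the sketch assumes the patched `_preprocess` above.

```python
# Batched AmplitudeEmbedding via qml.batch_params, adapted from the issue's
# "Implementation" example. Shapes: 3 wires -> 2**3 = 8 amplitudes per sample.
import numpy as np
import pennylane as qml

dev = qml.device("default.qubit", wires=3)

@qml.batch_params
@qml.qnode(dev)
def circuit(data, weights):
    qml.templates.AmplitudeEmbedding(data, wires=[0, 1, 2])
    qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
    return qml.expval(qml.PauliX(0))

batch_size = 3
# One normalized 8-dimensional state vector per batch entry.
data = np.random.random((batch_size, 8))
data /= np.linalg.norm(data, axis=1).reshape(-1, 1)
weights = np.random.random((batch_size, 10, 3, 3))

print(circuit(data, weights))  # one expectation value per batch entry
```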
|
{"golden_diff": "diff --git a/pennylane/templates/embeddings/amplitude.py b/pennylane/templates/embeddings/amplitude.py\n--- a/pennylane/templates/embeddings/amplitude.py\n+++ b/pennylane/templates/embeddings/amplitude.py\n@@ -162,6 +162,7 @@\n def _preprocess(features, wires, pad_with, normalize):\n \"\"\"Validate and pre-process inputs as follows:\n \n+ * If features is batched, the processing that follows is applied to each feature set in the batch.\n * Check that the features tensor is one-dimensional.\n * If pad_with is None, check that the first dimension of the features tensor\n has length :math:`2^n` where :math:`n` is the number of qubits. Else check that the\n@@ -170,41 +171,49 @@\n features tensor.\n \"\"\"\n \n- shape = qml.math.shape(features)\n+ # check if features is batched\n+ batched = len(qml.math.shape(features)) > 1\n \n- # check shape\n- if len(shape) != 1:\n- raise ValueError(f\"Features must be a one-dimensional tensor; got shape {shape}.\")\n+ features_batch = features if batched else [features]\n \n- n_features = shape[0]\n- if pad_with is None and n_features != 2 ** len(wires):\n- raise ValueError(\n- f\"Features must be of length {2 ** len(wires)}; got length {n_features}. \"\n- f\"Use the 'pad' argument for automated padding.\"\n- )\n-\n- if pad_with is not None and n_features > 2 ** len(wires):\n- raise ValueError(\n- f\"Features must be of length {2 ** len(wires)} or \"\n- f\"smaller to be padded; got length {n_features}.\"\n- )\n+ # apply pre-processing to each features tensor in the batch\n+ for i, feature_set in enumerate(features_batch):\n+ shape = qml.math.shape(feature_set)\n \n- # pad\n- if pad_with is not None and n_features < 2 ** len(wires):\n- padding = [pad_with] * (2 ** len(wires) - n_features)\n- features = qml.math.concatenate([features, padding], axis=0)\n+ # check shape\n+ if len(shape) != 1:\n+ raise ValueError(f\"Features must be a one-dimensional tensor; got shape {shape}.\")\n \n- # normalize\n- norm = qml.math.sum(qml.math.abs(features) ** 2)\n+ n_features = shape[0]\n+ if pad_with is None and n_features != 2 ** len(wires):\n+ raise ValueError(\n+ f\"Features must be of length {2 ** len(wires)}; got length {n_features}. 
\"\n+ f\"Use the 'pad' argument for automated padding.\"\n+ )\n \n- if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):\n- if normalize or pad_with:\n- features = features / np.sqrt(norm)\n- else:\n+ if pad_with is not None and n_features > 2 ** len(wires):\n raise ValueError(\n- f\"Features must be a vector of length 1.0; got length {norm}.\"\n- \"Use 'normalize=True' to automatically normalize.\"\n+ f\"Features must be of length {2 ** len(wires)} or \"\n+ f\"smaller to be padded; got length {n_features}.\"\n )\n \n- features = qml.math.cast(features, np.complex128)\n- return features\n+ # pad\n+ if pad_with is not None and n_features < 2 ** len(wires):\n+ padding = [pad_with] * (2 ** len(wires) - n_features)\n+ feature_set = qml.math.concatenate([feature_set, padding], axis=0)\n+\n+ # normalize\n+ norm = qml.math.sum(qml.math.abs(feature_set) ** 2)\n+\n+ if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):\n+ if normalize or pad_with:\n+ feature_set = feature_set / np.sqrt(norm)\n+ else:\n+ raise ValueError(\n+ f\"Features must be a vector of norm 1.0; got norm {norm}.\"\n+ \"Use 'normalize=True' to automatically normalize.\"\n+ )\n+\n+ features_batch[i] = qml.math.cast(feature_set, np.complex128)\n+\n+ return features_batch if batched else features_batch[0]\n", "issue": "Add batching support to the AmplitudeEmbedding template\n### Feature details\r\n\r\nA recent PR, #1710, added support for creating PennyLane QNodes with *batches* of input parameters via [`@qml.batch_params`](https://pennylane.readthedocs.io/en/latest/code/api/pennylane.batch_params.html). Under the hood, a separate circuit per batch dimension is created, and as a result this approach is both hardware and simulator compatible:\r\n\r\n```python\r\ndev = qml.device(\"default.qubit\", wires=2)\r\n\r\[email protected]_params\r\[email protected](dev)\r\ndef circuit(x, weights):\r\n qml.RX(x, wires=0)\r\n qml.RY(0.2, wires=1)\r\n qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])\r\n return qml.expval(qml.Hadamard(0))\r\n\r\ndef cost(x, weights):\r\n return np.sum(circuit(x, weights))\r\n\r\nbatch_size = 3\r\nx = np.linspace(0.1, 0.5, batch_size)\r\nweights = np.random.random((batch_size, 10, 3, 3))\r\n\r\n>>> circuit(x, weights)\r\n[-0.30773348 0.23135516 0.13086565]\r\n>>> cost_fn(x, weights)\r\n-0.8581269507766536\r\n>>> qml.grad(cost_fn)(x, weights)[0]\r\n[ 0.23235464 0.00928953 -0.30083487]\r\n```\r\n\r\nOne such template that would be great to support in batch mode is [`qml.templates.AmplitudeEmbedding`](https://pennylane.readthedocs.io/en/stable/code/api/pennylane.templates.embeddings.AmplitudeEmbedding.html). In order to add support, this template needs to be modified to recognize the first dimension of the input parameter as a _batch_ dimension.\r\n\r\n### Implementation\r\n\r\nWe would like to support the following example:\r\n\r\n```python\r\ndev = qml.device(\"default.qubit\", wires=3)\r\n\r\[email protected]_params\r\[email protected](dev)\r\ndef circuit(data, weights):\r\n qml.templates.AmplitudeEmbedding(data, wires=[0, 1, 2])\r\n qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])\r\n return qml.expval(qml.PauliX(0))\r\n\r\nbatch_size = 3\r\n\r\n# create a batched input statevector\r\ndata = np.random.random((batch_size, 8))\r\ndata /= np.linalg.norm(data, axis=1).reshape(-1, 1) # normalize\r\n\r\nweights = np.random.random((batch_size, 10, 3, 3))\r\ncircuit(data, weights)\r\n```\r\n\r\nThis can be done by modifying the `AmplitudeEmbedding` source code as needed. 
In addition, a test should be added to `tests/transforms/test_batch_params.py`.\r\n\r\nNote that some hardware devices do not support `AmplitudeEmbedding` directly, and instead decompose via the [`qml.templates.MottonenStatePreparation`](https://pennylane.readthedocs.io/en/stable/code/api/pennylane.templates.state_preparations.MottonenStatePreparation.html) template, so adding support to this template would also be an added bonus.\r\n\r\n### How important would you say this feature is?\r\n\r\n2: Somewhat important. Needed this quarter.\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the AmplitudeEmbedding template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport warnings\nimport numpy as np\n\nimport pennylane as qml\nfrom pennylane.operation import Operation, AnyWires\nfrom pennylane.ops import QubitStateVector\nfrom pennylane.wires import Wires\n\n# tolerance for normalization\nTOLERANCE = 1e-10\n\n\nclass AmplitudeEmbedding(Operation):\n r\"\"\"Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.\n\n By setting ``pad_with`` to a real or complex number, ``features`` is automatically padded to dimension\n :math:`2^n` where :math:`n` is the number of qubits used in the embedding.\n\n To represent a valid quantum state vector, the L2-norm of ``features`` must be one.\n The argument ``normalize`` can be set to ``True`` to automatically normalize the features.\n\n If both automatic padding and normalization are used, padding is executed *before* normalizing.\n\n .. note::\n\n On some devices, ``AmplitudeEmbedding`` must be the first operation of a quantum circuit.\n\n .. warning::\n\n At the moment, the ``features`` argument is **not differentiable** when using the template, and\n gradients with respect to the features cannot be computed by PennyLane.\n\n Args:\n features (tensor_like): input tensor of dimension ``(2^n,)``, or less if `pad_with` is specified\n wires (Iterable): wires that the template acts on\n pad_with (float or complex): if not None, the input is padded with this constant to size :math:`2^n`\n normalize (bool): whether to automatically normalize the features\n pad (float or complex): same as `pad`, to be deprecated\n\n Example:\n\n Amplitude embedding encodes a normalized :math:`2^n`-dimensional feature vector into the state\n of :math:`n` qubits:\n\n .. 
code-block:: python\n\n import pennylane as qml\n from pennylane.templates import AmplitudeEmbedding\n\n dev = qml.device('default.qubit', wires=2)\n\n @qml.qnode(dev)\n def circuit(f=None):\n AmplitudeEmbedding(features=f, wires=range(2))\n return qml.expval(qml.PauliZ(0))\n\n circuit(f=[1/2, 1/2, 1/2, 1/2])\n\n The final state of the device is - up to a global phase - equivalent to the input passed to the circuit:\n\n >>> dev.state\n [0.5+0.j 0.5+0.j 0.5+0.j 0.5+0.j]\n\n **Differentiating with respect to the features**\n\n Due to non-trivial classical processing to construct the state preparation circuit,\n the features argument is in general **not differentiable**.\n\n **Normalization**\n\n The template will raise an error if the feature input is not normalized.\n One can set ``normalize=True`` to automatically normalize it:\n\n .. code-block:: python\n\n @qml.qnode(dev)\n def circuit(f=None):\n AmplitudeEmbedding(features=f, wires=range(2), normalize=True)\n return qml.expval(qml.PauliZ(0))\n\n circuit(f=[15, 15, 15, 15])\n\n >>> dev.state\n [0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j]\n\n **Padding**\n\n If the dimension of the feature vector is smaller than the number of amplitudes,\n one can automatically pad it with a constant for the missing dimensions using the ``pad_with`` option:\n\n .. code-block:: python\n\n from math import sqrt\n\n @qml.qnode(dev)\n def circuit(f=None):\n AmplitudeEmbedding(features=f, wires=range(2), pad_with=0.)\n return qml.expval(qml.PauliZ(0))\n\n circuit(f=[1/sqrt(2), 1/sqrt(2)])\n\n >>> dev.state\n [0.70710678 + 0.j, 0.70710678 + 0.j, 0.0 + 0.j, 0.0 + 0.j]\n\n \"\"\"\n\n num_params = 1\n num_wires = AnyWires\n par_domain = \"A\"\n grad_method = None\n\n def __init__(\n self, features, wires, pad_with=None, normalize=False, pad=None, do_queue=True, id=None\n ):\n\n # pad is replaced with the more verbose pad_with\n if pad is not None:\n warnings.warn(\n \"The pad argument will be replaced by the pad_with option in future versions of PennyLane.\",\n UserWarning,\n )\n if pad_with is None:\n pad_with = pad\n\n wires = Wires(wires)\n self.pad_with = pad_with\n self.normalize = normalize\n\n features = self._preprocess(features, wires, pad_with, normalize)\n super().__init__(features, wires=wires, do_queue=do_queue, id=id)\n\n def adjoint(self): # pylint: disable=arguments-differ\n return qml.adjoint(qml.templates.MottonenStatePreparation)(\n self.parameters[0], wires=self.wires\n )\n\n def expand(self):\n\n with qml.tape.QuantumTape() as tape:\n QubitStateVector(self.parameters[0], wires=self.wires)\n\n return tape\n\n @staticmethod\n def _preprocess(features, wires, pad_with, normalize):\n \"\"\"Validate and pre-process inputs as follows:\n\n * Check that the features tensor is one-dimensional.\n * If pad_with is None, check that the first dimension of the features tensor\n has length :math:`2^n` where :math:`n` is the number of qubits. Else check that the\n first dimension of the features tensor is not larger than :math:`2^n` and pad features with value if necessary.\n * If normalize is false, check that first dimension of features is normalised to one. Else, normalise the\n features tensor.\n \"\"\"\n\n shape = qml.math.shape(features)\n\n # check shape\n if len(shape) != 1:\n raise ValueError(f\"Features must be a one-dimensional tensor; got shape {shape}.\")\n\n n_features = shape[0]\n if pad_with is None and n_features != 2 ** len(wires):\n raise ValueError(\n f\"Features must be of length {2 ** len(wires)}; got length {n_features}. 
\"\n f\"Use the 'pad' argument for automated padding.\"\n )\n\n if pad_with is not None and n_features > 2 ** len(wires):\n raise ValueError(\n f\"Features must be of length {2 ** len(wires)} or \"\n f\"smaller to be padded; got length {n_features}.\"\n )\n\n # pad\n if pad_with is not None and n_features < 2 ** len(wires):\n padding = [pad_with] * (2 ** len(wires) - n_features)\n features = qml.math.concatenate([features, padding], axis=0)\n\n # normalize\n norm = qml.math.sum(qml.math.abs(features) ** 2)\n\n if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):\n if normalize or pad_with:\n features = features / np.sqrt(norm)\n else:\n raise ValueError(\n f\"Features must be a vector of length 1.0; got length {norm}.\"\n \"Use 'normalize=True' to automatically normalize.\"\n )\n\n features = qml.math.cast(features, np.complex128)\n return features\n", "path": "pennylane/templates/embeddings/amplitude.py"}], "after_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nContains the AmplitudeEmbedding template.\n\"\"\"\n# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access\nimport warnings\nimport numpy as np\n\nimport pennylane as qml\nfrom pennylane.operation import Operation, AnyWires\nfrom pennylane.ops import QubitStateVector\nfrom pennylane.wires import Wires\n\n# tolerance for normalization\nTOLERANCE = 1e-10\n\n\nclass AmplitudeEmbedding(Operation):\n r\"\"\"Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.\n\n By setting ``pad_with`` to a real or complex number, ``features`` is automatically padded to dimension\n :math:`2^n` where :math:`n` is the number of qubits used in the embedding.\n\n To represent a valid quantum state vector, the L2-norm of ``features`` must be one.\n The argument ``normalize`` can be set to ``True`` to automatically normalize the features.\n\n If both automatic padding and normalization are used, padding is executed *before* normalizing.\n\n .. note::\n\n On some devices, ``AmplitudeEmbedding`` must be the first operation of a quantum circuit.\n\n .. warning::\n\n At the moment, the ``features`` argument is **not differentiable** when using the template, and\n gradients with respect to the features cannot be computed by PennyLane.\n\n Args:\n features (tensor_like): input tensor of dimension ``(2^n,)``, or less if `pad_with` is specified\n wires (Iterable): wires that the template acts on\n pad_with (float or complex): if not None, the input is padded with this constant to size :math:`2^n`\n normalize (bool): whether to automatically normalize the features\n pad (float or complex): same as `pad`, to be deprecated\n\n Example:\n\n Amplitude embedding encodes a normalized :math:`2^n`-dimensional feature vector into the state\n of :math:`n` qubits:\n\n .. 
code-block:: python\n\n import pennylane as qml\n from pennylane.templates import AmplitudeEmbedding\n\n dev = qml.device('default.qubit', wires=2)\n\n @qml.qnode(dev)\n def circuit(f=None):\n AmplitudeEmbedding(features=f, wires=range(2))\n return qml.expval(qml.PauliZ(0))\n\n circuit(f=[1/2, 1/2, 1/2, 1/2])\n\n The final state of the device is - up to a global phase - equivalent to the input passed to the circuit:\n\n >>> dev.state\n [0.5+0.j 0.5+0.j 0.5+0.j 0.5+0.j]\n\n **Differentiating with respect to the features**\n\n Due to non-trivial classical processing to construct the state preparation circuit,\n the features argument is in general **not differentiable**.\n\n **Normalization**\n\n The template will raise an error if the feature input is not normalized.\n One can set ``normalize=True`` to automatically normalize it:\n\n .. code-block:: python\n\n @qml.qnode(dev)\n def circuit(f=None):\n AmplitudeEmbedding(features=f, wires=range(2), normalize=True)\n return qml.expval(qml.PauliZ(0))\n\n circuit(f=[15, 15, 15, 15])\n\n >>> dev.state\n [0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j, 0.5 + 0.j]\n\n **Padding**\n\n If the dimension of the feature vector is smaller than the number of amplitudes,\n one can automatically pad it with a constant for the missing dimensions using the ``pad_with`` option:\n\n .. code-block:: python\n\n from math import sqrt\n\n @qml.qnode(dev)\n def circuit(f=None):\n AmplitudeEmbedding(features=f, wires=range(2), pad_with=0.)\n return qml.expval(qml.PauliZ(0))\n\n circuit(f=[1/sqrt(2), 1/sqrt(2)])\n\n >>> dev.state\n [0.70710678 + 0.j, 0.70710678 + 0.j, 0.0 + 0.j, 0.0 + 0.j]\n\n \"\"\"\n\n num_params = 1\n num_wires = AnyWires\n par_domain = \"A\"\n grad_method = None\n\n def __init__(\n self, features, wires, pad_with=None, normalize=False, pad=None, do_queue=True, id=None\n ):\n\n # pad is replaced with the more verbose pad_with\n if pad is not None:\n warnings.warn(\n \"The pad argument will be replaced by the pad_with option in future versions of PennyLane.\",\n UserWarning,\n )\n if pad_with is None:\n pad_with = pad\n\n wires = Wires(wires)\n self.pad_with = pad_with\n self.normalize = normalize\n\n features = self._preprocess(features, wires, pad_with, normalize)\n super().__init__(features, wires=wires, do_queue=do_queue, id=id)\n\n def adjoint(self): # pylint: disable=arguments-differ\n return qml.adjoint(qml.templates.MottonenStatePreparation)(\n self.parameters[0], wires=self.wires\n )\n\n def expand(self):\n\n with qml.tape.QuantumTape() as tape:\n QubitStateVector(self.parameters[0], wires=self.wires)\n\n return tape\n\n @staticmethod\n def _preprocess(features, wires, pad_with, normalize):\n \"\"\"Validate and pre-process inputs as follows:\n\n * If features is batched, the processing that follows is applied to each feature set in the batch.\n * Check that the features tensor is one-dimensional.\n * If pad_with is None, check that the first dimension of the features tensor\n has length :math:`2^n` where :math:`n` is the number of qubits. Else check that the\n first dimension of the features tensor is not larger than :math:`2^n` and pad features with value if necessary.\n * If normalize is false, check that first dimension of features is normalised to one. 
Else, normalise the\n features tensor.\n \"\"\"\n\n # check if features is batched\n batched = len(qml.math.shape(features)) > 1\n\n features_batch = features if batched else [features]\n\n # apply pre-processing to each features tensor in the batch\n for i, feature_set in enumerate(features_batch):\n shape = qml.math.shape(feature_set)\n\n # check shape\n if len(shape) != 1:\n raise ValueError(f\"Features must be a one-dimensional tensor; got shape {shape}.\")\n\n n_features = shape[0]\n if pad_with is None and n_features != 2 ** len(wires):\n raise ValueError(\n f\"Features must be of length {2 ** len(wires)}; got length {n_features}. \"\n f\"Use the 'pad' argument for automated padding.\"\n )\n\n if pad_with is not None and n_features > 2 ** len(wires):\n raise ValueError(\n f\"Features must be of length {2 ** len(wires)} or \"\n f\"smaller to be padded; got length {n_features}.\"\n )\n\n # pad\n if pad_with is not None and n_features < 2 ** len(wires):\n padding = [pad_with] * (2 ** len(wires) - n_features)\n feature_set = qml.math.concatenate([feature_set, padding], axis=0)\n\n # normalize\n norm = qml.math.sum(qml.math.abs(feature_set) ** 2)\n\n if not qml.math.allclose(norm, 1.0, atol=TOLERANCE):\n if normalize or pad_with:\n feature_set = feature_set / np.sqrt(norm)\n else:\n raise ValueError(\n f\"Features must be a vector of norm 1.0; got norm {norm}.\"\n \"Use 'normalize=True' to automatically normalize.\"\n )\n\n features_batch[i] = qml.math.cast(feature_set, np.complex128)\n\n return features_batch if batched else features_batch[0]\n", "path": "pennylane/templates/embeddings/amplitude.py"}]}
| 3,463 | 1,021 |
gh_patches_debug_18049 | rasdani/github-patches | git_diff | PrefectHQ__prefect-9390 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter by work pool not filtering the "dot" graph
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
When filtering by "work pool" in the flows screen, the "dots graph" is not filtered.
The "main-pool" is a pool I've just created, and there are no runs associated to it.

### Reproduction
```python3
Create a new "work flow".
Make sure it's empty and has no runs in it.
Go to the flows screen, and filter by this work pool.
You'll see that all the dots on the graph remain although they are not related to that work pool.
```
### Error
_No response_
### Versions
```Text
Prefect cloud
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/server/api/ui/flow_runs.py`
Content:
```
1 import datetime
2 from typing import List
3 from uuid import UUID
4
5 from fastapi import Body, Depends
6 from pydantic import Field
7
8 import prefect.server.schemas as schemas
9 from prefect.logging import get_logger
10 from prefect.server import models
11 from prefect.server.database.dependencies import provide_database_interface
12 from prefect.server.database.interface import PrefectDBInterface
13 from prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel
14 from prefect.server.utilities.server import PrefectRouter
15
16 logger = get_logger("server.api.ui.flow_runs")
17
18 router = PrefectRouter(prefix="/ui/flow_runs", tags=["Flow Runs", "UI"])
19
20
21 class SimpleFlowRun(PrefectBaseModel):
22 id: UUID = Field(default=..., description="The flow run id.")
23 state_type: schemas.states.StateType = Field(
24 default=..., description="The state type."
25 )
26 timestamp: DateTimeTZ = Field(
27 default=...,
28 description=(
29 "The start time of the run, or the expected start time "
30 "if it hasn't run yet."
31 ),
32 )
33 duration: datetime.timedelta = Field(
34 default=..., description="The total run time of the run."
35 )
36 lateness: datetime.timedelta = Field(
37 default=..., description="The delay between the expected and actual start time."
38 )
39
40
41 @router.post("/history")
42 async def read_flow_run_history(
43 sort: schemas.sorting.FlowRunSort = Body(
44 schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC
45 ),
46 limit: int = Body(1000, le=1000),
47 offset: int = Body(0, ge=0),
48 flows: schemas.filters.FlowFilter = None,
49 flow_runs: schemas.filters.FlowRunFilter = None,
50 task_runs: schemas.filters.TaskRunFilter = None,
51 deployments: schemas.filters.DeploymentFilter = None,
52 db: PrefectDBInterface = Depends(provide_database_interface),
53 ) -> List[SimpleFlowRun]:
54 columns = [
55 db.FlowRun.id,
56 db.FlowRun.state_type,
57 db.FlowRun.start_time,
58 db.FlowRun.expected_start_time,
59 db.FlowRun.total_run_time,
60 # Although it isn't returned, we need to select
61 # this field in order to compute `estimated_run_time`
62 db.FlowRun.state_timestamp,
63 ]
64 async with db.session_context() as session:
65 result = await models.flow_runs.read_flow_runs(
66 columns=columns,
67 flow_filter=flows,
68 flow_run_filter=flow_runs,
69 task_run_filter=task_runs,
70 deployment_filter=deployments,
71 sort=sort,
72 limit=limit,
73 offset=offset,
74 session=session,
75 )
76 return [
77 SimpleFlowRun(
78 id=r.id,
79 state_type=r.state_type,
80 timestamp=r.start_time or r.expected_start_time,
81 duration=r.estimated_run_time,
82 lateness=r.estimated_start_time_delta,
83 )
84 for r in result
85 ]
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/prefect/server/api/ui/flow_runs.py b/src/prefect/server/api/ui/flow_runs.py
--- a/src/prefect/server/api/ui/flow_runs.py
+++ b/src/prefect/server/api/ui/flow_runs.py
@@ -49,6 +49,7 @@
flow_runs: schemas.filters.FlowRunFilter = None,
task_runs: schemas.filters.TaskRunFilter = None,
deployments: schemas.filters.DeploymentFilter = None,
+ work_pools: schemas.filters.WorkPoolFilter = None,
db: PrefectDBInterface = Depends(provide_database_interface),
) -> List[SimpleFlowRun]:
columns = [
@@ -68,6 +69,7 @@
flow_run_filter=flow_runs,
task_run_filter=task_runs,
deployment_filter=deployments,
+ work_pool_filter=work_pools,
sort=sort,
limit=limit,
offset=offset,
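As a quick illustration of how the new parameter would be exercised, here is a hypothetical client-side call against the patched endpoint. The request shape for `work_pools` (a `name`/`any_` filter) is an assumption by analogy with Prefect's other filter schemas, and the base URL assumes a local Prefect server; both may differ in practice.

```python
# Hypothetical request to the patched /ui/flow_runs/history route.
# The "work_pools" filter shape is assumed, not taken from the Prefect docs.
import requests

payload = {
    "sort": "EXPECTED_START_TIME_DESC",
    "limit": 1000,
    "offset": 0,
    "work_pools": {"name": {"any_": ["main-pool"]}},  # pool from the bug report
}

resp = requests.post(
    "http://127.0.0.1:4200/api/ui/flow_runs/history",  # assumed local server
    json=payload,
)
print(resp.json())  # with the patch, only runs from "main-pool" should remain
```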
|
{"golden_diff": "diff --git a/src/prefect/server/api/ui/flow_runs.py b/src/prefect/server/api/ui/flow_runs.py\n--- a/src/prefect/server/api/ui/flow_runs.py\n+++ b/src/prefect/server/api/ui/flow_runs.py\n@@ -49,6 +49,7 @@\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n+ work_pools: schemas.filters.WorkPoolFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n ) -> List[SimpleFlowRun]:\n columns = [\n@@ -68,6 +69,7 @@\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n+ work_pool_filter=work_pools,\n sort=sort,\n limit=limit,\n offset=offset,\n", "issue": "Filter by work pool not filtering the \"dot\" graph\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar issue and didn't find it.\n- [X] I searched the Prefect documentation for this issue.\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\n\n### Bug summary\n\nWhen filtering by \"work pool\" in the flows screen, the \"dots graph\" is not filtered.\r\nThe \"main-pool\" is a pool I've just created, and there are no runs associated to it.\r\n\r\n\r\n\r\n\n\n### Reproduction\n\n```python3\nCreate a new \"work flow\".\r\nMake sure it's empty and has no runs in it.\r\nGo to the flows screen, and filter by this work pool.\r\nYou'll see that all the dots on the graph remain although they are not related to that work pool.\n```\n\n\n### Error\n\n_No response_\n\n### Versions\n\n```Text\nPrefect cloud\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import datetime\nfrom typing import List\nfrom uuid import UUID\n\nfrom fastapi import Body, Depends\nfrom pydantic import Field\n\nimport prefect.server.schemas as schemas\nfrom prefect.logging import get_logger\nfrom prefect.server import models\nfrom prefect.server.database.dependencies import provide_database_interface\nfrom prefect.server.database.interface import PrefectDBInterface\nfrom prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel\nfrom prefect.server.utilities.server import PrefectRouter\n\nlogger = get_logger(\"server.api.ui.flow_runs\")\n\nrouter = PrefectRouter(prefix=\"/ui/flow_runs\", tags=[\"Flow Runs\", \"UI\"])\n\n\nclass SimpleFlowRun(PrefectBaseModel):\n id: UUID = Field(default=..., description=\"The flow run id.\")\n state_type: schemas.states.StateType = Field(\n default=..., description=\"The state type.\"\n )\n timestamp: DateTimeTZ = Field(\n default=...,\n description=(\n \"The start time of the run, or the expected start time \"\n \"if it hasn't run yet.\"\n ),\n )\n duration: datetime.timedelta = Field(\n default=..., description=\"The total run time of the run.\"\n )\n lateness: datetime.timedelta = Field(\n default=..., description=\"The delay between the expected and actual start time.\"\n )\n\n\[email protected](\"/history\")\nasync def read_flow_run_history(\n sort: schemas.sorting.FlowRunSort = Body(\n schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC\n ),\n limit: int = Body(1000, le=1000),\n offset: int = Body(0, ge=0),\n flows: schemas.filters.FlowFilter = None,\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> List[SimpleFlowRun]:\n columns = [\n 
db.FlowRun.id,\n db.FlowRun.state_type,\n db.FlowRun.start_time,\n db.FlowRun.expected_start_time,\n db.FlowRun.total_run_time,\n # Although it isn't returned, we need to select\n # this field in order to compute `estimated_run_time`\n db.FlowRun.state_timestamp,\n ]\n async with db.session_context() as session:\n result = await models.flow_runs.read_flow_runs(\n columns=columns,\n flow_filter=flows,\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n sort=sort,\n limit=limit,\n offset=offset,\n session=session,\n )\n return [\n SimpleFlowRun(\n id=r.id,\n state_type=r.state_type,\n timestamp=r.start_time or r.expected_start_time,\n duration=r.estimated_run_time,\n lateness=r.estimated_start_time_delta,\n )\n for r in result\n ]\n", "path": "src/prefect/server/api/ui/flow_runs.py"}], "after_files": [{"content": "import datetime\nfrom typing import List\nfrom uuid import UUID\n\nfrom fastapi import Body, Depends\nfrom pydantic import Field\n\nimport prefect.server.schemas as schemas\nfrom prefect.logging import get_logger\nfrom prefect.server import models\nfrom prefect.server.database.dependencies import provide_database_interface\nfrom prefect.server.database.interface import PrefectDBInterface\nfrom prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel\nfrom prefect.server.utilities.server import PrefectRouter\n\nlogger = get_logger(\"server.api.ui.flow_runs\")\n\nrouter = PrefectRouter(prefix=\"/ui/flow_runs\", tags=[\"Flow Runs\", \"UI\"])\n\n\nclass SimpleFlowRun(PrefectBaseModel):\n id: UUID = Field(default=..., description=\"The flow run id.\")\n state_type: schemas.states.StateType = Field(\n default=..., description=\"The state type.\"\n )\n timestamp: DateTimeTZ = Field(\n default=...,\n description=(\n \"The start time of the run, or the expected start time \"\n \"if it hasn't run yet.\"\n ),\n )\n duration: datetime.timedelta = Field(\n default=..., description=\"The total run time of the run.\"\n )\n lateness: datetime.timedelta = Field(\n default=..., description=\"The delay between the expected and actual start time.\"\n )\n\n\[email protected](\"/history\")\nasync def read_flow_run_history(\n sort: schemas.sorting.FlowRunSort = Body(\n schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC\n ),\n limit: int = Body(1000, le=1000),\n offset: int = Body(0, ge=0),\n flows: schemas.filters.FlowFilter = None,\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n work_pools: schemas.filters.WorkPoolFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> List[SimpleFlowRun]:\n columns = [\n db.FlowRun.id,\n db.FlowRun.state_type,\n db.FlowRun.start_time,\n db.FlowRun.expected_start_time,\n db.FlowRun.total_run_time,\n # Although it isn't returned, we need to select\n # this field in order to compute `estimated_run_time`\n db.FlowRun.state_timestamp,\n ]\n async with db.session_context() as session:\n result = await models.flow_runs.read_flow_runs(\n columns=columns,\n flow_filter=flows,\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n work_pool_filter=work_pools,\n sort=sort,\n limit=limit,\n offset=offset,\n session=session,\n )\n return [\n SimpleFlowRun(\n id=r.id,\n state_type=r.state_type,\n timestamp=r.start_time or r.expected_start_time,\n duration=r.estimated_run_time,\n lateness=r.estimated_start_time_delta,\n )\n for r in result\n ]\n", 
"path": "src/prefect/server/api/ui/flow_runs.py"}]}
| 1,369 | 203 |
gh_patches_debug_4840 | rasdani/github-patches | git_diff | vega__altair-1192 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect description of an example
https://altair-viz.github.io/gallery/scatter_linked_brush.html
The title of the page says "Faceted Scatter Plot with Linked Brushing".
But the example is a concatenated view, not a faceted view.
(The data points are shown twice in the visualization, not split by a category.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/vegalite/v2/examples/scatter_linked_brush.py`
Content:
```
1 """
2 Faceted Scatter Plot with Linked Brushing
3 -----------------------------------------
4 This is an example of using an interval selection to control the color of
5 points across multiple facets.
6 """
7 # category: interactive charts
8 import altair as alt
9 from vega_datasets import data
10
11 cars = data.cars()
12
13 brush = alt.selection(type='interval', resolve='global')
14
15 base = alt.Chart(cars).mark_point().encode(
16 y='Miles_per_Gallon',
17 color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))
18 ).add_selection(
19 brush
20 ).properties(
21 width=250,
22 height=250
23 )
24
25 base.encode(x='Horsepower') | base.encode(x='Acceleration')
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/altair/vegalite/v2/examples/scatter_linked_brush.py b/altair/vegalite/v2/examples/scatter_linked_brush.py
--- a/altair/vegalite/v2/examples/scatter_linked_brush.py
+++ b/altair/vegalite/v2/examples/scatter_linked_brush.py
@@ -1,8 +1,8 @@
"""
-Faceted Scatter Plot with Linked Brushing
------------------------------------------
+Multi-panel Scatter Plot with Linked Brushing
+---------------------------------------------
This is an example of using an interval selection to control the color of
-points across multiple facets.
+points across multiple panels.
"""
# category: interactive charts
import altair as alt
|
{"golden_diff": "diff --git a/altair/vegalite/v2/examples/scatter_linked_brush.py b/altair/vegalite/v2/examples/scatter_linked_brush.py\n--- a/altair/vegalite/v2/examples/scatter_linked_brush.py\n+++ b/altair/vegalite/v2/examples/scatter_linked_brush.py\n@@ -1,8 +1,8 @@\n \"\"\"\n-Faceted Scatter Plot with Linked Brushing\n------------------------------------------\n+Multi-panel Scatter Plot with Linked Brushing\n+---------------------------------------------\n This is an example of using an interval selection to control the color of\n-points across multiple facets.\n+points across multiple panels.\n \"\"\"\n # category: interactive charts\n import altair as alt\n", "issue": "Incorrect description of an example\nhttps://altair-viz.github.io/gallery/scatter_linked_brush.html\r\n\r\nThe title of the page says \"Faceted Scatter Plot with Linked Brushing\".\r\nBut the example is a concatenated view, not a faceted view. \r\n(The data points are shown twice in the visualization, not split by a category.) \n", "before_files": [{"content": "\"\"\"\nFaceted Scatter Plot with Linked Brushing\n-----------------------------------------\nThis is an example of using an interval selection to control the color of\npoints across multiple facets.\n\"\"\"\n# category: interactive charts\nimport altair as alt\nfrom vega_datasets import data\n\ncars = data.cars()\n\nbrush = alt.selection(type='interval', resolve='global')\n\nbase = alt.Chart(cars).mark_point().encode(\n y='Miles_per_Gallon',\n color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))\n).add_selection(\n brush\n).properties(\n width=250,\n height=250\n)\n\nbase.encode(x='Horsepower') | base.encode(x='Acceleration')\n", "path": "altair/vegalite/v2/examples/scatter_linked_brush.py"}], "after_files": [{"content": "\"\"\"\nMulti-panel Scatter Plot with Linked Brushing\n---------------------------------------------\nThis is an example of using an interval selection to control the color of\npoints across multiple panels.\n\"\"\"\n# category: interactive charts\nimport altair as alt\nfrom vega_datasets import data\n\ncars = data.cars()\n\nbrush = alt.selection(type='interval', resolve='global')\n\nbase = alt.Chart(cars).mark_point().encode(\n y='Miles_per_Gallon',\n color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))\n).add_selection(\n brush\n).properties(\n width=250,\n height=250\n)\n\nbase.encode(x='Horsepower') | base.encode(x='Acceleration')\n", "path": "altair/vegalite/v2/examples/scatter_linked_brush.py"}]}
| 535 | 151 |
gh_patches_debug_109 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-747 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Porting to PyTorch 2.0
**Is your feature request related to a problem? Please describe.**
As PyTorch 2.0 is approaching its release and promising significant benefits, particularly in model compilation, it would be beneficial for GaNDLF to migrate to the platform once it becomes stable. To learn more about PyTorch 2.0, visit [here](https://pytorch.org/get-started/pytorch-2.0/).
**Describe the solution you'd like**
A transition after *tagging* GaNDLF to move to pytorch 2.0
**Describe alternatives you've considered**
N.A.
**Additional context**
N.A.
--- END ISSUE ---
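Since the issue's main technical motivation is PyTorch 2.x model compilation, a minimal, framework-agnostic sketch of that feature is shown below; the model is a stand-in rather than a GaNDLF architecture, and GaNDLF itself may or may not call `torch.compile` after the version bump.

```python
# Minimal torch.compile sketch (available from PyTorch 2.0 onward).
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 2))
compiled_model = torch.compile(model)  # returns an optimized callable wrapper

x = torch.randn(8, 16)
with torch.no_grad():
    out = compiled_model(x)  # first call triggers graph capture/compilation
print(out.shape)  # torch.Size([8, 2])
```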
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import sys, re, os
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 try:
13 with open("README.md") as readme_file:
14 readme = readme_file.read()
15 except Exception as error:
16 readme = "No README information found."
17 sys.stderr.write(
18 "Warning: Could not open '%s' due %s\n" % ("README.md", error)
19 )
20
21
22 class CustomInstallCommand(install):
23 def run(self):
24 install.run(self)
25
26
27 class CustomDevelopCommand(develop):
28 def run(self):
29 develop.run(self)
30
31
32 class CustomEggInfoCommand(egg_info):
33 def run(self):
34 egg_info.run(self)
35
36
37 try:
38 filepath = "GANDLF/version.py"
39 version_file = open(filepath)
40 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
41
42 except Exception as error:
43 __version__ = "0.0.1"
44 sys.stderr.write(
45 "Warning: Could not open '%s' due %s\n" % (filepath, error)
46 )
47
48 # Handle cases where specific files need to be bundled into the final package as installed via PyPI
49 dockerfiles = [
50 item
51 for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))
52 if (os.path.isfile(item) and item.startswith("Dockerfile-"))
53 ]
54 entrypoint_files = [
55 item
56 for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))
57 if (os.path.isfile(item) and item.startswith("gandlf_"))
58 ]
59 setup_files = ["setup.py", ".dockerignore", "pyproject.toml", "MANIFEST.in"]
60 all_extra_files = dockerfiles + entrypoint_files + setup_files
61 all_extra_files_pathcorrected = [
62 os.path.join("../", item) for item in all_extra_files
63 ]
64 # find_packages should only ever find these as subpackages of gandlf, not as top-level packages
65 # generate this dynamically?
66 # GANDLF.GANDLF is needed to prevent recursion madness in deployments
67 toplevel_package_excludes = [
68 "GANDLF.GANDLF",
69 "anonymize",
70 "cli",
71 "compute",
72 "data",
73 "grad_clipping",
74 "losses",
75 "metrics",
76 "models",
77 "optimizers",
78 "schedulers",
79 "utils",
80 ]
81
82
83 requirements = [
84 "torch==1.13.1",
85 "black==23.11.0",
86 "numpy==1.25.0",
87 "scipy",
88 "SimpleITK!=2.0.*",
89 "SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536
90 "torchvision",
91 "tqdm",
92 "torchio==0.18.75",
93 "pandas>=2.0.0",
94 "scikit-learn>=0.23.2",
95 "scikit-image>=0.19.1",
96 "setuptools",
97 "seaborn",
98 "pyyaml",
99 "tiffslide",
100 "matplotlib",
101 "gdown",
102 "pytest",
103 "coverage",
104 "pytest-cov",
105 "psutil",
106 "medcam",
107 "opencv-python",
108 "torchmetrics==1.1.2",
109 "zarr==2.10.3",
110 "pydicom",
111 "onnx",
112 "torchinfo==1.7.0",
113 "segmentation-models-pytorch==0.3.2",
114 "ACSConv==0.1.1",
115 "docker",
116 "dicom-anonymizer",
117 "twine",
118 "zarr",
119 "keyring",
120 ]
121
122 if __name__ == "__main__":
123 setup(
124 name="GANDLF",
125 version=__version__,
126 author="MLCommons",
127 author_email="[email protected]",
128 python_requires=">=3.9, <3.11",
129 packages=find_packages(
130 where=os.path.dirname(os.path.abspath(__file__)),
131 exclude=toplevel_package_excludes,
132 ),
133 cmdclass={
134 "install": CustomInstallCommand,
135 "develop": CustomDevelopCommand,
136 "egg_info": CustomEggInfoCommand,
137 },
138 scripts=[
139 "gandlf_run",
140 "gandlf_constructCSV",
141 "gandlf_collectStats",
142 "gandlf_patchMiner",
143 "gandlf_preprocess",
144 "gandlf_anonymizer",
145 "gandlf_verifyInstall",
146 "gandlf_configGenerator",
147 "gandlf_recoverConfig",
148 "gandlf_deploy",
149 "gandlf_optimizeModel",
150 "gandlf_generateMetrics",
151 ],
152 classifiers=[
153 "Development Status :: 3 - Alpha",
154 "Intended Audience :: Science/Research",
155 "License :: OSI Approved :: Apache Software License",
156 "Natural Language :: English",
157 "Operating System :: OS Independent",
158 "Programming Language :: Python :: 3.9",
159 "Programming Language :: Python :: 3.10",
160 "Topic :: Scientific/Engineering :: Medical Science Apps.",
161 ],
162 description=(
163 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
164 ),
165 install_requires=requirements,
166 license="Apache-2.0",
167 long_description=readme,
168 long_description_content_type="text/markdown",
169 include_package_data=True,
170 package_data={"GANDLF": all_extra_files_pathcorrected},
171 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch",
172 zip_safe=False,
173 )
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -81,7 +81,7 @@
requirements = [
- "torch==1.13.1",
+ "torch==2.1.0",
"black==23.11.0",
"numpy==1.25.0",
"scipy",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -81,7 +81,7 @@\n \n \n requirements = [\n- \"torch==1.13.1\",\n+ \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n", "issue": "Porting to PyTorch 2.0\n**Is your feature request related to a problem? Please describe.**\r\nAs PyTorch 2.0 is approaching its release and promising significant benefits, particularly in model compilation, it would be beneficial for GaNDLF to migrate to the platform once it becomes stable. To learn more about PyTorch 2.0, visit [here](https://pytorch.org/get-started/pytorch-2.0/).\r\n\r\n**Describe the solution you'd like**\r\nA transition after *tagging* GaNDLF to move to pytorch 2.0\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nN.A.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==1.13.1\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.75\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n 
\"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.2\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">=3.9, <3.11\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", 
\"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.75\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.2\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">=3.9, <3.11\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}]}
| 2,086 | 87 |
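The record above pins `torch` to the 2.x line; the headline feature of that release family is opt-in graph compilation. A minimal sketch of how it is invoked, using a placeholder model and input rather than anything from the GaNDLF codebase:

```python
# Minimal torch.compile sketch (requires PyTorch >= 2.0).
# The model and input below are placeholders, not taken from the GaNDLF codebase.
import torch

model = torch.nn.Sequential(
    torch.nn.Linear(16, 32),
    torch.nn.ReLU(),
    torch.nn.Linear(32, 2),
)

compiled_model = torch.compile(model)  # returns a wrapper; call it like the original module

x = torch.randn(8, 16)
with torch.no_grad():
    out = compiled_model(x)  # first call triggers graph capture and compilation
print(out.shape)  # torch.Size([8, 2])
```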
gh_patches_debug_44424 | rasdani/github-patches | git_diff | deepset-ai__haystack-2048 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ranker scores shouldn't be discarded
**Is your feature request related to a problem? Please describe.**
The reason for using a ranker is that the scores should be more accurate than the retriever scores. For this reason, in that use case you would also want to use the ranker scores later on instead of the retriever score (e.g. when defining a threshold). You currently cannot do that as the ranker scores aren't saved anywhere (see [here](https://github.com/deepset-ai/haystack/blob/b87c0c950b2243f47fb249aa3865d4c46edb16df/haystack/nodes/ranker/sentence_transformers.py)) as the reader only updates the order and not the score. Having the order of documents not dependent on the scores is also unintuitive.
**Describe the solution you'd like**
Replace retriever scores with ranker scores.
**Describe alternatives you've considered**
Having an additional field to save ranker scores.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/nodes/ranker/sentence_transformers.py`
Content:
```
1 from typing import List, Optional, Union, Tuple, Iterator
2 import logging
3 from pathlib import Path
4
5 import torch
6 from torch.nn import DataParallel
7 from transformers import AutoModelForSequenceClassification, AutoTokenizer
8
9 from haystack.errors import HaystackError
10 from haystack.schema import Document
11 from haystack.nodes.ranker.base import BaseRanker
12 from haystack.modeling.utils import initialize_device_settings
13
14 logger = logging.getLogger(__name__)
15
16
17 class SentenceTransformersRanker(BaseRanker):
18 """
19 Sentence Transformer based pre-trained Cross-Encoder model for Document Re-ranking (https://huggingface.co/cross-encoder).
20 Re-Ranking can be used on top of a retriever to boost the performance for document search. This is particularly useful if the retriever has a high recall but is bad in sorting the documents by relevance.
21
22 SentenceTransformerRanker handles Cross-Encoder models
23 - use a single logit as similarity score e.g. cross-encoder/ms-marco-MiniLM-L-12-v2
24 - use two output logits (no_answer, has_answer) e.g. deepset/gbert-base-germandpr-reranking
25 https://www.sbert.net/docs/pretrained-models/ce-msmarco.html#usage-with-transformers
26
27 | With a SentenceTransformersRanker, you can:
28 - directly get predictions via predict()
29
30 Usage example:
31 ...
32 retriever = BM25Retriever(document_store=document_store)
33 ranker = SentenceTransformersRanker(model_name_or_path="cross-encoder/ms-marco-MiniLM-L-12-v2")
34 p = Pipeline()
35 p.add_node(component=retriever, name="ESRetriever", inputs=["Query"])
36 p.add_node(component=ranker, name="Ranker", inputs=["ESRetriever"])
37 """
38
39 def __init__(
40 self,
41 model_name_or_path: Union[str, Path],
42 model_version: Optional[str] = None,
43 top_k: int = 10,
44 use_gpu: bool = True,
45 devices: Optional[List[Union[str, torch.device]]] = None,
46 batch_size: Optional[int] = None,
47 ):
48 """
49 :param model_name_or_path: Directory of a saved model or the name of a public model e.g.
50 'cross-encoder/ms-marco-MiniLM-L-12-v2'.
51 See https://huggingface.co/cross-encoder for full list of available models
52 :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
53 :param top_k: The maximum number of documents to return
54 :param use_gpu: Whether to use all available GPUs or the CPU. Falls back on CPU if no GPU is available.
55 :param devices: List of GPU (or CPU) devices, to limit inference to certain GPUs and not use all available ones
56 The strings will be converted into pytorch devices, so use the string notation described here:
57 https://pytorch.org/docs/stable/tensor_attributes.html?highlight=torch%20device#torch.torch.device
58 (e.g. ["cuda:0"]).
59 :param batch_size: Number of documents to process at a time.
60 """
61 super().__init__()
62
63 self.top_k = top_k
64
65 if devices is not None:
66 self.devices = [torch.device(device) for device in devices]
67 else:
68 self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=True)
69
70 self.transformer_model = AutoModelForSequenceClassification.from_pretrained(
71 pretrained_model_name_or_path=model_name_or_path, revision=model_version
72 )
73 self.transformer_model.to(str(self.devices[0]))
74 self.transformer_tokenizer = AutoTokenizer.from_pretrained(
75 pretrained_model_name_or_path=model_name_or_path, revision=model_version
76 )
77 self.transformer_model.eval()
78
79 if len(self.devices) > 1:
80 self.model = DataParallel(self.transformer_model, device_ids=self.devices)
81
82 self.batch_size = batch_size
83
84 def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None) -> List[Document]:
85 """
86 Use loaded ranker model to re-rank the supplied list of Document.
87
88 Returns list of Document sorted by (desc.) similarity with the query.
89
90 :param query: Query string
91 :param documents: List of Document to be re-ranked
92 :param top_k: The maximum number of documents to return
93 :return: List of Document
94 """
95 if top_k is None:
96 top_k = self.top_k
97
98 features = self.transformer_tokenizer(
99 [query for doc in documents],
100 [doc.content for doc in documents],
101 padding=True,
102 truncation=True,
103 return_tensors="pt",
104 ).to(self.devices[0])
105
106 # SentenceTransformerRanker uses:
107 # 1. the logit as similarity score/answerable classification
108 # 2. the logits as answerable classification (no_answer / has_answer)
109 # https://www.sbert.net/docs/pretrained-models/ce-msmarco.html#usage-with-transformers
110 with torch.no_grad():
111 similarity_scores = self.transformer_model(**features).logits
112
113 logits_dim = similarity_scores.shape[1] # [batch_size, logits_dim]
114 sorted_scores_and_documents = sorted(
115 zip(similarity_scores, documents),
116 key=lambda similarity_document_tuple:
117 # assume the last element in logits represents the `has_answer` label
118 similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],
119 reverse=True,
120 )
121
122 # rank documents according to scores
123 sorted_documents = [doc for _, doc in sorted_scores_and_documents]
124 return sorted_documents[:top_k]
125
126 def predict_batch(
127 self,
128 queries: List[str],
129 documents: Union[List[Document], List[List[Document]]],
130 top_k: Optional[int] = None,
131 batch_size: Optional[int] = None,
132 ) -> Union[List[Document], List[List[Document]]]:
133 """
134 Use loaded ranker model to re-rank the supplied lists of Documents.
135
136 Returns lists of Documents sorted by (desc.) similarity with the corresponding queries.
137
138
139 - If you provide a list containing a single query...
140
141 - ... and a single list of Documents, the single list of Documents will be re-ranked based on the
142 supplied query.
143 - ... and a list of lists of Documents, each list of Documents will be re-ranked individually based on the
144 supplied query.
145
146
147 - If you provide a list of multiple queries...
148
149 - ... you need to provide a list of lists of Documents. Each list of Documents will be re-ranked based on
150 its corresponding query.
151
152 :param queries: Single query string or list of queries
153 :param documents: Single list of Documents or list of lists of Documents to be reranked.
154 :param top_k: The maximum number of documents to return per Document list.
155 :param batch_size: Number of Documents to process at a time.
156 """
157 if top_k is None:
158 top_k = self.top_k
159
160 if batch_size is None:
161 batch_size = self.batch_size
162
163 number_of_docs, all_queries, all_docs, single_list_of_docs = self._preprocess_batch_queries_and_docs(
164 queries=queries, documents=documents
165 )
166
167 batches = self._get_batches(all_queries=all_queries, all_docs=all_docs, batch_size=batch_size)
168 preds = []
169 for cur_queries, cur_docs in batches:
170 features = self.transformer_tokenizer(
171 cur_queries, [doc.content for doc in cur_docs], padding=True, truncation=True, return_tensors="pt"
172 ).to(self.devices[0])
173
174 with torch.no_grad():
175 similarity_scores = self.transformer_model(**features).logits
176 preds.extend(similarity_scores)
177
178 logits_dim = similarity_scores.shape[1] # [batch_size, logits_dim]
179 if single_list_of_docs:
180 sorted_scores_and_documents = sorted(
181 zip(similarity_scores, documents),
182 key=lambda similarity_document_tuple:
183 # assume the last element in logits represents the `has_answer` label
184 similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],
185 reverse=True,
186 )
187
188 # rank documents according to scores
189 sorted_documents = [doc for _, doc in sorted_scores_and_documents if isinstance(doc, Document)]
190 return sorted_documents[:top_k]
191 else:
192 # Group predictions together
193 grouped_predictions = []
194 left_idx = 0
195 right_idx = 0
196 for number in number_of_docs:
197 right_idx = left_idx + number
198 grouped_predictions.append(similarity_scores[left_idx:right_idx])
199 left_idx = right_idx
200
201 result = []
202 for pred_group, doc_group in zip(grouped_predictions, documents):
203 sorted_scores_and_documents = sorted(
204 zip(pred_group, doc_group), # type: ignore
205 key=lambda similarity_document_tuple:
206 # assume the last element in logits represents the `has_answer` label
207 similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],
208 reverse=True,
209 )
210
211 # rank documents according to scores
212 sorted_documents = [doc for _, doc in sorted_scores_and_documents if isinstance(doc, Document)][:top_k]
213 result.append(sorted_documents)
214
215 return result
216
217 def _preprocess_batch_queries_and_docs(
218 self, queries: List[str], documents: Union[List[Document], List[List[Document]]]
219 ) -> Tuple[List[int], List[str], List[Document], bool]:
220 number_of_docs = []
221 all_queries = []
222 all_docs: List[Document] = []
223 single_list_of_docs = False
224
225 # Docs case 1: single list of Documents -> rerank single list of Documents based on single query
226 if len(documents) > 0 and isinstance(documents[0], Document):
227 if len(queries) != 1:
228 raise HaystackError("Number of queries must be 1 if a single list of Documents is provided.")
229 query = queries[0]
230 number_of_docs = [len(documents)]
231 all_queries = [query] * len(documents)
232 all_docs = documents # type: ignore
233 single_list_of_docs = True
234
235 # Docs case 2: list of lists of Documents -> rerank each list of Documents based on corresponding query
236 # If queries contains a single query, apply it to each list of Documents
237 if len(documents) > 0 and isinstance(documents[0], list):
238 if len(queries) == 1:
239 queries = queries * len(documents)
240 if len(queries) != len(documents):
241 raise HaystackError("Number of queries must be equal to number of provided Document lists.")
242 for query, cur_docs in zip(queries, documents):
243 if not isinstance(cur_docs, list):
244 raise HaystackError(f"cur_docs was of type {type(cur_docs)}, but expected a list of Documents.")
245 number_of_docs.append(len(cur_docs))
246 all_queries.extend([query] * len(cur_docs))
247 all_docs.extend(cur_docs)
248
249 return number_of_docs, all_queries, all_docs, single_list_of_docs
250
251 @staticmethod
252 def _get_batches(
253 all_queries: List[str], all_docs: List[Document], batch_size: Optional[int]
254 ) -> Iterator[Tuple[List[str], List[Document]]]:
255 if batch_size is None:
256 yield all_queries, all_docs
257 return
258 else:
259 for index in range(0, len(all_queries), batch_size):
260 yield all_queries[index : index + batch_size], all_docs[index : index + batch_size]
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/haystack/nodes/ranker/sentence_transformers.py b/haystack/nodes/ranker/sentence_transformers.py
--- a/haystack/nodes/ranker/sentence_transformers.py
+++ b/haystack/nodes/ranker/sentence_transformers.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, Union, Tuple, Iterator
+from typing import List, Optional, Union, Tuple, Iterator, Any
import logging
from pathlib import Path
@@ -44,6 +44,7 @@
use_gpu: bool = True,
devices: Optional[List[Union[str, torch.device]]] = None,
batch_size: Optional[int] = None,
+ scale_score: bool = True,
):
"""
:param model_name_or_path: Directory of a saved model or the name of a public model e.g.
@@ -57,6 +58,9 @@
https://pytorch.org/docs/stable/tensor_attributes.html?highlight=torch%20device#torch.torch.device
(e.g. ["cuda:0"]).
:param batch_size: Number of documents to process at a time.
+ :param scale_score: The raw predictions will be transformed using a Sigmoid activation function in case the model
+ only predicts a single label. For multi-label predictions, no scaling is applied. Set this
+ to False if you do not want any scaling of the raw predictions.
"""
super().__init__()
@@ -76,6 +80,15 @@
)
self.transformer_model.eval()
+ # we use sigmoid activation function to scale the score in case there is only a single label
+ # we do not apply any scaling when scale_score is set to False
+ num_labels = self.transformer_model.num_labels
+ self.activation_function: torch.nn.Module
+ if num_labels == 1 and scale_score:
+ self.activation_function = torch.nn.Sigmoid()
+ else:
+ self.activation_function = torch.nn.Identity()
+
if len(self.devices) > 1:
self.model = DataParallel(self.transformer_model, device_ids=self.devices)
@@ -119,9 +132,31 @@
reverse=True,
)
- # rank documents according to scores
- sorted_documents = [doc for _, doc in sorted_scores_and_documents]
- return sorted_documents[:top_k]
+ # add normalized scores to documents
+ sorted_documents = self._add_scores_to_documents(sorted_scores_and_documents[:top_k], logits_dim)
+
+ return sorted_documents
+
+ def _add_scores_to_documents(
+ self, sorted_scores_and_documents: List[Tuple[Any, Document]], logits_dim: int
+ ) -> List[Document]:
+ """
+ Normalize and add scores to retrieved result documents.
+
+ :param sorted_scores_and_documents: List of score, Document Tuples.
+ :param logits_dim: Dimensionality of the returned scores.
+ """
+ sorted_documents = []
+ for raw_score, doc in sorted_scores_and_documents:
+ if logits_dim >= 2:
+ score = self.activation_function(raw_score)[-1]
+ else:
+ score = self.activation_function(raw_score)[0]
+
+ doc.score = score.detach().cpu().numpy().tolist()
+ sorted_documents.append(doc)
+
+ return sorted_documents
def predict_batch(
self,
@@ -185,9 +220,11 @@
reverse=True,
)
- # rank documents according to scores
- sorted_documents = [doc for _, doc in sorted_scores_and_documents if isinstance(doc, Document)]
- return sorted_documents[:top_k]
+ # is this step needed?
+ sorted_documents = [(score, doc) for score, doc in sorted_scores_and_documents if isinstance(doc, Document)]
+ sorted_documents_with_scores = self._add_scores_to_documents(sorted_documents[:top_k], logits_dim)
+
+ return sorted_documents_with_scores
else:
# Group predictions together
grouped_predictions = []
@@ -209,8 +246,12 @@
)
# rank documents according to scores
- sorted_documents = [doc for _, doc in sorted_scores_and_documents if isinstance(doc, Document)][:top_k]
- result.append(sorted_documents)
+ sorted_documents = [
+ (score, doc) for score, doc in sorted_scores_and_documents if isinstance(doc, Document)
+ ]
+ sorted_documents_with_scores = self._add_scores_to_documents(sorted_documents[:top_k], logits_dim)
+
+ result.append(sorted_documents_with_scores)
return result
|
{"golden_diff": "diff --git a/haystack/nodes/ranker/sentence_transformers.py b/haystack/nodes/ranker/sentence_transformers.py\n--- a/haystack/nodes/ranker/sentence_transformers.py\n+++ b/haystack/nodes/ranker/sentence_transformers.py\n@@ -1,4 +1,4 @@\n-from typing import List, Optional, Union, Tuple, Iterator\n+from typing import List, Optional, Union, Tuple, Iterator, Any\n import logging\n from pathlib import Path\n \n@@ -44,6 +44,7 @@\n use_gpu: bool = True,\n devices: Optional[List[Union[str, torch.device]]] = None,\n batch_size: Optional[int] = None,\n+ scale_score: bool = True,\n ):\n \"\"\"\n :param model_name_or_path: Directory of a saved model or the name of a public model e.g.\n@@ -57,6 +58,9 @@\n https://pytorch.org/docs/stable/tensor_attributes.html?highlight=torch%20device#torch.torch.device\n (e.g. [\"cuda:0\"]).\n :param batch_size: Number of documents to process at a time.\n+ :param scale_score: The raw predictions will be transformed using a Sigmoid activation function in case the model\n+ only predicts a single label. For multi-label predictions, no scaling is applied. Set this\n+ to False if you do not want any scaling of the raw predictions.\n \"\"\"\n super().__init__()\n \n@@ -76,6 +80,15 @@\n )\n self.transformer_model.eval()\n \n+ # we use sigmoid activation function to scale the score in case there is only a single label\n+ # we do not apply any scaling when scale_score is set to False\n+ num_labels = self.transformer_model.num_labels\n+ self.activation_function: torch.nn.Module\n+ if num_labels == 1 and scale_score:\n+ self.activation_function = torch.nn.Sigmoid()\n+ else:\n+ self.activation_function = torch.nn.Identity()\n+\n if len(self.devices) > 1:\n self.model = DataParallel(self.transformer_model, device_ids=self.devices)\n \n@@ -119,9 +132,31 @@\n reverse=True,\n )\n \n- # rank documents according to scores\n- sorted_documents = [doc for _, doc in sorted_scores_and_documents]\n- return sorted_documents[:top_k]\n+ # add normalized scores to documents\n+ sorted_documents = self._add_scores_to_documents(sorted_scores_and_documents[:top_k], logits_dim)\n+\n+ return sorted_documents\n+\n+ def _add_scores_to_documents(\n+ self, sorted_scores_and_documents: List[Tuple[Any, Document]], logits_dim: int\n+ ) -> List[Document]:\n+ \"\"\"\n+ Normalize and add scores to retrieved result documents.\n+\n+ :param sorted_scores_and_documents: List of score, Document Tuples.\n+ :param logits_dim: Dimensionality of the returned scores.\n+ \"\"\"\n+ sorted_documents = []\n+ for raw_score, doc in sorted_scores_and_documents:\n+ if logits_dim >= 2:\n+ score = self.activation_function(raw_score)[-1]\n+ else:\n+ score = self.activation_function(raw_score)[0]\n+\n+ doc.score = score.detach().cpu().numpy().tolist()\n+ sorted_documents.append(doc)\n+\n+ return sorted_documents\n \n def predict_batch(\n self,\n@@ -185,9 +220,11 @@\n reverse=True,\n )\n \n- # rank documents according to scores\n- sorted_documents = [doc for _, doc in sorted_scores_and_documents if isinstance(doc, Document)]\n- return sorted_documents[:top_k]\n+ # is this step needed?\n+ sorted_documents = [(score, doc) for score, doc in sorted_scores_and_documents if isinstance(doc, Document)]\n+ sorted_documents_with_scores = self._add_scores_to_documents(sorted_documents[:top_k], logits_dim)\n+\n+ return sorted_documents_with_scores\n else:\n # Group predictions together\n grouped_predictions = []\n@@ -209,8 +246,12 @@\n )\n \n # rank documents according to scores\n- sorted_documents = [doc for _, doc in 
sorted_scores_and_documents if isinstance(doc, Document)][:top_k]\n- result.append(sorted_documents)\n+ sorted_documents = [\n+ (score, doc) for score, doc in sorted_scores_and_documents if isinstance(doc, Document)\n+ ]\n+ sorted_documents_with_scores = self._add_scores_to_documents(sorted_documents[:top_k], logits_dim)\n+\n+ result.append(sorted_documents_with_scores)\n \n return result\n", "issue": "Ranker scores shouldn't be discarded\n**Is your feature request related to a problem? Please describe.**\r\nThe reason for using a ranker is that the scores should be more accurate than the retriever scores. For this reason, in that use case you would also want to use the ranker scores later on instead of the retriever score (e.g. when defining a threshold). You currently cannot do that as the ranker scores aren't saved anywhere (see [here](https://github.com/deepset-ai/haystack/blob/b87c0c950b2243f47fb249aa3865d4c46edb16df/haystack/nodes/ranker/sentence_transformers.py)) as the reader only updates the order and not the score. Having the order of documents not dependent on the scores is also unintuitive.\r\n\r\n**Describe the solution you'd like**\r\nReplace retriever scores with ranker scores.\r\n\r\n**Describe alternatives you've considered**\r\nHaving an additional field to save ranker scores.\n", "before_files": [{"content": "from typing import List, Optional, Union, Tuple, Iterator\nimport logging\nfrom pathlib import Path\n\nimport torch\nfrom torch.nn import DataParallel\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\nfrom haystack.errors import HaystackError\nfrom haystack.schema import Document\nfrom haystack.nodes.ranker.base import BaseRanker\nfrom haystack.modeling.utils import initialize_device_settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass SentenceTransformersRanker(BaseRanker):\n \"\"\"\n Sentence Transformer based pre-trained Cross-Encoder model for Document Re-ranking (https://huggingface.co/cross-encoder).\n Re-Ranking can be used on top of a retriever to boost the performance for document search. This is particularly useful if the retriever has a high recall but is bad in sorting the documents by relevance.\n\n SentenceTransformerRanker handles Cross-Encoder models\n - use a single logit as similarity score e.g. cross-encoder/ms-marco-MiniLM-L-12-v2\n - use two output logits (no_answer, has_answer) e.g. deepset/gbert-base-germandpr-reranking\n https://www.sbert.net/docs/pretrained-models/ce-msmarco.html#usage-with-transformers\n\n | With a SentenceTransformersRanker, you can:\n - directly get predictions via predict()\n\n Usage example:\n ...\n retriever = BM25Retriever(document_store=document_store)\n ranker = SentenceTransformersRanker(model_name_or_path=\"cross-encoder/ms-marco-MiniLM-L-12-v2\")\n p = Pipeline()\n p.add_node(component=retriever, name=\"ESRetriever\", inputs=[\"Query\"])\n p.add_node(component=ranker, name=\"Ranker\", inputs=[\"ESRetriever\"])\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: Union[str, Path],\n model_version: Optional[str] = None,\n top_k: int = 10,\n use_gpu: bool = True,\n devices: Optional[List[Union[str, torch.device]]] = None,\n batch_size: Optional[int] = None,\n ):\n \"\"\"\n :param model_name_or_path: Directory of a saved model or the name of a public model e.g.\n 'cross-encoder/ms-marco-MiniLM-L-12-v2'.\n See https://huggingface.co/cross-encoder for full list of available models\n :param model_version: The version of model to use from the HuggingFace model hub. 
Can be tag name, branch name, or commit hash.\n :param top_k: The maximum number of documents to return\n :param use_gpu: Whether to use all available GPUs or the CPU. Falls back on CPU if no GPU is available.\n :param devices: List of GPU (or CPU) devices, to limit inference to certain GPUs and not use all available ones\n The strings will be converted into pytorch devices, so use the string notation described here:\n https://pytorch.org/docs/stable/tensor_attributes.html?highlight=torch%20device#torch.torch.device\n (e.g. [\"cuda:0\"]).\n :param batch_size: Number of documents to process at a time.\n \"\"\"\n super().__init__()\n\n self.top_k = top_k\n\n if devices is not None:\n self.devices = [torch.device(device) for device in devices]\n else:\n self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=True)\n\n self.transformer_model = AutoModelForSequenceClassification.from_pretrained(\n pretrained_model_name_or_path=model_name_or_path, revision=model_version\n )\n self.transformer_model.to(str(self.devices[0]))\n self.transformer_tokenizer = AutoTokenizer.from_pretrained(\n pretrained_model_name_or_path=model_name_or_path, revision=model_version\n )\n self.transformer_model.eval()\n\n if len(self.devices) > 1:\n self.model = DataParallel(self.transformer_model, device_ids=self.devices)\n\n self.batch_size = batch_size\n\n def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None) -> List[Document]:\n \"\"\"\n Use loaded ranker model to re-rank the supplied list of Document.\n\n Returns list of Document sorted by (desc.) similarity with the query.\n\n :param query: Query string\n :param documents: List of Document to be re-ranked\n :param top_k: The maximum number of documents to return\n :return: List of Document\n \"\"\"\n if top_k is None:\n top_k = self.top_k\n\n features = self.transformer_tokenizer(\n [query for doc in documents],\n [doc.content for doc in documents],\n padding=True,\n truncation=True,\n return_tensors=\"pt\",\n ).to(self.devices[0])\n\n # SentenceTransformerRanker uses:\n # 1. the logit as similarity score/answerable classification\n # 2. the logits as answerable classification (no_answer / has_answer)\n # https://www.sbert.net/docs/pretrained-models/ce-msmarco.html#usage-with-transformers\n with torch.no_grad():\n similarity_scores = self.transformer_model(**features).logits\n\n logits_dim = similarity_scores.shape[1] # [batch_size, logits_dim]\n sorted_scores_and_documents = sorted(\n zip(similarity_scores, documents),\n key=lambda similarity_document_tuple:\n # assume the last element in logits represents the `has_answer` label\n similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],\n reverse=True,\n )\n\n # rank documents according to scores\n sorted_documents = [doc for _, doc in sorted_scores_and_documents]\n return sorted_documents[:top_k]\n\n def predict_batch(\n self,\n queries: List[str],\n documents: Union[List[Document], List[List[Document]]],\n top_k: Optional[int] = None,\n batch_size: Optional[int] = None,\n ) -> Union[List[Document], List[List[Document]]]:\n \"\"\"\n Use loaded ranker model to re-rank the supplied lists of Documents.\n\n Returns lists of Documents sorted by (desc.) similarity with the corresponding queries.\n\n\n - If you provide a list containing a single query...\n\n - ... and a single list of Documents, the single list of Documents will be re-ranked based on the\n supplied query.\n - ... 
and a list of lists of Documents, each list of Documents will be re-ranked individually based on the\n supplied query.\n\n\n - If you provide a list of multiple queries...\n\n - ... you need to provide a list of lists of Documents. Each list of Documents will be re-ranked based on\n its corresponding query.\n\n :param queries: Single query string or list of queries\n :param documents: Single list of Documents or list of lists of Documents to be reranked.\n :param top_k: The maximum number of documents to return per Document list.\n :param batch_size: Number of Documents to process at a time.\n \"\"\"\n if top_k is None:\n top_k = self.top_k\n\n if batch_size is None:\n batch_size = self.batch_size\n\n number_of_docs, all_queries, all_docs, single_list_of_docs = self._preprocess_batch_queries_and_docs(\n queries=queries, documents=documents\n )\n\n batches = self._get_batches(all_queries=all_queries, all_docs=all_docs, batch_size=batch_size)\n preds = []\n for cur_queries, cur_docs in batches:\n features = self.transformer_tokenizer(\n cur_queries, [doc.content for doc in cur_docs], padding=True, truncation=True, return_tensors=\"pt\"\n ).to(self.devices[0])\n\n with torch.no_grad():\n similarity_scores = self.transformer_model(**features).logits\n preds.extend(similarity_scores)\n\n logits_dim = similarity_scores.shape[1] # [batch_size, logits_dim]\n if single_list_of_docs:\n sorted_scores_and_documents = sorted(\n zip(similarity_scores, documents),\n key=lambda similarity_document_tuple:\n # assume the last element in logits represents the `has_answer` label\n similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],\n reverse=True,\n )\n\n # rank documents according to scores\n sorted_documents = [doc for _, doc in sorted_scores_and_documents if isinstance(doc, Document)]\n return sorted_documents[:top_k]\n else:\n # Group predictions together\n grouped_predictions = []\n left_idx = 0\n right_idx = 0\n for number in number_of_docs:\n right_idx = left_idx + number\n grouped_predictions.append(similarity_scores[left_idx:right_idx])\n left_idx = right_idx\n\n result = []\n for pred_group, doc_group in zip(grouped_predictions, documents):\n sorted_scores_and_documents = sorted(\n zip(pred_group, doc_group), # type: ignore\n key=lambda similarity_document_tuple:\n # assume the last element in logits represents the `has_answer` label\n similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],\n reverse=True,\n )\n\n # rank documents according to scores\n sorted_documents = [doc for _, doc in sorted_scores_and_documents if isinstance(doc, Document)][:top_k]\n result.append(sorted_documents)\n\n return result\n\n def _preprocess_batch_queries_and_docs(\n self, queries: List[str], documents: Union[List[Document], List[List[Document]]]\n ) -> Tuple[List[int], List[str], List[Document], bool]:\n number_of_docs = []\n all_queries = []\n all_docs: List[Document] = []\n single_list_of_docs = False\n\n # Docs case 1: single list of Documents -> rerank single list of Documents based on single query\n if len(documents) > 0 and isinstance(documents[0], Document):\n if len(queries) != 1:\n raise HaystackError(\"Number of queries must be 1 if a single list of Documents is provided.\")\n query = queries[0]\n number_of_docs = [len(documents)]\n all_queries = [query] * len(documents)\n all_docs = documents # type: ignore\n single_list_of_docs = True\n\n # Docs case 2: list of lists of Documents -> rerank each list of Documents based on corresponding 
query\n # If queries contains a single query, apply it to each list of Documents\n if len(documents) > 0 and isinstance(documents[0], list):\n if len(queries) == 1:\n queries = queries * len(documents)\n if len(queries) != len(documents):\n raise HaystackError(\"Number of queries must be equal to number of provided Document lists.\")\n for query, cur_docs in zip(queries, documents):\n if not isinstance(cur_docs, list):\n raise HaystackError(f\"cur_docs was of type {type(cur_docs)}, but expected a list of Documents.\")\n number_of_docs.append(len(cur_docs))\n all_queries.extend([query] * len(cur_docs))\n all_docs.extend(cur_docs)\n\n return number_of_docs, all_queries, all_docs, single_list_of_docs\n\n @staticmethod\n def _get_batches(\n all_queries: List[str], all_docs: List[Document], batch_size: Optional[int]\n ) -> Iterator[Tuple[List[str], List[Document]]]:\n if batch_size is None:\n yield all_queries, all_docs\n return\n else:\n for index in range(0, len(all_queries), batch_size):\n yield all_queries[index : index + batch_size], all_docs[index : index + batch_size]\n", "path": "haystack/nodes/ranker/sentence_transformers.py"}], "after_files": [{"content": "from typing import List, Optional, Union, Tuple, Iterator, Any\nimport logging\nfrom pathlib import Path\n\nimport torch\nfrom torch.nn import DataParallel\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\nfrom haystack.errors import HaystackError\nfrom haystack.schema import Document\nfrom haystack.nodes.ranker.base import BaseRanker\nfrom haystack.modeling.utils import initialize_device_settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass SentenceTransformersRanker(BaseRanker):\n \"\"\"\n Sentence Transformer based pre-trained Cross-Encoder model for Document Re-ranking (https://huggingface.co/cross-encoder).\n Re-Ranking can be used on top of a retriever to boost the performance for document search. This is particularly useful if the retriever has a high recall but is bad in sorting the documents by relevance.\n\n SentenceTransformerRanker handles Cross-Encoder models\n - use a single logit as similarity score e.g. cross-encoder/ms-marco-MiniLM-L-12-v2\n - use two output logits (no_answer, has_answer) e.g. deepset/gbert-base-germandpr-reranking\n https://www.sbert.net/docs/pretrained-models/ce-msmarco.html#usage-with-transformers\n\n | With a SentenceTransformersRanker, you can:\n - directly get predictions via predict()\n\n Usage example:\n ...\n retriever = BM25Retriever(document_store=document_store)\n ranker = SentenceTransformersRanker(model_name_or_path=\"cross-encoder/ms-marco-MiniLM-L-12-v2\")\n p = Pipeline()\n p.add_node(component=retriever, name=\"ESRetriever\", inputs=[\"Query\"])\n p.add_node(component=ranker, name=\"Ranker\", inputs=[\"ESRetriever\"])\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: Union[str, Path],\n model_version: Optional[str] = None,\n top_k: int = 10,\n use_gpu: bool = True,\n devices: Optional[List[Union[str, torch.device]]] = None,\n batch_size: Optional[int] = None,\n scale_score: bool = True,\n ):\n \"\"\"\n :param model_name_or_path: Directory of a saved model or the name of a public model e.g.\n 'cross-encoder/ms-marco-MiniLM-L-12-v2'.\n See https://huggingface.co/cross-encoder for full list of available models\n :param model_version: The version of model to use from the HuggingFace model hub. 
Can be tag name, branch name, or commit hash.\n :param top_k: The maximum number of documents to return\n :param use_gpu: Whether to use all available GPUs or the CPU. Falls back on CPU if no GPU is available.\n :param devices: List of GPU (or CPU) devices, to limit inference to certain GPUs and not use all available ones\n The strings will be converted into pytorch devices, so use the string notation described here:\n https://pytorch.org/docs/stable/tensor_attributes.html?highlight=torch%20device#torch.torch.device\n (e.g. [\"cuda:0\"]).\n :param batch_size: Number of documents to process at a time.\n :param scale_score: The raw predictions will be transformed using a Sigmoid activation function in case the model\n only predicts a single label. For multi-label predictions, no scaling is applied. Set this\n to False if you do not want any scaling of the raw predictions.\n \"\"\"\n super().__init__()\n\n self.top_k = top_k\n\n if devices is not None:\n self.devices = [torch.device(device) for device in devices]\n else:\n self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=True)\n\n self.transformer_model = AutoModelForSequenceClassification.from_pretrained(\n pretrained_model_name_or_path=model_name_or_path, revision=model_version\n )\n self.transformer_model.to(str(self.devices[0]))\n self.transformer_tokenizer = AutoTokenizer.from_pretrained(\n pretrained_model_name_or_path=model_name_or_path, revision=model_version\n )\n self.transformer_model.eval()\n\n # we use sigmoid activation function to scale the score in case there is only a single label\n # we do not apply any scaling when scale_score is set to False\n num_labels = self.transformer_model.num_labels\n self.activation_function: torch.nn.Module\n if num_labels == 1 and scale_score:\n self.activation_function = torch.nn.Sigmoid()\n else:\n self.activation_function = torch.nn.Identity()\n\n if len(self.devices) > 1:\n self.model = DataParallel(self.transformer_model, device_ids=self.devices)\n\n self.batch_size = batch_size\n\n def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None) -> List[Document]:\n \"\"\"\n Use loaded ranker model to re-rank the supplied list of Document.\n\n Returns list of Document sorted by (desc.) similarity with the query.\n\n :param query: Query string\n :param documents: List of Document to be re-ranked\n :param top_k: The maximum number of documents to return\n :return: List of Document\n \"\"\"\n if top_k is None:\n top_k = self.top_k\n\n features = self.transformer_tokenizer(\n [query for doc in documents],\n [doc.content for doc in documents],\n padding=True,\n truncation=True,\n return_tensors=\"pt\",\n ).to(self.devices[0])\n\n # SentenceTransformerRanker uses:\n # 1. the logit as similarity score/answerable classification\n # 2. 
the logits as answerable classification (no_answer / has_answer)\n # https://www.sbert.net/docs/pretrained-models/ce-msmarco.html#usage-with-transformers\n with torch.no_grad():\n similarity_scores = self.transformer_model(**features).logits\n\n logits_dim = similarity_scores.shape[1] # [batch_size, logits_dim]\n sorted_scores_and_documents = sorted(\n zip(similarity_scores, documents),\n key=lambda similarity_document_tuple:\n # assume the last element in logits represents the `has_answer` label\n similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],\n reverse=True,\n )\n\n # add normalized scores to documents\n sorted_documents = self._add_scores_to_documents(sorted_scores_and_documents[:top_k], logits_dim)\n\n return sorted_documents\n\n def _add_scores_to_documents(\n self, sorted_scores_and_documents: List[Tuple[Any, Document]], logits_dim: int\n ) -> List[Document]:\n \"\"\"\n Normalize and add scores to retrieved result documents.\n\n :param sorted_scores_and_documents: List of score, Document Tuples.\n :param logits_dim: Dimensionality of the returned scores.\n \"\"\"\n sorted_documents = []\n for raw_score, doc in sorted_scores_and_documents:\n if logits_dim >= 2:\n score = self.activation_function(raw_score)[-1]\n else:\n score = self.activation_function(raw_score)[0]\n\n doc.score = score.detach().cpu().numpy().tolist()\n sorted_documents.append(doc)\n\n return sorted_documents\n\n def predict_batch(\n self,\n queries: List[str],\n documents: Union[List[Document], List[List[Document]]],\n top_k: Optional[int] = None,\n batch_size: Optional[int] = None,\n ) -> Union[List[Document], List[List[Document]]]:\n \"\"\"\n Use loaded ranker model to re-rank the supplied lists of Documents.\n\n Returns lists of Documents sorted by (desc.) similarity with the corresponding queries.\n\n\n - If you provide a list containing a single query...\n\n - ... and a single list of Documents, the single list of Documents will be re-ranked based on the\n supplied query.\n - ... and a list of lists of Documents, each list of Documents will be re-ranked individually based on the\n supplied query.\n\n\n - If you provide a list of multiple queries...\n\n - ... you need to provide a list of lists of Documents. 
Each list of Documents will be re-ranked based on\n its corresponding query.\n\n :param queries: Single query string or list of queries\n :param documents: Single list of Documents or list of lists of Documents to be reranked.\n :param top_k: The maximum number of documents to return per Document list.\n :param batch_size: Number of Documents to process at a time.\n \"\"\"\n if top_k is None:\n top_k = self.top_k\n\n if batch_size is None:\n batch_size = self.batch_size\n\n number_of_docs, all_queries, all_docs, single_list_of_docs = self._preprocess_batch_queries_and_docs(\n queries=queries, documents=documents\n )\n\n batches = self._get_batches(all_queries=all_queries, all_docs=all_docs, batch_size=batch_size)\n preds = []\n for cur_queries, cur_docs in batches:\n features = self.transformer_tokenizer(\n cur_queries, [doc.content for doc in cur_docs], padding=True, truncation=True, return_tensors=\"pt\"\n ).to(self.devices[0])\n\n with torch.no_grad():\n similarity_scores = self.transformer_model(**features).logits\n preds.extend(similarity_scores)\n\n logits_dim = similarity_scores.shape[1] # [batch_size, logits_dim]\n if single_list_of_docs:\n sorted_scores_and_documents = sorted(\n zip(similarity_scores, documents),\n key=lambda similarity_document_tuple:\n # assume the last element in logits represents the `has_answer` label\n similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],\n reverse=True,\n )\n\n # is this step needed?\n sorted_documents = [(score, doc) for score, doc in sorted_scores_and_documents if isinstance(doc, Document)]\n sorted_documents_with_scores = self._add_scores_to_documents(sorted_documents[:top_k], logits_dim)\n\n return sorted_documents_with_scores\n else:\n # Group predictions together\n grouped_predictions = []\n left_idx = 0\n right_idx = 0\n for number in number_of_docs:\n right_idx = left_idx + number\n grouped_predictions.append(similarity_scores[left_idx:right_idx])\n left_idx = right_idx\n\n result = []\n for pred_group, doc_group in zip(grouped_predictions, documents):\n sorted_scores_and_documents = sorted(\n zip(pred_group, doc_group), # type: ignore\n key=lambda similarity_document_tuple:\n # assume the last element in logits represents the `has_answer` label\n similarity_document_tuple[0][-1] if logits_dim >= 2 else similarity_document_tuple[0],\n reverse=True,\n )\n\n # rank documents according to scores\n sorted_documents = [\n (score, doc) for score, doc in sorted_scores_and_documents if isinstance(doc, Document)\n ]\n sorted_documents_with_scores = self._add_scores_to_documents(sorted_documents[:top_k], logits_dim)\n\n result.append(sorted_documents_with_scores)\n\n return result\n\n def _preprocess_batch_queries_and_docs(\n self, queries: List[str], documents: Union[List[Document], List[List[Document]]]\n ) -> Tuple[List[int], List[str], List[Document], bool]:\n number_of_docs = []\n all_queries = []\n all_docs: List[Document] = []\n single_list_of_docs = False\n\n # Docs case 1: single list of Documents -> rerank single list of Documents based on single query\n if len(documents) > 0 and isinstance(documents[0], Document):\n if len(queries) != 1:\n raise HaystackError(\"Number of queries must be 1 if a single list of Documents is provided.\")\n query = queries[0]\n number_of_docs = [len(documents)]\n all_queries = [query] * len(documents)\n all_docs = documents # type: ignore\n single_list_of_docs = True\n\n # Docs case 2: list of lists of Documents -> rerank each list of Documents based on corresponding 
query\n # If queries contains a single query, apply it to each list of Documents\n if len(documents) > 0 and isinstance(documents[0], list):\n if len(queries) == 1:\n queries = queries * len(documents)\n if len(queries) != len(documents):\n raise HaystackError(\"Number of queries must be equal to number of provided Document lists.\")\n for query, cur_docs in zip(queries, documents):\n if not isinstance(cur_docs, list):\n raise HaystackError(f\"cur_docs was of type {type(cur_docs)}, but expected a list of Documents.\")\n number_of_docs.append(len(cur_docs))\n all_queries.extend([query] * len(cur_docs))\n all_docs.extend(cur_docs)\n\n return number_of_docs, all_queries, all_docs, single_list_of_docs\n\n @staticmethod\n def _get_batches(\n all_queries: List[str], all_docs: List[Document], batch_size: Optional[int]\n ) -> Iterator[Tuple[List[str], List[Document]]]:\n if batch_size is None:\n yield all_queries, all_docs\n return\n else:\n for index in range(0, len(all_queries), batch_size):\n yield all_queries[index : index + batch_size], all_docs[index : index + batch_size]\n", "path": "haystack/nodes/ranker/sentence_transformers.py"}]}
| 3,705 | 1,010 |
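The gold patch above attaches ranker scores to the returned documents, scaling single-logit cross-encoder outputs with a sigmoid. A condensed, standalone sketch of that scoring rule follows; `Doc` is a stand-in class, the logits are made up, and nothing here is Haystack's own API:

```python
# Standalone sketch of the score handling introduced by the patch above.
# `Doc` is a minimal stand-in for a document object with a writable score field.
from dataclasses import dataclass
from typing import List, Optional

import torch


@dataclass
class Doc:
    content: str
    score: Optional[float] = None


def attach_scores(raw_logits: torch.Tensor, docs: List[Doc], scale_score: bool = True) -> List[Doc]:
    """Sort docs by model logits and store an (optionally sigmoid-scaled) score on each."""
    logits_dim = raw_logits.shape[1]
    # Sigmoid scaling only applies to single-logit similarity models, mirroring the patch.
    activation = torch.nn.Sigmoid() if (logits_dim == 1 and scale_score) else torch.nn.Identity()

    ranked = sorted(
        zip(raw_logits, docs),
        key=lambda pair: pair[0][-1] if logits_dim >= 2 else pair[0],
        reverse=True,
    )
    for logits, doc in ranked:
        value = activation(logits)[-1] if logits_dim >= 2 else activation(logits)[0]
        doc.score = float(value)  # the score now travels with the document
    return [doc for _, doc in ranked]


# Example with a single-logit cross-encoder output for two documents.
docs = [Doc("paris is the capital of france"), Doc("bananas are yellow")]
logits = torch.tensor([[2.3], [-1.1]])
for ranked_doc in attach_scores(logits, docs):
    print(round(ranked_doc.score, 3), ranked_doc.content)
```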
gh_patches_debug_39588 | rasdani/github-patches | git_diff | google__turbinia-1002 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve exception handling in FileSystemTimelineJob
Under certain conditions, dfvfs may throw exceptions that are not currently being handled:
```
dfvfs.lib.errors.BackEndError: Unable to open file system with error: pyfsext_volume_open_file_object: unable to open volume. libfsext_superblock_read_data: unsupported read-only compatible features flags: 0xff000003. libfsext_superblock_read_file_io_handle: unable to read superblock at offset: 1024 (0x00000400). libfsext_internal_volume_read_block_groups: unable to read superblock: 0 at offset: 1024 (0x00000400). libfsext_internal_volume_open_read: unable to read block groups. libfsext_volume_open_file_io_handle: unable to read from file IO handle.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/workers/file_system_timeline.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2022 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task to run dfimagetools FileEntryLister on disk partitions."""
16
17 from __future__ import unicode_literals
18
19 import os
20
21 from turbinia import TurbiniaException
22 from turbinia.workers import TurbiniaTask
23 from turbinia.evidence import EvidenceState as state
24 from turbinia.evidence import BodyFile
25
26 if TurbiniaTask.check_worker_role():
27 try:
28 from dfvfs.helpers import volume_scanner
29 from dfvfs.lib import errors as dfvfs_errors
30 from dfimagetools import file_entry_lister
31 except ImportError as exception:
32 message = 'Could not import libraries: {0!s}'.format(exception)
33 raise TurbiniaException(message)
34
35
36 class FileSystemTimelineTask(TurbiniaTask):
37
38 REQUIRED_STATES = [state.ATTACHED]
39
40 TASK_CONFIG = {'partitions': ['all']}
41
42 def run(self, evidence, result):
43 """Task to execute (dfimagetools) FileEntryLister.
44
45 Args:
46 evidence (Evidence object): The evidence we will process.
47 result (TurbiniaTaskResult): The object to place task results into.
48
49 Returns:
50 TurbiniaTaskResult object.
51 """
52 bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')
53 output_evidence = BodyFile(source_path=bodyfile_output)
54 number_of_entries = 0
55
56 # Set things up for the FileEntryLister client. We will scan all
57 # partitions in the volume.
58 volume_scanner_options = volume_scanner.VolumeScannerOptions()
59 volume_scanner_options.partitions = self.task_config.get('partitions')
60
61 # Create the FileEntryLister client and generate the path specs
62 # for all available partitions.
63 entry_lister = file_entry_lister.FileEntryLister()
64 base_path_specs = entry_lister.GetBasePathSpecs(
65 evidence.device_path, options=volume_scanner_options)
66
67 # Iterate over all file entries and generate the output in bodyfile
68 # format.
69 try:
70 with open(bodyfile_output, 'w') as file_object:
71 for file_entry, path_segments in entry_lister.ListFileEntries(
72 base_path_specs):
73 bodyfile_entries = entry_lister.GetBodyfileEntries(
74 file_entry, path_segments)
75 for bodyfile_entry in bodyfile_entries:
76 file_object.write(bodyfile_entry)
77 file_object.write('\n')
78 number_of_entries += 1
79 output_evidence.number_of_entries = number_of_entries
80 result.add_evidence(output_evidence, evidence.config)
81 status = 'Generated file system timeline containing [{0:d}] entries'.format(
82 number_of_entries)
83 result.close(self, success=True, status=status)
84 except dfvfs_errors.ScannerError as exception:
85 result.log('Error generating bodyfile {0!s}'.format(exception))
86 status = 'Unable to generate bodyfile using provided evidence data.'
87 result.close(self, success=False, status=status)
88 raise TurbiniaException(
89 'Could not process volume: {0!s}'.format(exception))
90
91 return result
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/turbinia/workers/file_system_timeline.py b/turbinia/workers/file_system_timeline.py
--- a/turbinia/workers/file_system_timeline.py
+++ b/turbinia/workers/file_system_timeline.py
@@ -34,6 +34,7 @@
class FileSystemTimelineTask(TurbiniaTask):
+ """Task to generate file system timelines. """
REQUIRED_STATES = [state.ATTACHED]
@@ -61,31 +62,49 @@
# Create the FileEntryLister client and generate the path specs
# for all available partitions.
entry_lister = file_entry_lister.FileEntryLister()
- base_path_specs = entry_lister.GetBasePathSpecs(
- evidence.device_path, options=volume_scanner_options)
+ try:
+ base_path_specs = entry_lister.GetBasePathSpecs(
+ evidence.device_path, options=volume_scanner_options)
+ except dfvfs_errors.ScannerError as exception:
+ status = 'Unable to open evidence: {0!s}'.format(exception)
+ result.close(self, success=False, status=status)
# Iterate over all file entries and generate the output in bodyfile
# format.
try:
- with open(bodyfile_output, 'w') as file_object:
- for file_entry, path_segments in entry_lister.ListFileEntries(
- base_path_specs):
- bodyfile_entries = entry_lister.GetBodyfileEntries(
- file_entry, path_segments)
- for bodyfile_entry in bodyfile_entries:
- file_object.write(bodyfile_entry)
- file_object.write('\n')
- number_of_entries += 1
- output_evidence.number_of_entries = number_of_entries
- result.add_evidence(output_evidence, evidence.config)
- status = 'Generated file system timeline containing [{0:d}] entries'.format(
- number_of_entries)
- result.close(self, success=True, status=status)
- except dfvfs_errors.ScannerError as exception:
- result.log('Error generating bodyfile {0!s}'.format(exception))
- status = 'Unable to generate bodyfile using provided evidence data.'
+ file_entries = None
+ with open(bodyfile_output, 'w', encoding='utf-8') as file_object:
+ file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))
+ while file_entries:
+ try:
+ _, (file_entry, path_segments) = next(file_entries)
+ bodyfile_entries = entry_lister.GetBodyfileEntries(
+ file_entry, path_segments)
+ for bodyfile_entry in bodyfile_entries:
+ file_object.write(bodyfile_entry)
+ file_object.write('\n')
+ number_of_entries += 1
+ except StopIteration:
+ break
+ except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
+ dfvfs_errors.MountPointError,
+ dfvfs_errors.PathSpecError) as exception:
+ status = 'Unable to process file entry: {0!s}'.format(exception)
+ result.log(status)
+
+ if number_of_entries > 0:
+ output_evidence.number_of_entries = number_of_entries
+ result.add_evidence(output_evidence, evidence.config)
+ status = 'Generated file system timeline containing [{0:d}] entries'.format(
+ number_of_entries)
+ result.close(self, success=True, status=status)
+ else:
+ status = 'Unable to process any file entries.'
+ result.close(self, success=False, status=status)
+
+ except IOError as exception:
+ status = 'Unable to create bodyfile local output file: {0!s}'.format(
+ exception)
result.close(self, success=False, status=status)
- raise TurbiniaException(
- 'Could not process volume: {0!s}'.format(exception))
return result
|
{"golden_diff": "diff --git a/turbinia/workers/file_system_timeline.py b/turbinia/workers/file_system_timeline.py\n--- a/turbinia/workers/file_system_timeline.py\n+++ b/turbinia/workers/file_system_timeline.py\n@@ -34,6 +34,7 @@\n \n \n class FileSystemTimelineTask(TurbiniaTask):\n+ \"\"\"Task to generate file system timelines. \"\"\"\n \n REQUIRED_STATES = [state.ATTACHED]\n \n@@ -61,31 +62,49 @@\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n- base_path_specs = entry_lister.GetBasePathSpecs(\n- evidence.device_path, options=volume_scanner_options)\n+ try:\n+ base_path_specs = entry_lister.GetBasePathSpecs(\n+ evidence.device_path, options=volume_scanner_options)\n+ except dfvfs_errors.ScannerError as exception:\n+ status = 'Unable to open evidence: {0!s}'.format(exception)\n+ result.close(self, success=False, status=status)\n \n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n- with open(bodyfile_output, 'w') as file_object:\n- for file_entry, path_segments in entry_lister.ListFileEntries(\n- base_path_specs):\n- bodyfile_entries = entry_lister.GetBodyfileEntries(\n- file_entry, path_segments)\n- for bodyfile_entry in bodyfile_entries:\n- file_object.write(bodyfile_entry)\n- file_object.write('\\n')\n- number_of_entries += 1\n- output_evidence.number_of_entries = number_of_entries\n- result.add_evidence(output_evidence, evidence.config)\n- status = 'Generated file system timeline containing [{0:d}] entries'.format(\n- number_of_entries)\n- result.close(self, success=True, status=status)\n- except dfvfs_errors.ScannerError as exception:\n- result.log('Error generating bodyfile {0!s}'.format(exception))\n- status = 'Unable to generate bodyfile using provided evidence data.'\n+ file_entries = None\n+ with open(bodyfile_output, 'w', encoding='utf-8') as file_object:\n+ file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))\n+ while file_entries:\n+ try:\n+ _, (file_entry, path_segments) = next(file_entries)\n+ bodyfile_entries = entry_lister.GetBodyfileEntries(\n+ file_entry, path_segments)\n+ for bodyfile_entry in bodyfile_entries:\n+ file_object.write(bodyfile_entry)\n+ file_object.write('\\n')\n+ number_of_entries += 1\n+ except StopIteration:\n+ break\n+ except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,\n+ dfvfs_errors.MountPointError,\n+ dfvfs_errors.PathSpecError) as exception:\n+ status = 'Unable to process file entry: {0!s}'.format(exception)\n+ result.log(status)\n+\n+ if number_of_entries > 0:\n+ output_evidence.number_of_entries = number_of_entries\n+ result.add_evidence(output_evidence, evidence.config)\n+ status = 'Generated file system timeline containing [{0:d}] entries'.format(\n+ number_of_entries)\n+ result.close(self, success=True, status=status)\n+ else:\n+ status = 'Unable to process any file entries.'\n+ result.close(self, success=False, status=status)\n+\n+ except IOError as exception:\n+ status = 'Unable to create bodyfile local output file: {0!s}'.format(\n+ exception)\n result.close(self, success=False, status=status)\n- raise TurbiniaException(\n- 'Could not process volume: {0!s}'.format(exception))\n \n return result\n", "issue": "Improve exception handling in FileSystemTimelineJob\nUnder certain conditions, dfvfs may throw exceptions that are not currently being handked:\r\n\r\n```\r\ndfvfs.lib.errors.BackEndError: Unable to open file system with error: pyfsext_volume_open_file_object: 
unable to open volume. libfsext_superblock_read_data: unsupported read-only compatible features flags: 0xff000003. libfsext_superblock_read_file_io_handle: unable to read superblock at offset: 1024 (0x00000400). libfsext_internal_volume_read_block_groups: unable to read superblock: 0 at offset: 1024 (0x00000400). libfsext_internal_volume_open_read: unable to read block groups. libfsext_volume_open_file_io_handle: unable to read from file IO handle.\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to run dfimagetools FileEntryLister on disk partitions.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom turbinia import TurbiniaException\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import BodyFile\n\nif TurbiniaTask.check_worker_role():\n try:\n from dfvfs.helpers import volume_scanner\n from dfvfs.lib import errors as dfvfs_errors\n from dfimagetools import file_entry_lister\n except ImportError as exception:\n message = 'Could not import libraries: {0!s}'.format(exception)\n raise TurbiniaException(message)\n\n\nclass FileSystemTimelineTask(TurbiniaTask):\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {'partitions': ['all']}\n\n def run(self, evidence, result):\n \"\"\"Task to execute (dfimagetools) FileEntryLister.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')\n output_evidence = BodyFile(source_path=bodyfile_output)\n number_of_entries = 0\n\n # Set things up for the FileEntryLister client. 
We will scan all\n # partitions in the volume.\n volume_scanner_options = volume_scanner.VolumeScannerOptions()\n volume_scanner_options.partitions = self.task_config.get('partitions')\n\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n base_path_specs = entry_lister.GetBasePathSpecs(\n evidence.device_path, options=volume_scanner_options)\n\n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n with open(bodyfile_output, 'w') as file_object:\n for file_entry, path_segments in entry_lister.ListFileEntries(\n base_path_specs):\n bodyfile_entries = entry_lister.GetBodyfileEntries(\n file_entry, path_segments)\n for bodyfile_entry in bodyfile_entries:\n file_object.write(bodyfile_entry)\n file_object.write('\\n')\n number_of_entries += 1\n output_evidence.number_of_entries = number_of_entries\n result.add_evidence(output_evidence, evidence.config)\n status = 'Generated file system timeline containing [{0:d}] entries'.format(\n number_of_entries)\n result.close(self, success=True, status=status)\n except dfvfs_errors.ScannerError as exception:\n result.log('Error generating bodyfile {0!s}'.format(exception))\n status = 'Unable to generate bodyfile using provided evidence data.'\n result.close(self, success=False, status=status)\n raise TurbiniaException(\n 'Could not process volume: {0!s}'.format(exception))\n\n return result\n", "path": "turbinia/workers/file_system_timeline.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to run dfimagetools FileEntryLister on disk partitions.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom turbinia import TurbiniaException\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import BodyFile\n\nif TurbiniaTask.check_worker_role():\n try:\n from dfvfs.helpers import volume_scanner\n from dfvfs.lib import errors as dfvfs_errors\n from dfimagetools import file_entry_lister\n except ImportError as exception:\n message = 'Could not import libraries: {0!s}'.format(exception)\n raise TurbiniaException(message)\n\n\nclass FileSystemTimelineTask(TurbiniaTask):\n \"\"\"Task to generate file system timelines. \"\"\"\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {'partitions': ['all']}\n\n def run(self, evidence, result):\n \"\"\"Task to execute (dfimagetools) FileEntryLister.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')\n output_evidence = BodyFile(source_path=bodyfile_output)\n number_of_entries = 0\n\n # Set things up for the FileEntryLister client. 
We will scan all\n # partitions in the volume.\n volume_scanner_options = volume_scanner.VolumeScannerOptions()\n volume_scanner_options.partitions = self.task_config.get('partitions')\n\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n try:\n base_path_specs = entry_lister.GetBasePathSpecs(\n evidence.device_path, options=volume_scanner_options)\n except dfvfs_errors.ScannerError as exception:\n status = 'Unable to open evidence: {0!s}'.format(exception)\n result.close(self, success=False, status=status)\n\n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n file_entries = None\n with open(bodyfile_output, 'w', encoding='utf-8') as file_object:\n file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))\n while file_entries:\n try:\n _, (file_entry, path_segments) = next(file_entries)\n bodyfile_entries = entry_lister.GetBodyfileEntries(\n file_entry, path_segments)\n for bodyfile_entry in bodyfile_entries:\n file_object.write(bodyfile_entry)\n file_object.write('\\n')\n number_of_entries += 1\n except StopIteration:\n break\n except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,\n dfvfs_errors.MountPointError,\n dfvfs_errors.PathSpecError) as exception:\n status = 'Unable to process file entry: {0!s}'.format(exception)\n result.log(status)\n\n if number_of_entries > 0:\n output_evidence.number_of_entries = number_of_entries\n result.add_evidence(output_evidence, evidence.config)\n status = 'Generated file system timeline containing [{0:d}] entries'.format(\n number_of_entries)\n result.close(self, success=True, status=status)\n else:\n status = 'Unable to process any file entries.'\n result.close(self, success=False, status=status)\n\n except IOError as exception:\n status = 'Unable to create bodyfile local output file: {0!s}'.format(\n exception)\n result.close(self, success=False, status=status)\n\n return result\n", "path": "turbinia/workers/file_system_timeline.py"}]}
| 1,434 | 849 |
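For reference, the golden diff in the record above replaces one broad try/except around the whole listing loop with per-entry handling, so a single unreadable file entry no longer aborts the timeline and the task only fails when nothing at all could be processed. Below is a minimal, dependency-free sketch of that error-isolation pattern; the `process` and `log` callables are placeholders standing in for the real dfvfs/dfimagetools calls, not part of Turbinia's API.

```python
def write_entries(entries, process, log):
    """Process items one by one; skip failures instead of aborting the whole loop."""
    written = 0
    for item in entries:
        try:
            process(item)  # stand-in for GetBodyfileEntries() + the bodyfile writes
            written += 1
        except (ValueError, OSError) as exc:  # stand-in for the dfvfs error classes
            log('Unable to process entry: {0!s}'.format(exc))
    return written


if __name__ == '__main__':
    count = write_entries(['1', 'bad', '3'], process=int, log=print)
    print('processed {0} entries'.format(count))  # -> processed 2 entries
```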
gh_patches_debug_24183
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-6460
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tifffile deprecation warning on test_tifffile.py
## Description
<!--
(Note: for guidance on how to use `scikit-image`, please post instead on https://forum.image.sc/tag/scikit-image)
-->
## Way to reproduce
```python
run pytest on skimage/io/tests/test_tifffile.py
```
Will show deprecation warning issues
```bash
skimage/io/tests/test_tifffile.py ................................... [100%]
=============================== warnings summary ===============================
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/io/_plugins/tifffile_plugin.py`
Content:
```
1 __all__ = ['imread', 'imsave']
2
3 from tifffile import imwrite as imsave, imread as tifffile_imread
4
5
6 def imread(fname, **kwargs):
7 """Load a tiff image from file.
8
9 Parameters
10 ----------
11 fname : str or file
12 File name or file-like-object.
13 kwargs : keyword pairs, optional
14 Additional keyword arguments to pass through (see ``tifffile``'s
15 ``imread`` function).
16
17 Notes
18 -----
19 Provided by the tifffile library [1]_, and supports many
20 advanced image types including multi-page and floating point.
21
22 References
23 ----------
24 .. [1] https://pypi.org/project/tifffile/
25
26 """
27 if 'img_num' in kwargs:
28 kwargs['key'] = kwargs.pop('img_num')
29
30 return tifffile_imread(fname, **kwargs)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py
--- a/skimage/io/_plugins/tifffile_plugin.py
+++ b/skimage/io/_plugins/tifffile_plugin.py
@@ -1,6 +1,50 @@
+from tifffile import imread as tifffile_imread
+from tifffile import imwrite as tifffile_imwrite
+
__all__ = ['imread', 'imsave']
-from tifffile import imwrite as imsave, imread as tifffile_imread
+
+def imsave(fname, arr, **kwargs):
+ """Load a tiff image to file.
+
+ Parameters
+ ----------
+ fname : str or file
+ File name or file-like object.
+ arr : ndarray
+ The array to write.
+ kwargs : keyword pairs, optional
+ Additional keyword arguments to pass through (see ``tifffile``'s
+ ``imwrite`` function).
+
+ Notes
+ -----
+ Provided by the tifffile library [1]_, and supports many
+ advanced image types including multi-page and floating-point.
+
+ This implementation will set ``photometric='RGB'`` when writing if the first
+ or last axis of `arr` has length 3 or 4. To override this, explicitly
+ pass the ``photometric`` kwarg.
+
+ This implementation will set ``planarconfig='SEPARATE'`` when writing if the
+ first axis of arr has length 3 or 4. To override this, explicitly
+ specify the ``planarconfig`` kwarg.
+
+ References
+ ----------
+ .. [1] https://pypi.org/project/tifffile/
+
+ """
+ if arr.shape[0] in [3, 4]:
+ if 'planarconfig' not in kwargs:
+ kwargs['planarconfig'] = 'SEPARATE'
+ rgb = True
+ else:
+ rgb = arr.shape[-1] in [3, 4]
+ if rgb and 'photometric' not in kwargs:
+ kwargs['photometric'] = 'RGB'
+
+ return tifffile_imwrite(fname, arr, **kwargs)
def imread(fname, **kwargs):
|
{"golden_diff": "diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -1,6 +1,50 @@\n+from tifffile import imread as tifffile_imread\n+from tifffile import imwrite as tifffile_imwrite\n+\n __all__ = ['imread', 'imsave']\n \n-from tifffile import imwrite as imsave, imread as tifffile_imread\n+\n+def imsave(fname, arr, **kwargs):\n+ \"\"\"Load a tiff image to file.\n+\n+ Parameters\n+ ----------\n+ fname : str or file\n+ File name or file-like object.\n+ arr : ndarray\n+ The array to write.\n+ kwargs : keyword pairs, optional\n+ Additional keyword arguments to pass through (see ``tifffile``'s\n+ ``imwrite`` function).\n+\n+ Notes\n+ -----\n+ Provided by the tifffile library [1]_, and supports many\n+ advanced image types including multi-page and floating-point.\n+\n+ This implementation will set ``photometric='RGB'`` when writing if the first\n+ or last axis of `arr` has length 3 or 4. To override this, explicitly\n+ pass the ``photometric`` kwarg.\n+\n+ This implementation will set ``planarconfig='SEPARATE'`` when writing if the\n+ first axis of arr has length 3 or 4. To override this, explicitly\n+ specify the ``planarconfig`` kwarg.\n+\n+ References\n+ ----------\n+ .. [1] https://pypi.org/project/tifffile/\n+\n+ \"\"\"\n+ if arr.shape[0] in [3, 4]:\n+ if 'planarconfig' not in kwargs:\n+ kwargs['planarconfig'] = 'SEPARATE'\n+ rgb = True\n+ else:\n+ rgb = arr.shape[-1] in [3, 4]\n+ if rgb and 'photometric' not in kwargs:\n+ kwargs['photometric'] = 'RGB'\n+\n+ return tifffile_imwrite(fname, arr, **kwargs)\n \n \n def imread(fname, **kwargs):\n", "issue": "tifffile deprecation warning on test_tifffile.py\n## Description\r\n\r\n<!--\r\n(Note: for guidance on how to use `scikit-image`, please post instead on https://forum.image.sc/tag/scikit-image)\r\n-->\r\n\r\n## Way to reproduce\r\n```python\r\nrun pytest on skimage/io/tests/test_tifffile.py\r\n```\r\nWill show deprecation warning issues\r\n```bash\r\nskimage/io/tests/test_tifffile.py ................................... [100%]\r\n\r\n=============================== warnings summary ===============================\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float32' are stored as RGB with contiguous samples. 
Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\r\n\r\n```\r\n\r\n\n", "before_files": [{"content": "__all__ = ['imread', 'imsave']\n\nfrom tifffile import imwrite as imsave, imread as tifffile_imread\n\n\ndef imread(fname, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by the tifffile library [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. 
[1] https://pypi.org/project/tifffile/\n\n \"\"\"\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n return tifffile_imread(fname, **kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}], "after_files": [{"content": "from tifffile import imread as tifffile_imread\nfrom tifffile import imwrite as tifffile_imwrite\n\n__all__ = ['imread', 'imsave']\n\n\ndef imsave(fname, arr, **kwargs):\n \"\"\"Load a tiff image to file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like object.\n arr : ndarray\n The array to write.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imwrite`` function).\n\n Notes\n -----\n Provided by the tifffile library [1]_, and supports many\n advanced image types including multi-page and floating-point.\n\n This implementation will set ``photometric='RGB'`` when writing if the first\n or last axis of `arr` has length 3 or 4. To override this, explicitly\n pass the ``photometric`` kwarg.\n\n This implementation will set ``planarconfig='SEPARATE'`` when writing if the\n first axis of arr has length 3 or 4. To override this, explicitly\n specify the ``planarconfig`` kwarg.\n\n References\n ----------\n .. [1] https://pypi.org/project/tifffile/\n\n \"\"\"\n if arr.shape[0] in [3, 4]:\n if 'planarconfig' not in kwargs:\n kwargs['planarconfig'] = 'SEPARATE'\n rgb = True\n else:\n rgb = arr.shape[-1] in [3, 4]\n if rgb and 'photometric' not in kwargs:\n kwargs['photometric'] = 'RGB'\n\n return tifffile_imwrite(fname, arr, **kwargs)\n\n\ndef imread(fname, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by the tifffile library [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] https://pypi.org/project/tifffile/\n\n \"\"\"\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n return tifffile_imread(fname, **kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]}
| 1,653 | 526 |
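The fix in the record above silences tifffile's DeprecationWarning by passing an explicit `photometric` (and, for channel-first data, `planarconfig`) keyword when saving 3- or 4-channel arrays. A trimmed-down sketch of just that kwarg-selection logic is shown below; the actual `tifffile.imwrite` call is omitted, and the helper name `tiff_save_kwargs` is illustrative, not part of scikit-image.

```python
import numpy as np


def tiff_save_kwargs(arr, **kwargs):
    # Mirrors the channel-axis logic added to skimage's tifffile plugin:
    # being explicit about photometric/planarconfig avoids tifffile's
    # DeprecationWarning for 3- and 4-channel data.
    if arr.shape[0] in (3, 4):
        kwargs.setdefault('planarconfig', 'SEPARATE')
        rgb = True
    else:
        rgb = arr.shape[-1] in (3, 4)
    if rgb:
        kwargs.setdefault('photometric', 'RGB')
    return kwargs


print(tiff_save_kwargs(np.zeros((10, 10, 3))))  # {'photometric': 'RGB'}
print(tiff_save_kwargs(np.zeros((4, 10, 10))))  # {'planarconfig': 'SEPARATE', 'photometric': 'RGB'}
```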
gh_patches_debug_21682
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.general-7409
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
kernel_blacklist - Error while setting attributes: /tmp/xyz: Operation not supported
### Summary
I wanted to blacklist some kernel modules using the ready-made kernel_blacklist in Ansible.
My sample code:
```yaml
- name: Remove potentially affected (and unused) modules
community.general.kernel_blacklist:
name: "{{ line_item }}"
state: present
blacklist_file: "/etc/modprobe.d/{{ line_item }}-blacklist.conf"
with_items:
- cifs
- cls_rsvp
loop_control:
loop_var: line_item
```
As I understand it, the module should create such a file and add content to it.
As a result, when you run the playbook, the files are created but their contents are empty:
```bash
$ find /etc/modprobe.d/ -size 0 -ls
6030631 0 -rw-r--r-- 1 root root 0 paź 7 12:19 /etc/modprobe.d/cls_rsvp-blacklist.conf
6029638 0 -rw-r--r-- 1 root root 0 paź 7 12:19 /etc/modprobe.d/cifs-blacklist.conf
```
Additionally, when launching the playbook, I receive a failure message. Below traceback ( -vvv)
```
The full traceback is:
Traceback (most recent call last):
File "/tmp/ansible_community.general.kernel_blacklist_payload_rk2m8l96/ansible_community.general.kernel_blacklist_payload.zip/ansible/module_utils/basic.py", line 1003, in set_attributes_if_different
raise Exception("Error while setting attributes: %s" % (out + err))
Exception: Error while setting attributes: /tmp/tmpnholykn5: Operation not supported
failed: [local] (item=cls_rsvp) => {
"ansible_loop_var": "line_item",
"changed": false,
"details": "Error while setting attributes: /tmp/tmpnholykn5: Operation not supported\n",
"gid": 0,
"group": "root",
"invocation": {
"module_args": {
"blacklist_file": "/etc/modprobe.d/cls_rsvp-blacklist.conf",
"name": "cls_rsvp",
"state": "present"
}
},
"line_item": "cls_rsvp",
"mode": "0644",
"msg": "chattr failed",
"owner": "root",
"path": "/tmp/tmpnholykn5",
"size": 0,
"state": "file",
"uid": 0
}
```
I don't know why module display `Error while setting attributes: /tmp/tmpwn_d8ybv: Operation not supported`.
In bash shell work perfect:
```bash
# rm -fv /etc/modprobe.d/cifs-blacklist.conf
removed '/etc/modprobe.d/cifs-blacklist.conf'
# touch /etc/modprobe.d/cifs-blacklist.conf
# echo 'test test' > /etc/modprobe.d/cifs-blacklist.conf
```
### Issue Type
Bug Report
### Component Name
kernel_blacklist
### Ansible Version
```bash
$ ansible --version
ansible [core 2.14.10]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/bkida/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.11/site-packages/ansible
ansible collection location = /home/bkida/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.11.5 (main, Aug 28 2023, 00:00:00) [GCC 13.2.1 20230728 (Red Hat 13.2.1-1)] (/usr/bin/python3)
jinja version = 3.0.3
libyaml = True
```
### Community.general Version
```bash
$ ansible-galaxy collection list community.general
# /usr/lib/python3.11/site-packages/ansible_collections
Collection Version
----------------- -------
community.general 6.6.2
# /usr/share/ansible/collections/ansible_collections
Collection Version
----------------- -------
community.general 6.6.0
```
### Configuration
```bash
$ ansible-config dump --only-changed
CONFIG_FILE() = /etc/ansible/ansible.cfg
```
### OS / Environment
Fedora 38
### Steps to Reproduce
Copy example code from summary description and run on the same ansible / community.general version
### Expected Results
Working module ;)
### Actual Results
`Error while setting attributes: /tmp/tmpwn_d8ybv: Operation not supported`
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/kernel_blacklist.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright (c) 2021, Alexei Znamensky (@russoz) <[email protected]>
5 # Copyright (c) 2013, Matthias Vogelgesang <[email protected]>
6 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
7 # SPDX-License-Identifier: GPL-3.0-or-later
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12 DOCUMENTATION = '''
13 ---
14 module: kernel_blacklist
15 author:
16 - Matthias Vogelgesang (@matze)
17 short_description: Blacklist kernel modules
18 description:
19 - Add or remove kernel modules from blacklist.
20 extends_documentation_fragment:
21 - community.general.attributes
22 attributes:
23 check_mode:
24 support: full
25 diff_mode:
26 support: full
27 options:
28 name:
29 type: str
30 description:
31 - Name of kernel module to black- or whitelist.
32 required: true
33 state:
34 type: str
35 description:
36 - Whether the module should be present in the blacklist or absent.
37 choices: [ absent, present ]
38 default: present
39 blacklist_file:
40 type: str
41 description:
42 - If specified, use this blacklist file instead of
43 C(/etc/modprobe.d/blacklist-ansible.conf).
44 default: /etc/modprobe.d/blacklist-ansible.conf
45 '''
46
47 EXAMPLES = '''
48 - name: Blacklist the nouveau driver module
49 community.general.kernel_blacklist:
50 name: nouveau
51 state: present
52 '''
53
54 import os
55 import re
56 import tempfile
57
58 from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
59
60
61 class Blacklist(StateModuleHelper):
62 output_params = ('name', 'state')
63 module = dict(
64 argument_spec=dict(
65 name=dict(type='str', required=True),
66 state=dict(type='str', default='present', choices=['absent', 'present']),
67 blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'),
68 ),
69 supports_check_mode=True,
70 )
71
72 def __init_module__(self):
73 self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name)))
74 self.vars.filename = self.vars.blacklist_file
75 self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True)
76 if not self.vars.file_exists:
77 with open(self.vars.filename, 'a'):
78 pass
79 self.vars.file_exists = True
80 self.vars.set('lines', [], change=True, diff=True)
81 else:
82 with open(self.vars.filename) as fd:
83 self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True)
84 self.vars.set('is_blacklisted', self._is_module_blocked(), change=True)
85
86 def _is_module_blocked(self):
87 for line in self.vars.lines:
88 stripped = line.strip()
89 if stripped.startswith('#'):
90 continue
91 if self.pattern.match(stripped):
92 return True
93 return False
94
95 def state_absent(self):
96 if not self.vars.is_blacklisted:
97 return
98 self.vars.is_blacklisted = False
99 self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())]
100
101 def state_present(self):
102 if self.vars.is_blacklisted:
103 return
104 self.vars.is_blacklisted = True
105 self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name]
106
107 def __quit_module__(self):
108 if self.has_changed() and not self.module.check_mode:
109 dummy, tmpfile = tempfile.mkstemp()
110 try:
111 os.remove(tmpfile)
112 self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership
113 with open(tmpfile, 'w') as fd:
114 fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
115 self.module.atomic_move(tmpfile, self.vars.filename)
116 finally:
117 if os.path.exists(tmpfile):
118 os.remove(tmpfile)
119
120
121 def main():
122 Blacklist.execute()
123
124
125 if __name__ == '__main__':
126 main()
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py
--- a/plugins/modules/kernel_blacklist.py
+++ b/plugins/modules/kernel_blacklist.py
@@ -53,7 +53,6 @@
import os
import re
-import tempfile
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
@@ -106,16 +105,10 @@
def __quit_module__(self):
if self.has_changed() and not self.module.check_mode:
- dummy, tmpfile = tempfile.mkstemp()
- try:
- os.remove(tmpfile)
- self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership
- with open(tmpfile, 'w') as fd:
- fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
- self.module.atomic_move(tmpfile, self.vars.filename)
- finally:
- if os.path.exists(tmpfile):
- os.remove(tmpfile)
+ bkp = self.module.backup_local(self.vars.filename)
+ with open(self.vars.filename, "w") as fd:
+ fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
+ self.module.add_cleanup_file(bkp)
def main():
|
{"golden_diff": "diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py\n--- a/plugins/modules/kernel_blacklist.py\n+++ b/plugins/modules/kernel_blacklist.py\n@@ -53,7 +53,6 @@\n \n import os\n import re\n-import tempfile\n \n from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper\n \n@@ -106,16 +105,10 @@\n \n def __quit_module__(self):\n if self.has_changed() and not self.module.check_mode:\n- dummy, tmpfile = tempfile.mkstemp()\n- try:\n- os.remove(tmpfile)\n- self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership\n- with open(tmpfile, 'w') as fd:\n- fd.writelines([\"{0}\\n\".format(x) for x in self.vars.lines])\n- self.module.atomic_move(tmpfile, self.vars.filename)\n- finally:\n- if os.path.exists(tmpfile):\n- os.remove(tmpfile)\n+ bkp = self.module.backup_local(self.vars.filename)\n+ with open(self.vars.filename, \"w\") as fd:\n+ fd.writelines([\"{0}\\n\".format(x) for x in self.vars.lines])\n+ self.module.add_cleanup_file(bkp)\n \n \n def main():\n", "issue": "kernel_blacklist - Error while setting attributes: /tmp/xyz: Operation not supported\n### Summary\n\nI wanted to blacklist some kernel modules using the ready-made kernel_blacklist in Ansible.\r\n\r\nMy sample code:\r\n```yaml\r\n- name: Remove potentially affected (and unused) modules\r\n community.general.kernel_blacklist:\r\n name: \"{{ line_item }}\"\r\n state: present\r\n blacklist_file: \"/etc/modprobe.d/{{ line_item }}-blacklist.conf\"\r\n with_items:\r\n - cifs\r\n - cls_rsvp\r\n loop_control:\r\n loop_var: line_item\r\n```\r\n\r\nAs I understand it, the module should create such a file and add content to it.\r\n\r\nAs a result, when you run the playbook, the files are created but their contents are empty:\r\n```bash\r\n$ find /etc/modprobe.d/ -size 0 -ls\r\n 6030631 0 -rw-r--r-- 1 root root 0 pa\u017a 7 12:19 /etc/modprobe.d/cls_rsvp-blacklist.conf\r\n 6029638 0 -rw-r--r-- 1 root root 0 pa\u017a 7 12:19 /etc/modprobe.d/cifs-blacklist.conf\r\n```\r\n\r\nAdditionally, when launching the playbook, I receive a failure message. Below traceback ( -vvv)\r\n```\r\nThe full traceback is:\r\nTraceback (most recent call last):\r\n File \"/tmp/ansible_community.general.kernel_blacklist_payload_rk2m8l96/ansible_community.general.kernel_blacklist_payload.zip/ansible/module_utils/basic.py\", line 1003, in set_attributes_if_different\r\n raise Exception(\"Error while setting attributes: %s\" % (out + err))\r\nException: Error while setting attributes: /tmp/tmpnholykn5: Operation not supported\r\n\r\nfailed: [local] (item=cls_rsvp) => {\r\n \"ansible_loop_var\": \"line_item\",\r\n \"changed\": false,\r\n \"details\": \"Error while setting attributes: /tmp/tmpnholykn5: Operation not supported\\n\",\r\n \"gid\": 0,\r\n \"group\": \"root\",\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"blacklist_file\": \"/etc/modprobe.d/cls_rsvp-blacklist.conf\",\r\n \"name\": \"cls_rsvp\",\r\n \"state\": \"present\"\r\n }\r\n },\r\n \"line_item\": \"cls_rsvp\",\r\n \"mode\": \"0644\",\r\n \"msg\": \"chattr failed\",\r\n \"owner\": \"root\",\r\n \"path\": \"/tmp/tmpnholykn5\",\r\n \"size\": 0,\r\n \"state\": \"file\",\r\n \"uid\": 0\r\n}\r\n```\r\n\r\nI don't know why module display `Error while setting attributes: /tmp/tmpwn_d8ybv: Operation not supported`. 
\r\nIn bash shell work perfect:\r\n```bash\r\n# rm -fv /etc/modprobe.d/cifs-blacklist.conf\r\nremoved '/etc/modprobe.d/cifs-blacklist.conf'\r\n# touch /etc/modprobe.d/cifs-blacklist.conf\r\n# echo 'test test' > /etc/modprobe.d/cifs-blacklist.conf\r\n```\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nkernel_blacklist\n\n### Ansible Version\n\n```bash\r\n$ ansible --version\r\nansible [core 2.14.10]\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = ['/home/bkida/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3.11/site-packages/ansible\r\n ansible collection location = /home/bkida/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/bin/ansible\r\n python version = 3.11.5 (main, Aug 28 2023, 00:00:00) [GCC 13.2.1 20230728 (Red Hat 13.2.1-1)] (/usr/bin/python3)\r\n jinja version = 3.0.3\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```bash\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /usr/lib/python3.11/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 6.6.2 \r\n\r\n# /usr/share/ansible/collections/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 6.6.0 \r\n\r\n```\r\n\n\n### Configuration\n\n```bash\r\n$ ansible-config dump --only-changed\r\nCONFIG_FILE() = /etc/ansible/ansible.cfg\r\n```\r\n\n\n### OS / Environment\n\nFedora 38\n\n### Steps to Reproduce\n\nCopy example code from summary description and run on the same ansible / community.general version\n\n### Expected Results\n\nWorking module ;)\n\n### Actual Results\n\n`Error while setting attributes: /tmp/tmpwn_d8ybv: Operation not supported`\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Alexei Znamensky (@russoz) <[email protected]>\n# Copyright (c) 2013, Matthias Vogelgesang <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: kernel_blacklist\nauthor:\n - Matthias Vogelgesang (@matze)\nshort_description: Blacklist kernel modules\ndescription:\n - Add or remove kernel modules from blacklist.\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: full\n diff_mode:\n support: full\noptions:\n name:\n type: str\n description:\n - Name of kernel module to black- or whitelist.\n required: true\n state:\n type: str\n description:\n - Whether the module should be present in the blacklist or absent.\n choices: [ absent, present ]\n default: present\n blacklist_file:\n type: str\n description:\n - If specified, use this blacklist file instead of\n C(/etc/modprobe.d/blacklist-ansible.conf).\n default: /etc/modprobe.d/blacklist-ansible.conf\n'''\n\nEXAMPLES = '''\n- name: Blacklist the nouveau driver module\n community.general.kernel_blacklist:\n name: nouveau\n state: present\n'''\n\nimport os\nimport re\nimport tempfile\n\nfrom ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper\n\n\nclass Blacklist(StateModuleHelper):\n output_params = ('name', 'state')\n module = dict(\n argument_spec=dict(\n 
name=dict(type='str', required=True),\n state=dict(type='str', default='present', choices=['absent', 'present']),\n blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'),\n ),\n supports_check_mode=True,\n )\n\n def __init_module__(self):\n self.pattern = re.compile(r'^blacklist\\s+{0}$'.format(re.escape(self.vars.name)))\n self.vars.filename = self.vars.blacklist_file\n self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True)\n if not self.vars.file_exists:\n with open(self.vars.filename, 'a'):\n pass\n self.vars.file_exists = True\n self.vars.set('lines', [], change=True, diff=True)\n else:\n with open(self.vars.filename) as fd:\n self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True)\n self.vars.set('is_blacklisted', self._is_module_blocked(), change=True)\n\n def _is_module_blocked(self):\n for line in self.vars.lines:\n stripped = line.strip()\n if stripped.startswith('#'):\n continue\n if self.pattern.match(stripped):\n return True\n return False\n\n def state_absent(self):\n if not self.vars.is_blacklisted:\n return\n self.vars.is_blacklisted = False\n self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())]\n\n def state_present(self):\n if self.vars.is_blacklisted:\n return\n self.vars.is_blacklisted = True\n self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name]\n\n def __quit_module__(self):\n if self.has_changed() and not self.module.check_mode:\n dummy, tmpfile = tempfile.mkstemp()\n try:\n os.remove(tmpfile)\n self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership\n with open(tmpfile, 'w') as fd:\n fd.writelines([\"{0}\\n\".format(x) for x in self.vars.lines])\n self.module.atomic_move(tmpfile, self.vars.filename)\n finally:\n if os.path.exists(tmpfile):\n os.remove(tmpfile)\n\n\ndef main():\n Blacklist.execute()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/kernel_blacklist.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Alexei Znamensky (@russoz) <[email protected]>\n# Copyright (c) 2013, Matthias Vogelgesang <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: kernel_blacklist\nauthor:\n - Matthias Vogelgesang (@matze)\nshort_description: Blacklist kernel modules\ndescription:\n - Add or remove kernel modules from blacklist.\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: full\n diff_mode:\n support: full\noptions:\n name:\n type: str\n description:\n - Name of kernel module to black- or whitelist.\n required: true\n state:\n type: str\n description:\n - Whether the module should be present in the blacklist or absent.\n choices: [ absent, present ]\n default: present\n blacklist_file:\n type: str\n description:\n - If specified, use this blacklist file instead of\n C(/etc/modprobe.d/blacklist-ansible.conf).\n default: /etc/modprobe.d/blacklist-ansible.conf\n'''\n\nEXAMPLES = '''\n- name: Blacklist the nouveau driver module\n community.general.kernel_blacklist:\n name: nouveau\n state: present\n'''\n\nimport os\nimport re\n\nfrom ansible_collections.community.general.plugins.module_utils.module_helper import 
StateModuleHelper\n\n\nclass Blacklist(StateModuleHelper):\n output_params = ('name', 'state')\n module = dict(\n argument_spec=dict(\n name=dict(type='str', required=True),\n state=dict(type='str', default='present', choices=['absent', 'present']),\n blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'),\n ),\n supports_check_mode=True,\n )\n\n def __init_module__(self):\n self.pattern = re.compile(r'^blacklist\\s+{0}$'.format(re.escape(self.vars.name)))\n self.vars.filename = self.vars.blacklist_file\n self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True)\n if not self.vars.file_exists:\n with open(self.vars.filename, 'a'):\n pass\n self.vars.file_exists = True\n self.vars.set('lines', [], change=True, diff=True)\n else:\n with open(self.vars.filename) as fd:\n self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True)\n self.vars.set('is_blacklisted', self._is_module_blocked(), change=True)\n\n def _is_module_blocked(self):\n for line in self.vars.lines:\n stripped = line.strip()\n if stripped.startswith('#'):\n continue\n if self.pattern.match(stripped):\n return True\n return False\n\n def state_absent(self):\n if not self.vars.is_blacklisted:\n return\n self.vars.is_blacklisted = False\n self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())]\n\n def state_present(self):\n if self.vars.is_blacklisted:\n return\n self.vars.is_blacklisted = True\n self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name]\n\n def __quit_module__(self):\n if self.has_changed() and not self.module.check_mode:\n bkp = self.module.backup_local(self.vars.filename)\n with open(self.vars.filename, \"w\") as fd:\n fd.writelines([\"{0}\\n\".format(x) for x in self.vars.lines])\n self.module.add_cleanup_file(bkp)\n\n\ndef main():\n Blacklist.execute()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/kernel_blacklist.py"}]}
| 2,577 | 285 |
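The patch above drops the tempfile + `preserved_copy` + `atomic_move` sequence (whose attribute copy triggered the `chattr failed` error on /tmp) in favour of backing up the target and writing it in place. The sketch below shows that shape with plain standard-library calls; `rewrite_with_backup` and the `.bak` naming are illustrative stand-ins for `AnsibleModule.backup_local()` plus the direct write, not community.general API.

```python
import shutil
import time


def rewrite_with_backup(path, lines):
    """Back up `path`, then rewrite it in place.

    Writing the destination file directly sidesteps attribute-copy failures
    on filesystems (such as tmpfs) that do not support chattr.
    """
    backup = '{0}.{1}.bak'.format(path, int(time.time()))
    shutil.copy2(path, backup)  # copy2 keeps mode and timestamps
    with open(path, 'w') as fd:
        fd.writelines('{0}\n'.format(line) for line in lines)
    return backup


# usage (requires write access to the file):
# rewrite_with_backup('/etc/modprobe.d/blacklist-ansible.conf', ['blacklist cifs'])
```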
gh_patches_debug_34329
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-1417
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documents: This backend doesn't support absolute paths. (Update to 1.0b2)
## 1. Bug since Wagtail update
I recently upgraded to 1.0b2 (from 1.0b1) and now, when I try to access I document I uploaded via a `wagtaildocs.Document` field, I get the following error:
```
NotImplementedError at /documents/3/headphones.svg
This backend doesn't support absolute paths.
```
The field is specified as:
```
svg_mask = models.ForeignKey(
verbose_name=u"Mask (SVG)",
to='wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
```
Is this a wagtail bug or a configuration error?
## 2. Unexpected behaviour
I would like to serve the svg as a `image/svg+xml`, not as a stream (which was the case in 1.0b1). I set the mimetype in my settings as follows:
```
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)
```
Yet (in 1.0b1) it always got served as a stream. I couldn't test it in 1.0b2 yet, because of the above problem.
Thank you in advance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtaildocs/views/serve.py`
Content:
```
1 from django.shortcuts import get_object_or_404
2 from django.conf import settings
3
4 from wagtail.utils.sendfile import sendfile
5 from wagtail.utils import sendfile_streaming_backend
6
7 from wagtail.wagtaildocs.models import Document, document_served
8
9
10 def serve(request, document_id, document_filename):
11 doc = get_object_or_404(Document, id=document_id)
12
13 # Send document_served signal
14 document_served.send(sender=Document, instance=doc, request=request)
15
16 if hasattr(settings, 'SENDFILE_BACKEND'):
17 return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)
18 else:
19 # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
20 return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtaildocs/views/serve.py b/wagtail/wagtaildocs/views/serve.py
--- a/wagtail/wagtaildocs/views/serve.py
+++ b/wagtail/wagtaildocs/views/serve.py
@@ -1,5 +1,9 @@
from django.shortcuts import get_object_or_404
from django.conf import settings
+from django.http import StreamingHttpResponse, BadHeaderError
+
+from unidecode import unidecode
+from wsgiref.util import FileWrapper
from wagtail.utils.sendfile import sendfile
from wagtail.utils import sendfile_streaming_backend
@@ -13,8 +17,40 @@
# Send document_served signal
document_served.send(sender=Document, instance=doc, request=request)
- if hasattr(settings, 'SENDFILE_BACKEND'):
- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)
+ try:
+ local_path = doc.file.path
+ except NotImplementedError:
+ local_path = None
+
+ if local_path:
+
+ # Use wagtail.utils.sendfile to serve the file;
+ # this provides support for mimetypes, if-modified-since and django-sendfile backends
+
+ if hasattr(settings, 'SENDFILE_BACKEND'):
+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)
+ else:
+ # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
+
else:
- # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
+
+ # We are using a storage backend which does not expose filesystem paths
+ # (e.g. storages.backends.s3boto.S3BotoStorage).
+ # Fall back on pre-sendfile behaviour of reading the file content and serving it
+ # as a StreamingHttpResponse
+
+ wrapper = FileWrapper(doc.file)
+ response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')
+
+ try:
+ response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename
+ except BadHeaderError:
+ # Unicode filenames can fail on Django <1.8, Python 2 due to
+ # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name
+ response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)
+
+ # FIXME: storage backends are not guaranteed to implement 'size'
+ response['Content-Length'] = doc.file.size
+
+ return response
|
{"golden_diff": "diff --git a/wagtail/wagtaildocs/views/serve.py b/wagtail/wagtaildocs/views/serve.py\n--- a/wagtail/wagtaildocs/views/serve.py\n+++ b/wagtail/wagtaildocs/views/serve.py\n@@ -1,5 +1,9 @@\n from django.shortcuts import get_object_or_404\n from django.conf import settings\n+from django.http import StreamingHttpResponse, BadHeaderError\n+\n+from unidecode import unidecode\n+from wsgiref.util import FileWrapper\n \n from wagtail.utils.sendfile import sendfile\n from wagtail.utils import sendfile_streaming_backend\n@@ -13,8 +17,40 @@\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n \n- if hasattr(settings, 'SENDFILE_BACKEND'):\n- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)\n+ try:\n+ local_path = doc.file.path\n+ except NotImplementedError:\n+ local_path = None\n+\n+ if local_path:\n+\n+ # Use wagtail.utils.sendfile to serve the file;\n+ # this provides support for mimetypes, if-modified-since and django-sendfile backends\n+\n+ if hasattr(settings, 'SENDFILE_BACKEND'):\n+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)\n+ else:\n+ # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n+\n else:\n- # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n+\n+ # We are using a storage backend which does not expose filesystem paths\n+ # (e.g. storages.backends.s3boto.S3BotoStorage).\n+ # Fall back on pre-sendfile behaviour of reading the file content and serving it\n+ # as a StreamingHttpResponse\n+\n+ wrapper = FileWrapper(doc.file)\n+ response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')\n+\n+ try:\n+ response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename\n+ except BadHeaderError:\n+ # Unicode filenames can fail on Django <1.8, Python 2 due to\n+ # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name\n+ response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)\n+\n+ # FIXME: storage backends are not guaranteed to implement 'size'\n+ response['Content-Length'] = doc.file.size\n+\n+ return response\n", "issue": "Documents: This backend doesn't support absolute paths. (Update to 1.0b2)\n## 1. Bug since Wagtail update\n\nI recently upgraded to 1.0b2 (from 1.0b1) and now, when I try to access I document I uploaded via a `wagtaildocs.Document` field, I get the following error:\n\n```\nNotImplementedError at /documents/3/headphones.svg\nThis backend doesn't support absolute paths.\n```\n\nThe field is specified as:\n\n```\nsvg_mask = models.ForeignKey(\n verbose_name=u\"Mask (SVG)\",\n to='wagtaildocs.Document',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n)\n```\n\nIs this a wagtail bug or a configuration error?\n## 2. Unexpected behaviour\n\nI would like to serve the svg as a `image/svg+xml`, not as a stream (which was the case in 1.0b1). I set the mimetype in my settings as follows:\n\n```\nmimetypes.add_type(\"image/svg+xml\", \".svg\", True)\nmimetypes.add_type(\"image/svg+xml\", \".svgz\", True)\n```\n\nYet (in 1.0b1) it always got served as a stream. 
I couldn't test it in 1.0b2 yet, because of the above problem.\n\nThank you in advance.\n\n", "before_files": [{"content": "from django.shortcuts import get_object_or_404\nfrom django.conf import settings\n\nfrom wagtail.utils.sendfile import sendfile\nfrom wagtail.utils import sendfile_streaming_backend\n\nfrom wagtail.wagtaildocs.models import Document, document_served\n\n\ndef serve(request, document_id, document_filename):\n doc = get_object_or_404(Document, id=document_id)\n\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n\n if hasattr(settings, 'SENDFILE_BACKEND'):\n return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)\n else:\n # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n", "path": "wagtail/wagtaildocs/views/serve.py"}], "after_files": [{"content": "from django.shortcuts import get_object_or_404\nfrom django.conf import settings\nfrom django.http import StreamingHttpResponse, BadHeaderError\n\nfrom unidecode import unidecode\nfrom wsgiref.util import FileWrapper\n\nfrom wagtail.utils.sendfile import sendfile\nfrom wagtail.utils import sendfile_streaming_backend\n\nfrom wagtail.wagtaildocs.models import Document, document_served\n\n\ndef serve(request, document_id, document_filename):\n doc = get_object_or_404(Document, id=document_id)\n\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n\n try:\n local_path = doc.file.path\n except NotImplementedError:\n local_path = None\n\n if local_path:\n\n # Use wagtail.utils.sendfile to serve the file;\n # this provides support for mimetypes, if-modified-since and django-sendfile backends\n\n if hasattr(settings, 'SENDFILE_BACKEND'):\n return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)\n else:\n # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n\n else:\n\n # We are using a storage backend which does not expose filesystem paths\n # (e.g. storages.backends.s3boto.S3BotoStorage).\n # Fall back on pre-sendfile behaviour of reading the file content and serving it\n # as a StreamingHttpResponse\n\n wrapper = FileWrapper(doc.file)\n response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')\n\n try:\n response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename\n except BadHeaderError:\n # Unicode filenames can fail on Django <1.8, Python 2 due to\n # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name\n response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)\n\n # FIXME: storage backends are not guaranteed to implement 'size'\n response['Content-Length'] = doc.file.size\n\n return response\n", "path": "wagtail/wagtaildocs/views/serve.py"}]}
| 780 | 637 |
gh_patches_debug_5789 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-145 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Just email console backend in development mode
When EMAIL_HOST is setted and DEBUG is True email continue on console
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/settings.py`
Content:
```
1 import os
2 import dj_database_url
3
4 from decouple import config
5
6
7 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
8 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
9
10
11 # SECURITY WARNING: keep the secret key used in production secret!
12 SECRET_KEY = config('SECRET_KEY')
13
14 # SECURITY WARNING: don't run with debug turned on in production!
15 DEBUG = config('DEBUG', default=False, cast=bool)
16
17 ALLOWED_HOSTS = config(
18 'ALLOWED_HOSTS',
19 default='*',
20 cast=lambda v: [s.strip() for s in v.split(',')])
21
22
23 # Application definition
24
25 INSTALLED_APPS = [
26 'django.contrib.admin',
27 'django.contrib.auth',
28 'django.contrib.contenttypes',
29 'django.contrib.sessions',
30 'django.contrib.messages',
31 'django.contrib.staticfiles',
32 'rest_framework',
33 'rest_framework.authtoken',
34 'django_filters',
35 'corsheaders',
36 'bothub.authentication',
37 'bothub.common',
38 'bothub.api',
39 ]
40
41 MIDDLEWARE = [
42 'django.middleware.security.SecurityMiddleware',
43 'whitenoise.middleware.WhiteNoiseMiddleware',
44 'django.contrib.sessions.middleware.SessionMiddleware',
45 'corsheaders.middleware.CorsMiddleware',
46 'django.middleware.common.CommonMiddleware',
47 'django.middleware.csrf.CsrfViewMiddleware',
48 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 'django.contrib.messages.middleware.MessageMiddleware',
50 'django.middleware.clickjacking.XFrameOptionsMiddleware',
51 ]
52
53 ROOT_URLCONF = 'bothub.urls'
54
55 TEMPLATES = [
56 {
57 'BACKEND': 'django.template.backends.django.DjangoTemplates',
58 'DIRS': [],
59 'APP_DIRS': True,
60 'OPTIONS': {
61 'context_processors': [
62 'django.template.context_processors.debug',
63 'django.template.context_processors.request',
64 'django.contrib.auth.context_processors.auth',
65 'django.contrib.messages.context_processors.messages',
66 ],
67 },
68 },
69 ]
70
71 WSGI_APPLICATION = 'bothub.wsgi.application'
72
73
74 # Database
75
76 DATABASES = {}
77 DATABASES['default'] = dj_database_url.parse(
78 config(
79 'DEFAULT_DATABASE',
80 default='sqlite:///db.sqlite3'))
81
82
83 # Auth
84
85 AUTH_USER_MODEL = 'authentication.User'
86
87
88 # Password validation
89
90 AUTH_PASSWORD_VALIDATORS = [
91 {
92 'NAME': 'django.contrib.auth.password_validation.' +
93 'UserAttributeSimilarityValidator',
94 },
95 {
96 'NAME': 'django.contrib.auth.password_validation.' +
97 'MinimumLengthValidator',
98 },
99 {
100 'NAME': 'django.contrib.auth.password_validation.' +
101 'CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.' +
105 'NumericPasswordValidator',
106 },
107 ]
108
109
110 # Internationalization
111
112 LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
113
114 TIME_ZONE = config('TIME_ZONE', default='UTC')
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122
123 # Static files (CSS, JavaScript, Images)
124
125 STATIC_URL = '/static/'
126
127 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
128
129 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
130
131
132 # rest framework
133
134 REST_FRAMEWORK = {
135 'DEFAULT_AUTHENTICATION_CLASSES': [
136 'rest_framework.authentication.TokenAuthentication',
137 ],
138 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +
139 'LimitOffsetPagination',
140 'PAGE_SIZE': 20,
141 'DEFAULT_FILTER_BACKENDS': [
142 'django_filters.rest_framework.DjangoFilterBackend',
143 ],
144 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',
145 }
146
147
148 # cors headers
149
150 CORS_ORIGIN_ALLOW_ALL = True
151 CORS_URLS_REGEX = r'^/api/.*$'
152
153
154 # mail
155
156 envvar_EMAIL_HOST = config('EMAIL_HOST', default=None)
157
158 ADMINS = config(
159 'ADMINS',
160 default='',
161 cast=lambda v: [
162 (
163 s.strip().split('|')[0],
164 s.strip().split('|')[1],
165 ) for s in v.split(',')] if v else [])
166 EMAIL_SUBJECT_PREFIX = '[bothub] '
167 DEFAULT_FROM_EMAIL = config(
168 'DEFAULT_FROM_EMAIL',
169 default='webmaster@localhost')
170 SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
171
172 if not DEBUG and envvar_EMAIL_HOST:
173 EMAIL_HOST = envvar_EMAIL_HOST
174 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
175 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
176 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
177 EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
178 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
179 else:
180 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
181
182
183 # webapp
184
185 BOTHUB_WEBAPP_BASE_URL = config(
186 'BOTHUB_WEBAPP_BASE_URL',
187 default='http://localhost:8080/')
188
189
190 # NLP
191
192 BOTHUB_NLP_BASE_URL = config(
193 'BOTHUB_NLP_BASE_URL',
194 default='http://localhost:8001/')
195
196
197 # CSRF
198
199 CSRF_COOKIE_DOMAIN = config(
200 'CSRF_COOKIE_DOMAIN',
201 default=None)
202
203 CSRF_COOKIE_SECURE = config(
204 'CSRF_COOKIE_SECURE',
205 default=False,
206 cast=bool)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bothub/settings.py b/bothub/settings.py
--- a/bothub/settings.py
+++ b/bothub/settings.py
@@ -169,7 +169,7 @@
default='webmaster@localhost')
SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
-if not DEBUG and envvar_EMAIL_HOST:
+if envvar_EMAIL_HOST:
EMAIL_HOST = envvar_EMAIL_HOST
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
|
{"golden_diff": "diff --git a/bothub/settings.py b/bothub/settings.py\n--- a/bothub/settings.py\n+++ b/bothub/settings.py\n@@ -169,7 +169,7 @@\n default='webmaster@localhost')\n SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n \n-if not DEBUG and envvar_EMAIL_HOST:\n+if envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n", "issue": "Just email console backend in development mode\nWhen EMAIL_HOST is setted and DEBUG is True email continue on console\n", "before_files": [{"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' 
+\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif not DEBUG and envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}], "after_files": [{"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}]}
| 1,958 | 124 |
gh_patches_debug_35844 | rasdani/github-patches | git_diff | scrapy__scrapy-1205 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate htmlparser link extractor
Let's add a deprecation warning like we did for SGML Link extractor
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/linkextractors/sgml.py`
Content:
```
1 """
2 SGMLParser-based Link extractors
3 """
4 from six.moves.urllib.parse import urljoin
5 import warnings
6 from sgmllib import SGMLParser
7
8 from w3lib.url import safe_url_string
9 from scrapy.selector import Selector
10 from scrapy.link import Link
11 from scrapy.linkextractor import FilteringLinkExtractor
12 from scrapy.utils.misc import arg_to_iter
13 from scrapy.utils.python import unique as unique_list, str_to_unicode
14 from scrapy.utils.response import get_base_url
15 from scrapy.exceptions import ScrapyDeprecationWarning
16
17
18 class BaseSgmlLinkExtractor(SGMLParser):
19
20 def __init__(self, tag="a", attr="href", unique=False, process_value=None):
21 warnings.warn(
22 "BaseSgmlLinkExtractor is deprecated and will be removed in future releases. "
23 "Please use scrapy.linkextractors.LinkExtractor",
24 ScrapyDeprecationWarning
25 )
26 SGMLParser.__init__(self)
27 self.scan_tag = tag if callable(tag) else lambda t: t == tag
28 self.scan_attr = attr if callable(attr) else lambda a: a == attr
29 self.process_value = (lambda v: v) if process_value is None else process_value
30 self.current_link = None
31 self.unique = unique
32
33 def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
34 """ Do the real extraction work """
35 self.reset()
36 self.feed(response_text)
37 self.close()
38
39 ret = []
40 if base_url is None:
41 base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
42 for link in self.links:
43 if isinstance(link.url, unicode):
44 link.url = link.url.encode(response_encoding)
45 link.url = urljoin(base_url, link.url)
46 link.url = safe_url_string(link.url, response_encoding)
47 link.text = str_to_unicode(link.text, response_encoding, errors='replace').strip()
48 ret.append(link)
49
50 return ret
51
52 def _process_links(self, links):
53 """ Normalize and filter extracted links
54
55 The subclass should override it if necessary
56 """
57 links = unique_list(links, key=lambda link: link.url) if self.unique else links
58 return links
59
60 def extract_links(self, response):
61 # wrapper needed to allow to work directly with text
62 links = self._extract_links(response.body, response.url, response.encoding)
63 links = self._process_links(links)
64 return links
65
66 def reset(self):
67 SGMLParser.reset(self)
68 self.links = []
69 self.base_url = None
70 self.current_link = None
71
72 def unknown_starttag(self, tag, attrs):
73 if tag == 'base':
74 self.base_url = dict(attrs).get('href')
75 if self.scan_tag(tag):
76 for attr, value in attrs:
77 if self.scan_attr(attr):
78 url = self.process_value(value)
79 if url is not None:
80 link = Link(url=url, nofollow=True if dict(attrs).get('rel') == 'nofollow' else False)
81 self.links.append(link)
82 self.current_link = link
83
84 def unknown_endtag(self, tag):
85 if self.scan_tag(tag):
86 self.current_link = None
87
88 def handle_data(self, data):
89 if self.current_link:
90 self.current_link.text = self.current_link.text + data
91
92 def matches(self, url):
93 """This extractor matches with any url, since
94 it doesn't contain any patterns"""
95 return True
96
97
98 class SgmlLinkExtractor(FilteringLinkExtractor):
99
100 def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
101 tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True,
102 process_value=None, deny_extensions=None, restrict_css=()):
103
104 warnings.warn(
105 "SgmlLinkExtractor is deprecated and will be removed in future releases. "
106 "Please use scrapy.linkextractors.LinkExtractor",
107 ScrapyDeprecationWarning
108 )
109
110 tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
111 tag_func = lambda x: x in tags
112 attr_func = lambda x: x in attrs
113
114 with warnings.catch_warnings(record=True):
115 lx = BaseSgmlLinkExtractor(tag=tag_func, attr=attr_func,
116 unique=unique, process_value=process_value)
117
118 super(SgmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,
119 allow_domains=allow_domains, deny_domains=deny_domains,
120 restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,
121 canonicalize=canonicalize, deny_extensions=deny_extensions)
122
123 # FIXME: was added to fix a RegexLinkExtractor testcase
124 self.base_url = None
125
126 def extract_links(self, response):
127 base_url = None
128 if self.restrict_xpaths:
129 sel = Selector(response)
130 base_url = get_base_url(response)
131 body = u''.join(f
132 for x in self.restrict_xpaths
133 for f in sel.xpath(x).extract()
134 ).encode(response.encoding, errors='xmlcharrefreplace')
135 else:
136 body = response.body
137
138 links = self._extract_links(body, response.url, response.encoding, base_url)
139 links = self._process_links(links)
140 return links
141
```
Path: `scrapy/linkextractors/htmlparser.py`
Content:
```
1 """
2 HTMLParser-based link extractor
3 """
4
5 from HTMLParser import HTMLParser
6 from six.moves.urllib.parse import urljoin
7
8 from w3lib.url import safe_url_string
9
10 from scrapy.link import Link
11 from scrapy.utils.python import unique as unique_list
12
13 class HtmlParserLinkExtractor(HTMLParser):
14
15 def __init__(self, tag="a", attr="href", process=None, unique=False):
16 HTMLParser.__init__(self)
17
18 self.scan_tag = tag if callable(tag) else lambda t: t == tag
19 self.scan_attr = attr if callable(attr) else lambda a: a == attr
20 self.process_attr = process if callable(process) else lambda v: v
21 self.unique = unique
22
23 def _extract_links(self, response_text, response_url, response_encoding):
24 self.reset()
25 self.feed(response_text)
26 self.close()
27
28 links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
29
30 ret = []
31 base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
32 for link in links:
33 if isinstance(link.url, unicode):
34 link.url = link.url.encode(response_encoding)
35 link.url = urljoin(base_url, link.url)
36 link.url = safe_url_string(link.url, response_encoding)
37 link.text = link.text.decode(response_encoding)
38 ret.append(link)
39
40 return ret
41
42 def extract_links(self, response):
43 # wrapper needed to allow to work directly with text
44 return self._extract_links(response.body, response.url, response.encoding)
45
46 def reset(self):
47 HTMLParser.reset(self)
48
49 self.base_url = None
50 self.current_link = None
51 self.links = []
52
53 def handle_starttag(self, tag, attrs):
54 if tag == 'base':
55 self.base_url = dict(attrs).get('href')
56 if self.scan_tag(tag):
57 for attr, value in attrs:
58 if self.scan_attr(attr):
59 url = self.process_attr(value)
60 link = Link(url=url)
61 self.links.append(link)
62 self.current_link = link
63
64 def handle_endtag(self, tag):
65 if self.scan_tag(tag):
66 self.current_link = None
67
68 def handle_data(self, data):
69 if self.current_link:
70 self.current_link.text = self.current_link.text + data
71
72 def matches(self, url):
73 """This extractor matches with any url, since
74 it doesn't contain any patterns"""
75 return True
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/linkextractors/htmlparser.py b/scrapy/linkextractors/htmlparser.py
--- a/scrapy/linkextractors/htmlparser.py
+++ b/scrapy/linkextractors/htmlparser.py
@@ -2,6 +2,7 @@
HTMLParser-based link extractor
"""
+import warnings
from HTMLParser import HTMLParser
from six.moves.urllib.parse import urljoin
@@ -9,12 +10,20 @@
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
+from scrapy.exceptions import ScrapyDeprecationWarning
+
class HtmlParserLinkExtractor(HTMLParser):
def __init__(self, tag="a", attr="href", process=None, unique=False):
HTMLParser.__init__(self)
+ warnings.warn(
+ "HtmlParserLinkExtractor is deprecated and will be removed in "
+ "future releases. Please use scrapy.linkextractors.LinkExtractor",
+ ScrapyDeprecationWarning, stacklevel=2,
+ )
+
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
diff --git a/scrapy/linkextractors/sgml.py b/scrapy/linkextractors/sgml.py
--- a/scrapy/linkextractors/sgml.py
+++ b/scrapy/linkextractors/sgml.py
@@ -21,7 +21,7 @@
warnings.warn(
"BaseSgmlLinkExtractor is deprecated and will be removed in future releases. "
"Please use scrapy.linkextractors.LinkExtractor",
- ScrapyDeprecationWarning
+ ScrapyDeprecationWarning, stacklevel=2,
)
SGMLParser.__init__(self)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
@@ -104,7 +104,7 @@
warnings.warn(
"SgmlLinkExtractor is deprecated and will be removed in future releases. "
"Please use scrapy.linkextractors.LinkExtractor",
- ScrapyDeprecationWarning
+ ScrapyDeprecationWarning, stacklevel=2,
)
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
|
{"golden_diff": "diff --git a/scrapy/linkextractors/htmlparser.py b/scrapy/linkextractors/htmlparser.py\n--- a/scrapy/linkextractors/htmlparser.py\n+++ b/scrapy/linkextractors/htmlparser.py\n@@ -2,6 +2,7 @@\n HTMLParser-based link extractor\n \"\"\"\n \n+import warnings\n from HTMLParser import HTMLParser\n from six.moves.urllib.parse import urljoin\n \n@@ -9,12 +10,20 @@\n \n from scrapy.link import Link\n from scrapy.utils.python import unique as unique_list\n+from scrapy.exceptions import ScrapyDeprecationWarning\n+\n \n class HtmlParserLinkExtractor(HTMLParser):\n \n def __init__(self, tag=\"a\", attr=\"href\", process=None, unique=False):\n HTMLParser.__init__(self)\n \n+ warnings.warn(\n+ \"HtmlParserLinkExtractor is deprecated and will be removed in \"\n+ \"future releases. Please use scrapy.linkextractors.LinkExtractor\",\n+ ScrapyDeprecationWarning, stacklevel=2,\n+ )\n+\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_attr = process if callable(process) else lambda v: v\ndiff --git a/scrapy/linkextractors/sgml.py b/scrapy/linkextractors/sgml.py\n--- a/scrapy/linkextractors/sgml.py\n+++ b/scrapy/linkextractors/sgml.py\n@@ -21,7 +21,7 @@\n warnings.warn(\n \"BaseSgmlLinkExtractor is deprecated and will be removed in future releases. \"\n \"Please use scrapy.linkextractors.LinkExtractor\",\n- ScrapyDeprecationWarning\n+ ScrapyDeprecationWarning, stacklevel=2,\n )\n SGMLParser.__init__(self)\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n@@ -104,7 +104,7 @@\n warnings.warn(\n \"SgmlLinkExtractor is deprecated and will be removed in future releases. \"\n \"Please use scrapy.linkextractors.LinkExtractor\",\n- ScrapyDeprecationWarning\n+ ScrapyDeprecationWarning, stacklevel=2,\n )\n \n tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))\n", "issue": "Deprecate htmlparser link extractor\nLet's add a deprecation warning like we did for SGML Link extractor\n\n", "before_files": [{"content": "\"\"\"\nSGMLParser-based Link extractors\n\"\"\"\nfrom six.moves.urllib.parse import urljoin\nimport warnings\nfrom sgmllib import SGMLParser\n\nfrom w3lib.url import safe_url_string\nfrom scrapy.selector import Selector\nfrom scrapy.link import Link\nfrom scrapy.linkextractor import FilteringLinkExtractor\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import unique as unique_list, str_to_unicode\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.exceptions import ScrapyDeprecationWarning\n\n\nclass BaseSgmlLinkExtractor(SGMLParser):\n\n def __init__(self, tag=\"a\", attr=\"href\", unique=False, process_value=None):\n warnings.warn(\n \"BaseSgmlLinkExtractor is deprecated and will be removed in future releases. 
\"\n \"Please use scrapy.linkextractors.LinkExtractor\",\n ScrapyDeprecationWarning\n )\n SGMLParser.__init__(self)\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_value = (lambda v: v) if process_value is None else process_value\n self.current_link = None\n self.unique = unique\n\n def _extract_links(self, response_text, response_url, response_encoding, base_url=None):\n \"\"\" Do the real extraction work \"\"\"\n self.reset()\n self.feed(response_text)\n self.close()\n\n ret = []\n if base_url is None:\n base_url = urljoin(response_url, self.base_url) if self.base_url else response_url\n for link in self.links:\n if isinstance(link.url, unicode):\n link.url = link.url.encode(response_encoding)\n link.url = urljoin(base_url, link.url)\n link.url = safe_url_string(link.url, response_encoding)\n link.text = str_to_unicode(link.text, response_encoding, errors='replace').strip()\n ret.append(link)\n\n return ret\n\n def _process_links(self, links):\n \"\"\" Normalize and filter extracted links\n\n The subclass should override it if necessary\n \"\"\"\n links = unique_list(links, key=lambda link: link.url) if self.unique else links\n return links\n\n def extract_links(self, response):\n # wrapper needed to allow to work directly with text\n links = self._extract_links(response.body, response.url, response.encoding)\n links = self._process_links(links)\n return links\n\n def reset(self):\n SGMLParser.reset(self)\n self.links = []\n self.base_url = None\n self.current_link = None\n\n def unknown_starttag(self, tag, attrs):\n if tag == 'base':\n self.base_url = dict(attrs).get('href')\n if self.scan_tag(tag):\n for attr, value in attrs:\n if self.scan_attr(attr):\n url = self.process_value(value)\n if url is not None:\n link = Link(url=url, nofollow=True if dict(attrs).get('rel') == 'nofollow' else False)\n self.links.append(link)\n self.current_link = link\n\n def unknown_endtag(self, tag):\n if self.scan_tag(tag):\n self.current_link = None\n\n def handle_data(self, data):\n if self.current_link:\n self.current_link.text = self.current_link.text + data\n\n def matches(self, url):\n \"\"\"This extractor matches with any url, since\n it doesn't contain any patterns\"\"\"\n return True\n\n\nclass SgmlLinkExtractor(FilteringLinkExtractor):\n\n def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),\n tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True,\n process_value=None, deny_extensions=None, restrict_css=()):\n\n warnings.warn(\n \"SgmlLinkExtractor is deprecated and will be removed in future releases. 
\"\n \"Please use scrapy.linkextractors.LinkExtractor\",\n ScrapyDeprecationWarning\n )\n\n tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))\n tag_func = lambda x: x in tags\n attr_func = lambda x: x in attrs\n\n with warnings.catch_warnings(record=True):\n lx = BaseSgmlLinkExtractor(tag=tag_func, attr=attr_func,\n unique=unique, process_value=process_value)\n\n super(SgmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,\n allow_domains=allow_domains, deny_domains=deny_domains,\n restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,\n canonicalize=canonicalize, deny_extensions=deny_extensions)\n\n # FIXME: was added to fix a RegexLinkExtractor testcase\n self.base_url = None\n\n def extract_links(self, response):\n base_url = None\n if self.restrict_xpaths:\n sel = Selector(response)\n base_url = get_base_url(response)\n body = u''.join(f\n for x in self.restrict_xpaths\n for f in sel.xpath(x).extract()\n ).encode(response.encoding, errors='xmlcharrefreplace')\n else:\n body = response.body\n\n links = self._extract_links(body, response.url, response.encoding, base_url)\n links = self._process_links(links)\n return links\n", "path": "scrapy/linkextractors/sgml.py"}, {"content": "\"\"\"\nHTMLParser-based link extractor\n\"\"\"\n\nfrom HTMLParser import HTMLParser\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.link import Link\nfrom scrapy.utils.python import unique as unique_list\n\nclass HtmlParserLinkExtractor(HTMLParser):\n\n def __init__(self, tag=\"a\", attr=\"href\", process=None, unique=False):\n HTMLParser.__init__(self)\n\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_attr = process if callable(process) else lambda v: v\n self.unique = unique\n\n def _extract_links(self, response_text, response_url, response_encoding):\n self.reset()\n self.feed(response_text)\n self.close()\n\n links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links\n\n ret = []\n base_url = urljoin(response_url, self.base_url) if self.base_url else response_url\n for link in links:\n if isinstance(link.url, unicode):\n link.url = link.url.encode(response_encoding)\n link.url = urljoin(base_url, link.url)\n link.url = safe_url_string(link.url, response_encoding)\n link.text = link.text.decode(response_encoding)\n ret.append(link)\n\n return ret\n\n def extract_links(self, response):\n # wrapper needed to allow to work directly with text\n return self._extract_links(response.body, response.url, response.encoding)\n\n def reset(self):\n HTMLParser.reset(self)\n\n self.base_url = None\n self.current_link = None\n self.links = []\n\n def handle_starttag(self, tag, attrs):\n if tag == 'base':\n self.base_url = dict(attrs).get('href')\n if self.scan_tag(tag):\n for attr, value in attrs:\n if self.scan_attr(attr):\n url = self.process_attr(value)\n link = Link(url=url)\n self.links.append(link)\n self.current_link = link\n\n def handle_endtag(self, tag):\n if self.scan_tag(tag):\n self.current_link = None\n\n def handle_data(self, data):\n if self.current_link:\n self.current_link.text = self.current_link.text + data\n\n def matches(self, url):\n \"\"\"This extractor matches with any url, since\n it doesn't contain any patterns\"\"\"\n return True\n", "path": "scrapy/linkextractors/htmlparser.py"}], "after_files": [{"content": "\"\"\"\nSGMLParser-based Link extractors\n\"\"\"\nfrom six.moves.urllib.parse import 
urljoin\nimport warnings\nfrom sgmllib import SGMLParser\n\nfrom w3lib.url import safe_url_string\nfrom scrapy.selector import Selector\nfrom scrapy.link import Link\nfrom scrapy.linkextractor import FilteringLinkExtractor\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import unique as unique_list, str_to_unicode\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.exceptions import ScrapyDeprecationWarning\n\n\nclass BaseSgmlLinkExtractor(SGMLParser):\n\n def __init__(self, tag=\"a\", attr=\"href\", unique=False, process_value=None):\n warnings.warn(\n \"BaseSgmlLinkExtractor is deprecated and will be removed in future releases. \"\n \"Please use scrapy.linkextractors.LinkExtractor\",\n ScrapyDeprecationWarning, stacklevel=2,\n )\n SGMLParser.__init__(self)\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_value = (lambda v: v) if process_value is None else process_value\n self.current_link = None\n self.unique = unique\n\n def _extract_links(self, response_text, response_url, response_encoding, base_url=None):\n \"\"\" Do the real extraction work \"\"\"\n self.reset()\n self.feed(response_text)\n self.close()\n\n ret = []\n if base_url is None:\n base_url = urljoin(response_url, self.base_url) if self.base_url else response_url\n for link in self.links:\n if isinstance(link.url, unicode):\n link.url = link.url.encode(response_encoding)\n link.url = urljoin(base_url, link.url)\n link.url = safe_url_string(link.url, response_encoding)\n link.text = str_to_unicode(link.text, response_encoding, errors='replace').strip()\n ret.append(link)\n\n return ret\n\n def _process_links(self, links):\n \"\"\" Normalize and filter extracted links\n\n The subclass should override it if necessary\n \"\"\"\n links = unique_list(links, key=lambda link: link.url) if self.unique else links\n return links\n\n def extract_links(self, response):\n # wrapper needed to allow to work directly with text\n links = self._extract_links(response.body, response.url, response.encoding)\n links = self._process_links(links)\n return links\n\n def reset(self):\n SGMLParser.reset(self)\n self.links = []\n self.base_url = None\n self.current_link = None\n\n def unknown_starttag(self, tag, attrs):\n if tag == 'base':\n self.base_url = dict(attrs).get('href')\n if self.scan_tag(tag):\n for attr, value in attrs:\n if self.scan_attr(attr):\n url = self.process_value(value)\n if url is not None:\n link = Link(url=url, nofollow=True if dict(attrs).get('rel') == 'nofollow' else False)\n self.links.append(link)\n self.current_link = link\n\n def unknown_endtag(self, tag):\n if self.scan_tag(tag):\n self.current_link = None\n\n def handle_data(self, data):\n if self.current_link:\n self.current_link.text = self.current_link.text + data\n\n def matches(self, url):\n \"\"\"This extractor matches with any url, since\n it doesn't contain any patterns\"\"\"\n return True\n\n\nclass SgmlLinkExtractor(FilteringLinkExtractor):\n\n def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),\n tags=('a', 'area'), attrs=('href',), canonicalize=True, unique=True,\n process_value=None, deny_extensions=None, restrict_css=()):\n\n warnings.warn(\n \"SgmlLinkExtractor is deprecated and will be removed in future releases. 
\"\n \"Please use scrapy.linkextractors.LinkExtractor\",\n ScrapyDeprecationWarning, stacklevel=2,\n )\n\n tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))\n tag_func = lambda x: x in tags\n attr_func = lambda x: x in attrs\n\n with warnings.catch_warnings(record=True):\n lx = BaseSgmlLinkExtractor(tag=tag_func, attr=attr_func,\n unique=unique, process_value=process_value)\n\n super(SgmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,\n allow_domains=allow_domains, deny_domains=deny_domains,\n restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,\n canonicalize=canonicalize, deny_extensions=deny_extensions)\n\n # FIXME: was added to fix a RegexLinkExtractor testcase\n self.base_url = None\n\n def extract_links(self, response):\n base_url = None\n if self.restrict_xpaths:\n sel = Selector(response)\n base_url = get_base_url(response)\n body = u''.join(f\n for x in self.restrict_xpaths\n for f in sel.xpath(x).extract()\n ).encode(response.encoding, errors='xmlcharrefreplace')\n else:\n body = response.body\n\n links = self._extract_links(body, response.url, response.encoding, base_url)\n links = self._process_links(links)\n return links\n", "path": "scrapy/linkextractors/sgml.py"}, {"content": "\"\"\"\nHTMLParser-based link extractor\n\"\"\"\n\nimport warnings\nfrom HTMLParser import HTMLParser\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.link import Link\nfrom scrapy.utils.python import unique as unique_list\nfrom scrapy.exceptions import ScrapyDeprecationWarning\n\n\nclass HtmlParserLinkExtractor(HTMLParser):\n\n def __init__(self, tag=\"a\", attr=\"href\", process=None, unique=False):\n HTMLParser.__init__(self)\n\n warnings.warn(\n \"HtmlParserLinkExtractor is deprecated and will be removed in \"\n \"future releases. 
Please use scrapy.linkextractors.LinkExtractor\",\n ScrapyDeprecationWarning, stacklevel=2,\n )\n\n self.scan_tag = tag if callable(tag) else lambda t: t == tag\n self.scan_attr = attr if callable(attr) else lambda a: a == attr\n self.process_attr = process if callable(process) else lambda v: v\n self.unique = unique\n\n def _extract_links(self, response_text, response_url, response_encoding):\n self.reset()\n self.feed(response_text)\n self.close()\n\n links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links\n\n ret = []\n base_url = urljoin(response_url, self.base_url) if self.base_url else response_url\n for link in links:\n if isinstance(link.url, unicode):\n link.url = link.url.encode(response_encoding)\n link.url = urljoin(base_url, link.url)\n link.url = safe_url_string(link.url, response_encoding)\n link.text = link.text.decode(response_encoding)\n ret.append(link)\n\n return ret\n\n def extract_links(self, response):\n # wrapper needed to allow to work directly with text\n return self._extract_links(response.body, response.url, response.encoding)\n\n def reset(self):\n HTMLParser.reset(self)\n\n self.base_url = None\n self.current_link = None\n self.links = []\n\n def handle_starttag(self, tag, attrs):\n if tag == 'base':\n self.base_url = dict(attrs).get('href')\n if self.scan_tag(tag):\n for attr, value in attrs:\n if self.scan_attr(attr):\n url = self.process_attr(value)\n link = Link(url=url)\n self.links.append(link)\n self.current_link = link\n\n def handle_endtag(self, tag):\n if self.scan_tag(tag):\n self.current_link = None\n\n def handle_data(self, data):\n if self.current_link:\n self.current_link.text = self.current_link.text + data\n\n def matches(self, url):\n \"\"\"This extractor matches with any url, since\n it doesn't contain any patterns\"\"\"\n return True\n", "path": "scrapy/linkextractors/htmlparser.py"}]}
| 2,447 | 508 |
gh_patches_debug_34719 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-311 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aioredis.pubsub.Receiver has no iter() method
The example code (below) references an iter() method on Receiver (here is the [implementation](https://github.com/aio-libs/aioredis/blob/master/aioredis/pubsub.py#L200)).
There is no iter() method on Receiver. What is the correct implementation of listening on all Receiver's subscribed channels?
code:
```
>>> from aioredis.pubsub import Receiver
>>> from aioredis.abc import AbcChannel
>>> mpsc = Receiver(loop=loop)
>>> async def reader(mpsc):
... async for channel, msg in mpsc.iter():
... assert isinstance(channel, AbcChannel)
... print("Got {!r} in channel {!r}".format(msg, channel))
>>> asyncio.ensure_future(reader(mpsc))
>>> await redis.subscribe(mpsc.channel('channel:1'),
... mpsc.channel('channel:3'))
... mpsc.channel('channel:5'))
>>> await redis.psubscribe(mpsc.pattern('hello'))
>>> # publishing 'Hello world' into 'hello-channel'
>>> # will print this message:
Got b'Hello world' in channel b'hello-channel'
>>> # when all is done:
>>> await redis.unsubscribe('channel:1', 'channel:3', 'channel:5')
>>> await redis.punsubscribe('hello')
>>> mpsc.stop()
>>> # any message received after stop() will be ignored.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aioredis/commands/__init__.py`
Content:
```
1 import asyncio
2 import warnings
3
4 from aioredis.connection import create_connection
5 from aioredis.pool import create_pool
6 from aioredis.util import _NOTSET
7 from .generic import GenericCommandsMixin
8 from .string import StringCommandsMixin
9 from .hash import HashCommandsMixin
10 from .hyperloglog import HyperLogLogCommandsMixin
11 from .set import SetCommandsMixin
12 from .sorted_set import SortedSetCommandsMixin
13 from .transaction import TransactionsCommandsMixin, Pipeline, MultiExec
14 from .list import ListCommandsMixin
15 from .scripting import ScriptingCommandsMixin
16 from .server import ServerCommandsMixin
17 from .pubsub import PubSubCommandsMixin
18 from .cluster import ClusterCommandsMixin
19 from .geo import GeoCommandsMixin, GeoPoint, GeoMember
20
21 __all__ = [
22 'create_redis',
23 'create_redis_pool',
24 'Redis',
25 'Pipeline',
26 'MultiExec',
27 'GeoPoint',
28 'GeoMember',
29 ]
30
31
32 class Redis(GenericCommandsMixin, StringCommandsMixin,
33 HyperLogLogCommandsMixin, SetCommandsMixin,
34 HashCommandsMixin, TransactionsCommandsMixin,
35 SortedSetCommandsMixin, ListCommandsMixin,
36 ScriptingCommandsMixin, ServerCommandsMixin,
37 PubSubCommandsMixin, ClusterCommandsMixin,
38 GeoCommandsMixin):
39 """High-level Redis interface.
40
41 Gathers in one place Redis commands implemented in mixins.
42
43 For commands details see: http://redis.io/commands/#connection
44 """
45 def __init__(self, pool_or_conn):
46 self._pool_or_conn = pool_or_conn
47
48 def __repr__(self):
49 return '<Redis {!r}>'.format(self._pool_or_conn)
50
51 def execute(self, command, *args, **kwargs):
52 return self._pool_or_conn.execute(command, *args, **kwargs)
53
54 def close(self):
55 """Close client connections."""
56 self._pool_or_conn.close()
57
58 @asyncio.coroutine
59 def wait_closed(self):
60 """Coroutine waiting until underlying connections are closed."""
61 yield from self._pool_or_conn.wait_closed()
62
63 @property
64 def db(self):
65 """Currently selected db index."""
66 return self._pool_or_conn.db
67
68 @property
69 def encoding(self):
70 """Current set codec or None."""
71 return self._pool_or_conn.encoding
72
73 @property
74 def connection(self):
75 """Either :class:`aioredis.RedisConnection`,
76 or :class:`aioredis.ConnectionsPool` instance.
77 """
78 return self._pool_or_conn
79
80 @property
81 def address(self):
82 """Redis connection address (if applicable)."""
83 return self._pool_or_conn.address
84
85 @property
86 def in_transaction(self):
87 """Set to True when MULTI command was issued."""
88 # XXX: this must be bound to real connection
89 return self._pool_or_conn.in_transaction
90
91 @property
92 def closed(self):
93 """True if connection is closed."""
94 return self._pool_or_conn.closed
95
96 def auth(self, password):
97 """Authenticate to server.
98
99 This method wraps call to :meth:`aioredis.RedisConnection.auth()`
100 """
101 return self._pool_or_conn.auth(password)
102
103 def echo(self, message, *, encoding=_NOTSET):
104 """Echo the given string."""
105 return self.execute('ECHO', message, encoding=encoding)
106
107 def ping(self, message=_NOTSET, *, encoding=_NOTSET):
108 """Ping the server.
109
110 Accept optional echo message.
111 """
112 if message is not _NOTSET:
113 args = (message,)
114 else:
115 args = ()
116 return self.execute('PING', *args, encoding=encoding)
117
118 def quit(self):
119 """Close the connection."""
120 # TODO: warn when using pool
121 return self.execute('QUIT')
122
123 def select(self, db):
124 """Change the selected database for the current connection.
125
126 This method wraps call to :meth:`aioredis.RedisConnection.select()`
127 """
128 return self._pool_or_conn.select(db)
129
130 def __enter__(self):
131 # TODO: warn it is obsolete way
132 warnings.warn("It is not recommended way to use Redis instance"
133 " as a context manager. Use Redis.<command> directly")
134 return self
135
136 def __exit__(self, *args):
137 pass
138
139 def __iter__(self):
140 return self
141 yield
142
143
144 @asyncio.coroutine
145 def create_redis(address, *, db=None, password=None, ssl=None,
146 encoding=None, commands_factory=Redis,
147 parser=None, timeout=None,
148 connection_cls=None, loop=None):
149 """Creates high-level Redis interface.
150
151 This function is a coroutine.
152 """
153 conn = yield from create_connection(address, db=db,
154 password=password,
155 ssl=ssl,
156 encoding=encoding,
157 parser=parser,
158 timeout=timeout,
159 connection_cls=connection_cls,
160 loop=loop)
161 return commands_factory(conn)
162
163
164 @asyncio.coroutine
165 def create_redis_pool(address, *, db=None, password=None, ssl=None,
166 encoding=None, commands_factory=Redis,
167 minsize=1, maxsize=10, parser=None,
168 timeout=None, pool_cls=None,
169 connection_cls=None, loop=None):
170 """Creates high-level Redis interface.
171
172 This function is a coroutine.
173 """
174 pool = yield from create_pool(address, db=db,
175 password=password,
176 ssl=ssl,
177 encoding=encoding,
178 minsize=minsize,
179 maxsize=maxsize,
180 parser=parser,
181 create_connection_timeout=timeout,
182 pool_cls=pool_cls,
183 connection_cls=connection_cls,
184 loop=loop)
185 return commands_factory(pool)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/aioredis/commands/__init__.py b/aioredis/commands/__init__.py
--- a/aioredis/commands/__init__.py
+++ b/aioredis/commands/__init__.py
@@ -1,9 +1,10 @@
import asyncio
-import warnings
+# import warnings
from aioredis.connection import create_connection
from aioredis.pool import create_pool
from aioredis.util import _NOTSET
+from aioredis.abc import AbcPool
from .generic import GenericCommandsMixin
from .string import StringCommandsMixin
from .hash import HashCommandsMixin
@@ -46,7 +47,7 @@
self._pool_or_conn = pool_or_conn
def __repr__(self):
- return '<Redis {!r}>'.format(self._pool_or_conn)
+ return '<{} {!r}>'.format(self.__class__.__name__, self._pool_or_conn)
def execute(self, command, *args, **kwargs):
return self._pool_or_conn.execute(command, *args, **kwargs)
@@ -127,18 +128,38 @@
"""
return self._pool_or_conn.select(db)
+ def __await__(self):
+ if isinstance(self._pool_or_conn, AbcPool):
+ conn = yield from self._pool_or_conn.acquire()
+ release = self._pool_or_conn.release
+ else:
+ # TODO: probably a lock is needed here if _pool_or_conn
+ # is Connection instance.
+ conn = self._pool_or_conn
+ release = None
+ return ContextRedis(conn, release)
+ __iter__ = __await__
+
+
+class ContextRedis(Redis):
+ """An instance of Redis class bound to single connection."""
+
+ def __init__(self, conn, release_cb=None):
+ super().__init__(conn)
+ self._release_callback = release_cb
+
def __enter__(self):
- # TODO: warn it is obsolete way
- warnings.warn("It is not recommended way to use Redis instance"
- " as a context manager. Use Redis.<command> directly")
return self
- def __exit__(self, *args):
- pass
+ def __exit__(self, *exc_info):
+ if self._release_callback is not None:
+ conn, self._pool_or_conn = self._pool_or_conn, None
+ self._release_callback(conn)
- def __iter__(self):
- return self
+ def __await__(self):
+ return ContextRedis(self._pool_or_conn)
yield
+ __iter__ = __await__
@asyncio.coroutine
|
{"golden_diff": "diff --git a/aioredis/commands/__init__.py b/aioredis/commands/__init__.py\n--- a/aioredis/commands/__init__.py\n+++ b/aioredis/commands/__init__.py\n@@ -1,9 +1,10 @@\n import asyncio\n-import warnings\n+# import warnings\n \n from aioredis.connection import create_connection\n from aioredis.pool import create_pool\n from aioredis.util import _NOTSET\n+from aioredis.abc import AbcPool\n from .generic import GenericCommandsMixin\n from .string import StringCommandsMixin\n from .hash import HashCommandsMixin\n@@ -46,7 +47,7 @@\n self._pool_or_conn = pool_or_conn\n \n def __repr__(self):\n- return '<Redis {!r}>'.format(self._pool_or_conn)\n+ return '<{} {!r}>'.format(self.__class__.__name__, self._pool_or_conn)\n \n def execute(self, command, *args, **kwargs):\n return self._pool_or_conn.execute(command, *args, **kwargs)\n@@ -127,18 +128,38 @@\n \"\"\"\n return self._pool_or_conn.select(db)\n \n+ def __await__(self):\n+ if isinstance(self._pool_or_conn, AbcPool):\n+ conn = yield from self._pool_or_conn.acquire()\n+ release = self._pool_or_conn.release\n+ else:\n+ # TODO: probably a lock is needed here if _pool_or_conn\n+ # is Connection instance.\n+ conn = self._pool_or_conn\n+ release = None\n+ return ContextRedis(conn, release)\n+ __iter__ = __await__\n+\n+\n+class ContextRedis(Redis):\n+ \"\"\"An instance of Redis class bound to single connection.\"\"\"\n+\n+ def __init__(self, conn, release_cb=None):\n+ super().__init__(conn)\n+ self._release_callback = release_cb\n+\n def __enter__(self):\n- # TODO: warn it is obsolete way\n- warnings.warn(\"It is not recommended way to use Redis instance\"\n- \" as a context manager. Use Redis.<command> directly\")\n return self\n \n- def __exit__(self, *args):\n- pass\n+ def __exit__(self, *exc_info):\n+ if self._release_callback is not None:\n+ conn, self._pool_or_conn = self._pool_or_conn, None\n+ self._release_callback(conn)\n \n- def __iter__(self):\n- return self\n+ def __await__(self):\n+ return ContextRedis(self._pool_or_conn)\n yield\n+ __iter__ = __await__\n \n \n @asyncio.coroutine\n", "issue": "aioredis.pubsub.Receiver has no iter() method\nThe example code (below) references an iter() method on Receiver (here is the [implementation](https://github.com/aio-libs/aioredis/blob/master/aioredis/pubsub.py#L200)).\r\n\r\nThere is no iter() method on Receiver. What is the correct implementation of listening on all Receiver's subscribed channels?\r\n\r\ncode:\r\n```\r\n>>> from aioredis.pubsub import Receiver\r\n>>> from aioredis.abc import AbcChannel\r\n>>> mpsc = Receiver(loop=loop)\r\n>>> async def reader(mpsc):\r\n... async for channel, msg in mpsc.iter():\r\n... assert isinstance(channel, AbcChannel)\r\n... print(\"Got {!r} in channel {!r}\".format(msg, channel))\r\n>>> asyncio.ensure_future(reader(mpsc))\r\n>>> await redis.subscribe(mpsc.channel('channel:1'),\r\n... mpsc.channel('channel:3'))\r\n... 
mpsc.channel('channel:5'))\r\n>>> await redis.psubscribe(mpsc.pattern('hello'))\r\n>>> # publishing 'Hello world' into 'hello-channel'\r\n>>> # will print this message:\r\nGot b'Hello world' in channel b'hello-channel'\r\n>>> # when all is done:\r\n>>> await redis.unsubscribe('channel:1', 'channel:3', 'channel:5')\r\n>>> await redis.punsubscribe('hello')\r\n>>> mpsc.stop()\r\n>>> # any message received after stop() will be ignored.\r\n```\n", "before_files": [{"content": "import asyncio\nimport warnings\n\nfrom aioredis.connection import create_connection\nfrom aioredis.pool import create_pool\nfrom aioredis.util import _NOTSET\nfrom .generic import GenericCommandsMixin\nfrom .string import StringCommandsMixin\nfrom .hash import HashCommandsMixin\nfrom .hyperloglog import HyperLogLogCommandsMixin\nfrom .set import SetCommandsMixin\nfrom .sorted_set import SortedSetCommandsMixin\nfrom .transaction import TransactionsCommandsMixin, Pipeline, MultiExec\nfrom .list import ListCommandsMixin\nfrom .scripting import ScriptingCommandsMixin\nfrom .server import ServerCommandsMixin\nfrom .pubsub import PubSubCommandsMixin\nfrom .cluster import ClusterCommandsMixin\nfrom .geo import GeoCommandsMixin, GeoPoint, GeoMember\n\n__all__ = [\n 'create_redis',\n 'create_redis_pool',\n 'Redis',\n 'Pipeline',\n 'MultiExec',\n 'GeoPoint',\n 'GeoMember',\n]\n\n\nclass Redis(GenericCommandsMixin, StringCommandsMixin,\n HyperLogLogCommandsMixin, SetCommandsMixin,\n HashCommandsMixin, TransactionsCommandsMixin,\n SortedSetCommandsMixin, ListCommandsMixin,\n ScriptingCommandsMixin, ServerCommandsMixin,\n PubSubCommandsMixin, ClusterCommandsMixin,\n GeoCommandsMixin):\n \"\"\"High-level Redis interface.\n\n Gathers in one place Redis commands implemented in mixins.\n\n For commands details see: http://redis.io/commands/#connection\n \"\"\"\n def __init__(self, pool_or_conn):\n self._pool_or_conn = pool_or_conn\n\n def __repr__(self):\n return '<Redis {!r}>'.format(self._pool_or_conn)\n\n def execute(self, command, *args, **kwargs):\n return self._pool_or_conn.execute(command, *args, **kwargs)\n\n def close(self):\n \"\"\"Close client connections.\"\"\"\n self._pool_or_conn.close()\n\n @asyncio.coroutine\n def wait_closed(self):\n \"\"\"Coroutine waiting until underlying connections are closed.\"\"\"\n yield from self._pool_or_conn.wait_closed()\n\n @property\n def db(self):\n \"\"\"Currently selected db index.\"\"\"\n return self._pool_or_conn.db\n\n @property\n def encoding(self):\n \"\"\"Current set codec or None.\"\"\"\n return self._pool_or_conn.encoding\n\n @property\n def connection(self):\n \"\"\"Either :class:`aioredis.RedisConnection`,\n or :class:`aioredis.ConnectionsPool` instance.\n \"\"\"\n return self._pool_or_conn\n\n @property\n def address(self):\n \"\"\"Redis connection address (if applicable).\"\"\"\n return self._pool_or_conn.address\n\n @property\n def in_transaction(self):\n \"\"\"Set to True when MULTI command was issued.\"\"\"\n # XXX: this must be bound to real connection\n return self._pool_or_conn.in_transaction\n\n @property\n def closed(self):\n \"\"\"True if connection is closed.\"\"\"\n return self._pool_or_conn.closed\n\n def auth(self, password):\n \"\"\"Authenticate to server.\n\n This method wraps call to :meth:`aioredis.RedisConnection.auth()`\n \"\"\"\n return self._pool_or_conn.auth(password)\n\n def echo(self, message, *, encoding=_NOTSET):\n \"\"\"Echo the given string.\"\"\"\n return self.execute('ECHO', message, encoding=encoding)\n\n def ping(self, message=_NOTSET, *, 
encoding=_NOTSET):\n \"\"\"Ping the server.\n\n Accept optional echo message.\n \"\"\"\n if message is not _NOTSET:\n args = (message,)\n else:\n args = ()\n return self.execute('PING', *args, encoding=encoding)\n\n def quit(self):\n \"\"\"Close the connection.\"\"\"\n # TODO: warn when using pool\n return self.execute('QUIT')\n\n def select(self, db):\n \"\"\"Change the selected database for the current connection.\n\n This method wraps call to :meth:`aioredis.RedisConnection.select()`\n \"\"\"\n return self._pool_or_conn.select(db)\n\n def __enter__(self):\n # TODO: warn it is obsolete way\n warnings.warn(\"It is not recommended way to use Redis instance\"\n \" as a context manager. Use Redis.<command> directly\")\n return self\n\n def __exit__(self, *args):\n pass\n\n def __iter__(self):\n return self\n yield\n\n\[email protected]\ndef create_redis(address, *, db=None, password=None, ssl=None,\n encoding=None, commands_factory=Redis,\n parser=None, timeout=None,\n connection_cls=None, loop=None):\n \"\"\"Creates high-level Redis interface.\n\n This function is a coroutine.\n \"\"\"\n conn = yield from create_connection(address, db=db,\n password=password,\n ssl=ssl,\n encoding=encoding,\n parser=parser,\n timeout=timeout,\n connection_cls=connection_cls,\n loop=loop)\n return commands_factory(conn)\n\n\[email protected]\ndef create_redis_pool(address, *, db=None, password=None, ssl=None,\n encoding=None, commands_factory=Redis,\n minsize=1, maxsize=10, parser=None,\n timeout=None, pool_cls=None,\n connection_cls=None, loop=None):\n \"\"\"Creates high-level Redis interface.\n\n This function is a coroutine.\n \"\"\"\n pool = yield from create_pool(address, db=db,\n password=password,\n ssl=ssl,\n encoding=encoding,\n minsize=minsize,\n maxsize=maxsize,\n parser=parser,\n create_connection_timeout=timeout,\n pool_cls=pool_cls,\n connection_cls=connection_cls,\n loop=loop)\n return commands_factory(pool)\n", "path": "aioredis/commands/__init__.py"}], "after_files": [{"content": "import asyncio\n# import warnings\n\nfrom aioredis.connection import create_connection\nfrom aioredis.pool import create_pool\nfrom aioredis.util import _NOTSET\nfrom aioredis.abc import AbcPool\nfrom .generic import GenericCommandsMixin\nfrom .string import StringCommandsMixin\nfrom .hash import HashCommandsMixin\nfrom .hyperloglog import HyperLogLogCommandsMixin\nfrom .set import SetCommandsMixin\nfrom .sorted_set import SortedSetCommandsMixin\nfrom .transaction import TransactionsCommandsMixin, Pipeline, MultiExec\nfrom .list import ListCommandsMixin\nfrom .scripting import ScriptingCommandsMixin\nfrom .server import ServerCommandsMixin\nfrom .pubsub import PubSubCommandsMixin\nfrom .cluster import ClusterCommandsMixin\nfrom .geo import GeoCommandsMixin, GeoPoint, GeoMember\n\n__all__ = [\n 'create_redis',\n 'create_redis_pool',\n 'Redis',\n 'Pipeline',\n 'MultiExec',\n 'GeoPoint',\n 'GeoMember',\n]\n\n\nclass Redis(GenericCommandsMixin, StringCommandsMixin,\n HyperLogLogCommandsMixin, SetCommandsMixin,\n HashCommandsMixin, TransactionsCommandsMixin,\n SortedSetCommandsMixin, ListCommandsMixin,\n ScriptingCommandsMixin, ServerCommandsMixin,\n PubSubCommandsMixin, ClusterCommandsMixin,\n GeoCommandsMixin):\n \"\"\"High-level Redis interface.\n\n Gathers in one place Redis commands implemented in mixins.\n\n For commands details see: http://redis.io/commands/#connection\n \"\"\"\n def __init__(self, pool_or_conn):\n self._pool_or_conn = pool_or_conn\n\n def __repr__(self):\n return '<{} 
{!r}>'.format(self.__class__.__name__, self._pool_or_conn)\n\n def execute(self, command, *args, **kwargs):\n return self._pool_or_conn.execute(command, *args, **kwargs)\n\n def close(self):\n \"\"\"Close client connections.\"\"\"\n self._pool_or_conn.close()\n\n @asyncio.coroutine\n def wait_closed(self):\n \"\"\"Coroutine waiting until underlying connections are closed.\"\"\"\n yield from self._pool_or_conn.wait_closed()\n\n @property\n def db(self):\n \"\"\"Currently selected db index.\"\"\"\n return self._pool_or_conn.db\n\n @property\n def encoding(self):\n \"\"\"Current set codec or None.\"\"\"\n return self._pool_or_conn.encoding\n\n @property\n def connection(self):\n \"\"\"Either :class:`aioredis.RedisConnection`,\n or :class:`aioredis.ConnectionsPool` instance.\n \"\"\"\n return self._pool_or_conn\n\n @property\n def address(self):\n \"\"\"Redis connection address (if applicable).\"\"\"\n return self._pool_or_conn.address\n\n @property\n def in_transaction(self):\n \"\"\"Set to True when MULTI command was issued.\"\"\"\n # XXX: this must be bound to real connection\n return self._pool_or_conn.in_transaction\n\n @property\n def closed(self):\n \"\"\"True if connection is closed.\"\"\"\n return self._pool_or_conn.closed\n\n def auth(self, password):\n \"\"\"Authenticate to server.\n\n This method wraps call to :meth:`aioredis.RedisConnection.auth()`\n \"\"\"\n return self._pool_or_conn.auth(password)\n\n def echo(self, message, *, encoding=_NOTSET):\n \"\"\"Echo the given string.\"\"\"\n return self.execute('ECHO', message, encoding=encoding)\n\n def ping(self, message=_NOTSET, *, encoding=_NOTSET):\n \"\"\"Ping the server.\n\n Accept optional echo message.\n \"\"\"\n if message is not _NOTSET:\n args = (message,)\n else:\n args = ()\n return self.execute('PING', *args, encoding=encoding)\n\n def quit(self):\n \"\"\"Close the connection.\"\"\"\n # TODO: warn when using pool\n return self.execute('QUIT')\n\n def select(self, db):\n \"\"\"Change the selected database for the current connection.\n\n This method wraps call to :meth:`aioredis.RedisConnection.select()`\n \"\"\"\n return self._pool_or_conn.select(db)\n\n def __await__(self):\n if isinstance(self._pool_or_conn, AbcPool):\n conn = yield from self._pool_or_conn.acquire()\n release = self._pool_or_conn.release\n else:\n # TODO: probably a lock is needed here if _pool_or_conn\n # is Connection instance.\n conn = self._pool_or_conn\n release = None\n return ContextRedis(conn, release)\n __iter__ = __await__\n\n\nclass ContextRedis(Redis):\n \"\"\"An instance of Redis class bound to single connection.\"\"\"\n\n def __init__(self, conn, release_cb=None):\n super().__init__(conn)\n self._release_callback = release_cb\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n if self._release_callback is not None:\n conn, self._pool_or_conn = self._pool_or_conn, None\n self._release_callback(conn)\n\n def __await__(self):\n return ContextRedis(self._pool_or_conn)\n yield\n __iter__ = __await__\n\n\[email protected]\ndef create_redis(address, *, db=None, password=None, ssl=None,\n encoding=None, commands_factory=Redis,\n parser=None, timeout=None,\n connection_cls=None, loop=None):\n \"\"\"Creates high-level Redis interface.\n\n This function is a coroutine.\n \"\"\"\n conn = yield from create_connection(address, db=db,\n password=password,\n ssl=ssl,\n encoding=encoding,\n parser=parser,\n timeout=timeout,\n connection_cls=connection_cls,\n loop=loop)\n return commands_factory(conn)\n\n\[email protected]\ndef 
create_redis_pool(address, *, db=None, password=None, ssl=None,\n encoding=None, commands_factory=Redis,\n minsize=1, maxsize=10, parser=None,\n timeout=None, pool_cls=None,\n connection_cls=None, loop=None):\n \"\"\"Creates high-level Redis interface.\n\n This function is a coroutine.\n \"\"\"\n pool = yield from create_pool(address, db=db,\n password=password,\n ssl=ssl,\n encoding=encoding,\n minsize=minsize,\n maxsize=maxsize,\n parser=parser,\n create_connection_timeout=timeout,\n pool_cls=pool_cls,\n connection_cls=connection_cls,\n loop=loop)\n return commands_factory(pool)\n", "path": "aioredis/commands/__init__.py"}]}
| 2,231 | 597 |
| gh_patches_debug_9375 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-2304 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MkDocs's media url shouldn't be hardcode with media_url = 'http://localhost:8000' + media_url
In file `readthedocs.org/readthedocs/doc_builder/backends/mkdocs.py`:
``` python
55 # Set mkdocs config values
56
57 media_url = getattr(settings, 'MEDIA_URL', 'https://media.readthedocs.org')
58
59 # Mkdocs needs a full domain here because it tries to link to local media files
60 if not media_url.startswith('http'):
61 media_url = 'http://localhost:8000' + media_url
```
Can u please to replace it with `SITE_URL` as the follows:
``` python
59 # Mkdocs needs a full domain here because it tries to link to local media files
60 if not media_url.startswith('http'):
61 media_url = getattr(settings, 'SITE_URL', None) + media_url
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/doc_builder/backends/mkdocs.py`
Content:
```
1 import os
2 import logging
3 import json
4 import yaml
5
6 from django.conf import settings
7 from django.template import Context, loader as template_loader
8
9 from readthedocs.doc_builder.base import BaseBuilder
10
11 log = logging.getLogger(__name__)
12
13 TEMPLATE_DIR = '%s/readthedocs/templates/mkdocs/readthedocs' % settings.SITE_ROOT
14 OVERRIDE_TEMPLATE_DIR = '%s/readthedocs/templates/mkdocs/overrides' % settings.SITE_ROOT
15
16
17 class BaseMkdocs(BaseBuilder):
18
19 """
20 Mkdocs builder
21 """
22 use_theme = True
23
24 def __init__(self, *args, **kwargs):
25 super(BaseMkdocs, self).__init__(*args, **kwargs)
26 self.old_artifact_path = os.path.join(
27 self.version.project.checkout_path(self.version.slug),
28 self.build_dir)
29 self.root_path = self.version.project.checkout_path(self.version.slug)
30
31 def append_conf(self, **kwargs):
32 """
33 Set mkdocs config values
34 """
35
36 # Pull mkdocs config data
37 try:
38 user_config = yaml.safe_load(
39 open(os.path.join(self.root_path, 'mkdocs.yml'), 'r')
40 )
41 except IOError:
42 user_config = {
43 'site_name': self.version.project.name,
44 }
45
46 # Handle custom docs dirs
47
48 user_docs_dir = user_config.get('docs_dir')
49 if user_docs_dir:
50 user_docs_dir = os.path.join(self.root_path, user_docs_dir)
51 docs_dir = self.docs_dir(docs_dir=user_docs_dir)
52 self.create_index(extension='md')
53 user_config['docs_dir'] = docs_dir
54
55 # Set mkdocs config values
56
57 media_url = getattr(settings, 'MEDIA_URL', 'https://media.readthedocs.org')
58
59 # Mkdocs needs a full domain here because it tries to link to local media files
60 if not media_url.startswith('http'):
61 media_url = 'http://localhost:8000' + media_url
62
63 if 'extra_javascript' in user_config:
64 user_config['extra_javascript'].append('readthedocs-data.js')
65 user_config['extra_javascript'].append(
66 'readthedocs-dynamic-include.js')
67 user_config['extra_javascript'].append(
68 '%sjavascript/readthedocs-doc-embed.js' % media_url)
69 else:
70 user_config['extra_javascript'] = [
71 'readthedocs-data.js',
72 'readthedocs-dynamic-include.js',
73 '%sjavascript/readthedocs-doc-embed.js' % media_url,
74 ]
75
76 if 'extra_css' in user_config:
77 user_config['extra_css'].append(
78 '%s/css/badge_only.css' % media_url)
79 user_config['extra_css'].append(
80 '%s/css/readthedocs-doc-embed.css' % media_url)
81 else:
82 user_config['extra_css'] = [
83 '%scss/badge_only.css' % media_url,
84 '%scss/readthedocs-doc-embed.css' % media_url,
85 ]
86
87 # Set our custom theme dir for mkdocs
88 if 'theme_dir' not in user_config and self.use_theme:
89 user_config['theme_dir'] = TEMPLATE_DIR
90
91 yaml.dump(
92 user_config,
93 open(os.path.join(self.root_path, 'mkdocs.yml'), 'w')
94 )
95
96 # RTD javascript writing
97
98 # Will be available in the JavaScript as READTHEDOCS_DATA.
99 readthedocs_data = {
100 'project': self.version.project.slug,
101 'version': self.version.slug,
102 'language': self.version.project.language,
103 'page': None,
104 'theme': "readthedocs",
105 'builder': "mkdocs",
106 'docroot': docs_dir,
107 'source_suffix': ".md",
108 'api_host': getattr(settings, 'PUBLIC_API_URL',
109 'https://readthedocs.org'),
110 'commit': self.version.project.vcs_repo(self.version.slug).commit,
111 }
112 data_json = json.dumps(readthedocs_data, indent=4)
113 data_ctx = {
114 'data_json': data_json,
115 'current_version': readthedocs_data['version'],
116 'slug': readthedocs_data['project'],
117 'html_theme': readthedocs_data['theme'],
118 'pagename': None,
119 }
120 data_string = template_loader.get_template(
121 'doc_builder/data.js.tmpl'
122 ).render(data_ctx)
123
124 data_file = open(os.path.join(self.root_path, docs_dir, 'readthedocs-data.js'), 'w+')
125 data_file.write(data_string)
126 data_file.write('''
127 READTHEDOCS_DATA["page"] = mkdocs_page_input_path.substr(
128 0, mkdocs_page_input_path.lastIndexOf(READTHEDOCS_DATA.source_suffix));
129 ''')
130 data_file.close()
131
132 include_ctx = {
133 'global_analytics_code': getattr(settings, 'GLOBAL_ANALYTICS_CODE', 'UA-17997319-1'),
134 'user_analytics_code': self.version.project.analytics_code,
135 }
136 include_string = template_loader.get_template(
137 'doc_builder/include.js.tmpl'
138 ).render(include_ctx)
139 include_file = open(
140 os.path.join(self.root_path, docs_dir, 'readthedocs-dynamic-include.js'),
141 'w+'
142 )
143 include_file.write(include_string)
144 include_file.close()
145
146 def build(self, **kwargs):
147 checkout_path = self.project.checkout_path(self.version.slug)
148 build_command = [
149 'python',
150 self.python_env.venv_bin(filename='mkdocs'),
151 self.builder,
152 '--clean',
153 '--site-dir', self.build_dir,
154 ]
155 if self.use_theme:
156 build_command.extend(['--theme', 'readthedocs'])
157 cmd_ret = self.run(
158 *build_command,
159 cwd=checkout_path,
160 bin_path=self.python_env.venv_bin()
161 )
162 return cmd_ret.successful
163
164
165 class MkdocsHTML(BaseMkdocs):
166 type = 'mkdocs'
167 builder = 'build'
168 build_dir = '_build/html'
169
170
171 class MkdocsJSON(BaseMkdocs):
172 type = 'mkdocs_json'
173 builder = 'json'
174 build_dir = '_build/json'
175 use_theme = False
176
177 def build(self, **kwargs):
178 user_config = yaml.safe_load(
179 open(os.path.join(self.root_path, 'mkdocs.yml'), 'r')
180 )
181 if user_config['theme_dir'] == TEMPLATE_DIR:
182 del user_config['theme_dir']
183 yaml.dump(
184 user_config,
185 open(os.path.join(self.root_path, 'mkdocs.yml'), 'w')
186 )
187 super(MkdocsJSON, self).build(**kwargs)
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/doc_builder/backends/mkdocs.py b/readthedocs/doc_builder/backends/mkdocs.py
--- a/readthedocs/doc_builder/backends/mkdocs.py
+++ b/readthedocs/doc_builder/backends/mkdocs.py
@@ -58,7 +58,8 @@
# Mkdocs needs a full domain here because it tries to link to local media files
if not media_url.startswith('http'):
- media_url = 'http://localhost:8000' + media_url
+ domain = getattr(settings, 'PRODUCTION_DOMAIN')
+ media_url = 'http://{}/{}'.format(domain, media_url)
if 'extra_javascript' in user_config:
user_config['extra_javascript'].append('readthedocs-data.js')
|
{"golden_diff": "diff --git a/readthedocs/doc_builder/backends/mkdocs.py b/readthedocs/doc_builder/backends/mkdocs.py\n--- a/readthedocs/doc_builder/backends/mkdocs.py\n+++ b/readthedocs/doc_builder/backends/mkdocs.py\n@@ -58,7 +58,8 @@\n \n # Mkdocs needs a full domain here because it tries to link to local media files\n if not media_url.startswith('http'):\n- media_url = 'http://localhost:8000' + media_url\n+ domain = getattr(settings, 'PRODUCTION_DOMAIN')\n+ media_url = 'http://{}/{}'.format(domain, media_url)\n \n if 'extra_javascript' in user_config:\n user_config['extra_javascript'].append('readthedocs-data.js')\n", "issue": "MkDocs's media url shouldn't be hardcode with media_url = 'http://localhost:8000' + media_url\nIn file `readthedocs.org/readthedocs/doc_builder/backends/mkdocs.py`:\n\n``` python\n55 # Set mkdocs config values\n56\n57 media_url = getattr(settings, 'MEDIA_URL', 'https://media.readthedocs.org')\n58\n59 # Mkdocs needs a full domain here because it tries to link to local media files\n60 if not media_url.startswith('http'):\n61 media_url = 'http://localhost:8000' + media_url\n```\n\nCan u please to replace it with `SITE_URL` as the follows:\n\n``` python\n59 # Mkdocs needs a full domain here because it tries to link to local media files\n60 if not media_url.startswith('http'):\n61 media_url = getattr(settings, 'SITE_URL', None) + media_url\n```\n\n", "before_files": [{"content": "import os\nimport logging\nimport json\nimport yaml\n\nfrom django.conf import settings\nfrom django.template import Context, loader as template_loader\n\nfrom readthedocs.doc_builder.base import BaseBuilder\n\nlog = logging.getLogger(__name__)\n\nTEMPLATE_DIR = '%s/readthedocs/templates/mkdocs/readthedocs' % settings.SITE_ROOT\nOVERRIDE_TEMPLATE_DIR = '%s/readthedocs/templates/mkdocs/overrides' % settings.SITE_ROOT\n\n\nclass BaseMkdocs(BaseBuilder):\n\n \"\"\"\n Mkdocs builder\n \"\"\"\n use_theme = True\n\n def __init__(self, *args, **kwargs):\n super(BaseMkdocs, self).__init__(*args, **kwargs)\n self.old_artifact_path = os.path.join(\n self.version.project.checkout_path(self.version.slug),\n self.build_dir)\n self.root_path = self.version.project.checkout_path(self.version.slug)\n\n def append_conf(self, **kwargs):\n \"\"\"\n Set mkdocs config values\n \"\"\"\n\n # Pull mkdocs config data\n try:\n user_config = yaml.safe_load(\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'r')\n )\n except IOError:\n user_config = {\n 'site_name': self.version.project.name,\n }\n\n # Handle custom docs dirs\n\n user_docs_dir = user_config.get('docs_dir')\n if user_docs_dir:\n user_docs_dir = os.path.join(self.root_path, user_docs_dir)\n docs_dir = self.docs_dir(docs_dir=user_docs_dir)\n self.create_index(extension='md')\n user_config['docs_dir'] = docs_dir\n\n # Set mkdocs config values\n\n media_url = getattr(settings, 'MEDIA_URL', 'https://media.readthedocs.org')\n\n # Mkdocs needs a full domain here because it tries to link to local media files\n if not media_url.startswith('http'):\n media_url = 'http://localhost:8000' + media_url\n\n if 'extra_javascript' in user_config:\n user_config['extra_javascript'].append('readthedocs-data.js')\n user_config['extra_javascript'].append(\n 'readthedocs-dynamic-include.js')\n user_config['extra_javascript'].append(\n '%sjavascript/readthedocs-doc-embed.js' % media_url)\n else:\n user_config['extra_javascript'] = [\n 'readthedocs-data.js',\n 'readthedocs-dynamic-include.js',\n '%sjavascript/readthedocs-doc-embed.js' % media_url,\n ]\n\n if 'extra_css' in 
user_config:\n user_config['extra_css'].append(\n '%s/css/badge_only.css' % media_url)\n user_config['extra_css'].append(\n '%s/css/readthedocs-doc-embed.css' % media_url)\n else:\n user_config['extra_css'] = [\n '%scss/badge_only.css' % media_url,\n '%scss/readthedocs-doc-embed.css' % media_url,\n ]\n\n # Set our custom theme dir for mkdocs\n if 'theme_dir' not in user_config and self.use_theme:\n user_config['theme_dir'] = TEMPLATE_DIR\n\n yaml.dump(\n user_config,\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'w')\n )\n\n # RTD javascript writing\n\n # Will be available in the JavaScript as READTHEDOCS_DATA.\n readthedocs_data = {\n 'project': self.version.project.slug,\n 'version': self.version.slug,\n 'language': self.version.project.language,\n 'page': None,\n 'theme': \"readthedocs\",\n 'builder': \"mkdocs\",\n 'docroot': docs_dir,\n 'source_suffix': \".md\",\n 'api_host': getattr(settings, 'PUBLIC_API_URL',\n 'https://readthedocs.org'),\n 'commit': self.version.project.vcs_repo(self.version.slug).commit,\n }\n data_json = json.dumps(readthedocs_data, indent=4)\n data_ctx = {\n 'data_json': data_json,\n 'current_version': readthedocs_data['version'],\n 'slug': readthedocs_data['project'],\n 'html_theme': readthedocs_data['theme'],\n 'pagename': None,\n }\n data_string = template_loader.get_template(\n 'doc_builder/data.js.tmpl'\n ).render(data_ctx)\n\n data_file = open(os.path.join(self.root_path, docs_dir, 'readthedocs-data.js'), 'w+')\n data_file.write(data_string)\n data_file.write('''\nREADTHEDOCS_DATA[\"page\"] = mkdocs_page_input_path.substr(\n 0, mkdocs_page_input_path.lastIndexOf(READTHEDOCS_DATA.source_suffix));\n''')\n data_file.close()\n\n include_ctx = {\n 'global_analytics_code': getattr(settings, 'GLOBAL_ANALYTICS_CODE', 'UA-17997319-1'),\n 'user_analytics_code': self.version.project.analytics_code,\n }\n include_string = template_loader.get_template(\n 'doc_builder/include.js.tmpl'\n ).render(include_ctx)\n include_file = open(\n os.path.join(self.root_path, docs_dir, 'readthedocs-dynamic-include.js'),\n 'w+'\n )\n include_file.write(include_string)\n include_file.close()\n\n def build(self, **kwargs):\n checkout_path = self.project.checkout_path(self.version.slug)\n build_command = [\n 'python',\n self.python_env.venv_bin(filename='mkdocs'),\n self.builder,\n '--clean',\n '--site-dir', self.build_dir,\n ]\n if self.use_theme:\n build_command.extend(['--theme', 'readthedocs'])\n cmd_ret = self.run(\n *build_command,\n cwd=checkout_path,\n bin_path=self.python_env.venv_bin()\n )\n return cmd_ret.successful\n\n\nclass MkdocsHTML(BaseMkdocs):\n type = 'mkdocs'\n builder = 'build'\n build_dir = '_build/html'\n\n\nclass MkdocsJSON(BaseMkdocs):\n type = 'mkdocs_json'\n builder = 'json'\n build_dir = '_build/json'\n use_theme = False\n\n def build(self, **kwargs):\n user_config = yaml.safe_load(\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'r')\n )\n if user_config['theme_dir'] == TEMPLATE_DIR:\n del user_config['theme_dir']\n yaml.dump(\n user_config,\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'w')\n )\n super(MkdocsJSON, self).build(**kwargs)\n", "path": "readthedocs/doc_builder/backends/mkdocs.py"}], "after_files": [{"content": "import os\nimport logging\nimport json\nimport yaml\n\nfrom django.conf import settings\nfrom django.template import Context, loader as template_loader\n\nfrom readthedocs.doc_builder.base import BaseBuilder\n\nlog = logging.getLogger(__name__)\n\nTEMPLATE_DIR = '%s/readthedocs/templates/mkdocs/readthedocs' % 
settings.SITE_ROOT\nOVERRIDE_TEMPLATE_DIR = '%s/readthedocs/templates/mkdocs/overrides' % settings.SITE_ROOT\n\n\nclass BaseMkdocs(BaseBuilder):\n\n \"\"\"\n Mkdocs builder\n \"\"\"\n use_theme = True\n\n def __init__(self, *args, **kwargs):\n super(BaseMkdocs, self).__init__(*args, **kwargs)\n self.old_artifact_path = os.path.join(\n self.version.project.checkout_path(self.version.slug),\n self.build_dir)\n self.root_path = self.version.project.checkout_path(self.version.slug)\n\n def append_conf(self, **kwargs):\n \"\"\"\n Set mkdocs config values\n \"\"\"\n\n # Pull mkdocs config data\n try:\n user_config = yaml.safe_load(\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'r')\n )\n except IOError:\n user_config = {\n 'site_name': self.version.project.name,\n }\n\n # Handle custom docs dirs\n\n user_docs_dir = user_config.get('docs_dir')\n if user_docs_dir:\n user_docs_dir = os.path.join(self.root_path, user_docs_dir)\n docs_dir = self.docs_dir(docs_dir=user_docs_dir)\n self.create_index(extension='md')\n user_config['docs_dir'] = docs_dir\n\n # Set mkdocs config values\n\n media_url = getattr(settings, 'MEDIA_URL', 'https://media.readthedocs.org')\n\n # Mkdocs needs a full domain here because it tries to link to local media files\n if not media_url.startswith('http'):\n domain = getattr(settings, 'PRODUCTION_DOMAIN')\n media_url = 'http://{}/{}'.format(domain, media_url)\n\n if 'extra_javascript' in user_config:\n user_config['extra_javascript'].append('readthedocs-data.js')\n user_config['extra_javascript'].append(\n 'readthedocs-dynamic-include.js')\n user_config['extra_javascript'].append(\n '%sjavascript/readthedocs-doc-embed.js' % media_url)\n else:\n user_config['extra_javascript'] = [\n 'readthedocs-data.js',\n 'readthedocs-dynamic-include.js',\n '%sjavascript/readthedocs-doc-embed.js' % media_url,\n ]\n\n if 'extra_css' in user_config:\n user_config['extra_css'].append(\n '%s/css/badge_only.css' % media_url)\n user_config['extra_css'].append(\n '%s/css/readthedocs-doc-embed.css' % media_url)\n else:\n user_config['extra_css'] = [\n '%scss/badge_only.css' % media_url,\n '%scss/readthedocs-doc-embed.css' % media_url,\n ]\n\n # Set our custom theme dir for mkdocs\n if 'theme_dir' not in user_config and self.use_theme:\n user_config['theme_dir'] = TEMPLATE_DIR\n\n yaml.dump(\n user_config,\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'w')\n )\n\n # RTD javascript writing\n\n # Will be available in the JavaScript as READTHEDOCS_DATA.\n readthedocs_data = {\n 'project': self.version.project.slug,\n 'version': self.version.slug,\n 'language': self.version.project.language,\n 'page': None,\n 'theme': \"readthedocs\",\n 'builder': \"mkdocs\",\n 'docroot': docs_dir,\n 'source_suffix': \".md\",\n 'api_host': getattr(settings, 'PUBLIC_API_URL',\n 'https://readthedocs.org'),\n 'commit': self.version.project.vcs_repo(self.version.slug).commit,\n }\n data_json = json.dumps(readthedocs_data, indent=4)\n data_ctx = {\n 'data_json': data_json,\n 'current_version': readthedocs_data['version'],\n 'slug': readthedocs_data['project'],\n 'html_theme': readthedocs_data['theme'],\n 'pagename': None,\n }\n data_string = template_loader.get_template(\n 'doc_builder/data.js.tmpl'\n ).render(data_ctx)\n\n data_file = open(os.path.join(self.root_path, docs_dir, 'readthedocs-data.js'), 'w+')\n data_file.write(data_string)\n data_file.write('''\nREADTHEDOCS_DATA[\"page\"] = mkdocs_page_input_path.substr(\n 0, mkdocs_page_input_path.lastIndexOf(READTHEDOCS_DATA.source_suffix));\n''')\n 
data_file.close()\n\n include_ctx = {\n 'global_analytics_code': getattr(settings, 'GLOBAL_ANALYTICS_CODE', 'UA-17997319-1'),\n 'user_analytics_code': self.version.project.analytics_code,\n }\n include_string = template_loader.get_template(\n 'doc_builder/include.js.tmpl'\n ).render(include_ctx)\n include_file = open(\n os.path.join(self.root_path, docs_dir, 'readthedocs-dynamic-include.js'),\n 'w+'\n )\n include_file.write(include_string)\n include_file.close()\n\n def build(self, **kwargs):\n checkout_path = self.project.checkout_path(self.version.slug)\n build_command = [\n 'python',\n self.python_env.venv_bin(filename='mkdocs'),\n self.builder,\n '--clean',\n '--site-dir', self.build_dir,\n ]\n if self.use_theme:\n build_command.extend(['--theme', 'readthedocs'])\n cmd_ret = self.run(\n *build_command,\n cwd=checkout_path,\n bin_path=self.python_env.venv_bin()\n )\n return cmd_ret.successful\n\n\nclass MkdocsHTML(BaseMkdocs):\n type = 'mkdocs'\n builder = 'build'\n build_dir = '_build/html'\n\n\nclass MkdocsJSON(BaseMkdocs):\n type = 'mkdocs_json'\n builder = 'json'\n build_dir = '_build/json'\n use_theme = False\n\n def build(self, **kwargs):\n user_config = yaml.safe_load(\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'r')\n )\n if user_config['theme_dir'] == TEMPLATE_DIR:\n del user_config['theme_dir']\n yaml.dump(\n user_config,\n open(os.path.join(self.root_path, 'mkdocs.yml'), 'w')\n )\n super(MkdocsJSON, self).build(**kwargs)\n", "path": "readthedocs/doc_builder/backends/mkdocs.py"}]}
| 2,378 | 172 |
| gh_patches_debug_15791 | rasdani/github-patches | git_diff | pypa__setuptools-3031 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Having setuptools installed causes cpython stdlib build to fail
### setuptools version
setuptools>=60
### Python version
Python 3.9.9 (and probably others)
### OS
Probably affects all; reported on macOS
### Additional environment information
Originally reported in the context of MacPorts' python39 and py39-setuptools ports.
### Description
If you have setuptools installed and attempt to build cpython from source, setuptools' copy of distutils may be used when building the stdlib modules and cause the build to fail due to an incompatibility.
This is easily cured by setting `SETUPTOOLS_USE_DISTUTILS=stdlib` if you understand the problem, but if you don't, it's a pretty baffling failure mode.
Downstream report: https://trac.macports.org/ticket/64352
### Expected behavior
Cpython build uses its own distutils and succeeds.
### How to Reproduce
1. Install setuptools into a convenient site-packages directory.
2. Configure cpython to install in a prefix such that it will be using the site-packages directory from step 1.
3. Build cpython.
### Output
```console
Traceback (most recent call last):
File "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py", line 2598, in <module>
main()
File "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py", line 2568, in main
setup(# PyPI Metadata (PEP 301)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/core.py", line 148, in setup
return run_commands(dist)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/core.py", line 163, in run_commands
dist.run_commands()
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/dist.py", line 967, in run_commands
self.run_command(cmd)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/dist.py", line 986, in run_command
cmd_obj.run()
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build.py", line 135, in run
self.run_command(cmd_name)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/cmd.py", line 313, in run_command
self.distribution.run_command(command)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/dist.py", line 986, in run_command
cmd_obj.run()
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py", line 339, in run
self.build_extensions()
File "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py", line 456, in build_extensions
build_ext.build_extensions(self)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py", line 446, in build_extensions
self._build_extensions_parallel()
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py", line 468, in _build_extensions_parallel
fut.result()
File "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/Lib/concurrent/futures/_base.py", line 445, in result
return self.__get_result()
File "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/Lib/concurrent/futures/_base.py", line 390, in __get_result
raise self._exception
File "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/Lib/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py", line 554, in build_extension
build_ext.build_extension(self, ext)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py", line 528, in build_extension
objects = self.compiler.compile(sources,
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/ccompiler.py", line 574, in compile
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/unixccompiler.py", line 117, in _compile
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/ccompiler.py", line 917, in spawn
spawn(cmd, dry_run=self.dry_run, **kwargs)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/spawn.py", line 38, in spawn
log.info(subprocess.list2cmdline(cmd))
AttributeError: module '_bootsubprocess' has no attribute 'list2cmdline'
make: *** [sharedmods] Error 1
make: Leaving directory `/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9'
Command failed: cd "/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9" && /usr/bin/make -j2 -w all
```
### Code of Conduct
- [ ] I agree to follow the PSF Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `_distutils_hack/__init__.py`
Content:
```
1 # don't import any costly modules
2 import sys
3 import os
4
5
6 is_pypy = '__pypy__' in sys.builtin_module_names
7
8
9 def warn_distutils_present():
10 if 'distutils' not in sys.modules:
11 return
12 if is_pypy and sys.version_info < (3, 7):
13 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
14 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
15 return
16 import warnings
17 warnings.warn(
18 "Distutils was imported before Setuptools, but importing Setuptools "
19 "also replaces the `distutils` module in `sys.modules`. This may lead "
20 "to undesirable behaviors or errors. To avoid these issues, avoid "
21 "using distutils directly, ensure that setuptools is installed in the "
22 "traditional way (e.g. not an editable install), and/or make sure "
23 "that setuptools is always imported before distutils.")
24
25
26 def clear_distutils():
27 if 'distutils' not in sys.modules:
28 return
29 import warnings
30 warnings.warn("Setuptools is replacing distutils.")
31 mods = [
32 name for name in sys.modules
33 if name == "distutils" or name.startswith("distutils.")
34 ]
35 for name in mods:
36 del sys.modules[name]
37
38
39 def enabled():
40 """
41 Allow selection of distutils by environment variable.
42 """
43 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
44 return which == 'local'
45
46
47 def ensure_local_distutils():
48 import importlib
49 clear_distutils()
50
51 # With the DistutilsMetaFinder in place,
52 # perform an import to cause distutils to be
53 # loaded from setuptools._distutils. Ref #2906.
54 with shim():
55 importlib.import_module('distutils')
56
57 # check that submodules load as expected
58 core = importlib.import_module('distutils.core')
59 assert '_distutils' in core.__file__, core.__file__
60
61
62 def do_override():
63 """
64 Ensure that the local copy of distutils is preferred over stdlib.
65
66 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
67 for more motivation.
68 """
69 if enabled():
70 warn_distutils_present()
71 ensure_local_distutils()
72
73
74 class _TrivialRe:
75 def __init__(self, *patterns):
76 self._patterns = patterns
77
78 def match(self, string):
79 return all(pat in string for pat in self._patterns)
80
81
82 class DistutilsMetaFinder:
83 def find_spec(self, fullname, path, target=None):
84 if path is not None:
85 return
86
87 method_name = 'spec_for_{fullname}'.format(**locals())
88 method = getattr(self, method_name, lambda: None)
89 return method()
90
91 def spec_for_distutils(self):
92 import importlib
93 import importlib.abc
94 import importlib.util
95
96 try:
97 mod = importlib.import_module('setuptools._distutils')
98 except Exception:
99 # There are a couple of cases where setuptools._distutils
100 # may not be present:
101 # - An older Setuptools without a local distutils is
102 # taking precedence. Ref #2957.
103 # - Path manipulation during sitecustomize removes
104 # setuptools from the path but only after the hook
105 # has been loaded. Ref #2980.
106 # In either case, fall back to stdlib behavior.
107 return
108
109 class DistutilsLoader(importlib.abc.Loader):
110
111 def create_module(self, spec):
112 return mod
113
114 def exec_module(self, module):
115 pass
116
117 return importlib.util.spec_from_loader(
118 'distutils', DistutilsLoader(), origin=mod.__file__
119 )
120
121 def spec_for_pip(self):
122 """
123 Ensure stdlib distutils when running under pip.
124 See pypa/pip#8761 for rationale.
125 """
126 if self.pip_imported_during_build():
127 return
128 clear_distutils()
129 self.spec_for_distutils = lambda: None
130
131 def spec_for_setuptools(self):
132 """
133 get-pip imports setuptools solely for the purpose of
134 determining if it's installed. In this case, provide
135 a stubbed spec to represent setuptools being present
136 without invoking any behavior.
137
138 Workaround for pypa/get-pip#137. Ref #2993.
139 """
140 if not self.is_script('get-pip'):
141 return
142
143 import importlib
144
145 class StubbedLoader(importlib.abc.Loader):
146
147 def create_module(self, spec):
148 import types
149 return types.ModuleType('setuptools')
150
151 def exec_module(self, module):
152 pass
153
154 return importlib.util.spec_from_loader(
155 'setuptools', StubbedLoader(),
156 )
157
158 @classmethod
159 def pip_imported_during_build(cls):
160 """
161 Detect if pip is being imported in a build script. Ref #2355.
162 """
163 import traceback
164 return any(
165 cls.frame_file_is_setup(frame)
166 for frame, line in traceback.walk_stack(None)
167 )
168
169 @staticmethod
170 def is_script(name):
171 try:
172 import __main__
173 return os.path.basename(__main__.__file__) == f'{name}.py'
174 except AttributeError:
175 pass
176
177 @staticmethod
178 def frame_file_is_setup(frame):
179 """
180 Return True if the indicated frame suggests a setup.py file.
181 """
182 # some frames may not have __file__ (#2940)
183 return frame.f_globals.get('__file__', '').endswith('setup.py')
184
185
186 DISTUTILS_FINDER = DistutilsMetaFinder()
187
188
189 def add_shim():
190 DISTUTILS_FINDER in sys.meta_path or insert_shim()
191
192
193 class shim:
194 def __enter__(self):
195 insert_shim()
196
197 def __exit__(self, exc, value, tb):
198 remove_shim()
199
200
201 def insert_shim():
202 sys.meta_path.insert(0, DISTUTILS_FINDER)
203
204
205 def remove_shim():
206 try:
207 sys.meta_path.remove(DISTUTILS_FINDER)
208 except ValueError:
209 pass
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -89,6 +89,9 @@
return method()
def spec_for_distutils(self):
+ if self.is_cpython():
+ return
+
import importlib
import importlib.abc
import importlib.util
@@ -118,6 +121,14 @@
'distutils', DistutilsLoader(), origin=mod.__file__
)
+ @staticmethod
+ def is_cpython():
+ """
+ Suppress supplying distutils for CPython (build and tests).
+ Ref #2965 and #3007.
+ """
+ return os.path.isfile('pybuilddir.txt')
+
def spec_for_pip(self):
"""
Ensure stdlib distutils when running under pip.
|
{"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -89,6 +89,9 @@\n return method()\n \n def spec_for_distutils(self):\n+ if self.is_cpython():\n+ return\n+\n import importlib\n import importlib.abc\n import importlib.util\n@@ -118,6 +121,14 @@\n 'distutils', DistutilsLoader(), origin=mod.__file__\n )\n \n+ @staticmethod\n+ def is_cpython():\n+ \"\"\"\n+ Suppress supplying distutils for CPython (build and tests).\n+ Ref #2965 and #3007.\n+ \"\"\"\n+ return os.path.isfile('pybuilddir.txt')\n+\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n", "issue": "[BUG] Having setuptools installed causes cpython stdlib build to fail\n### setuptools version\n\nsetuptools>=60\n\n### Python version\n\nPython 3.9.9 (and probably others)\n\n### OS\n\nProbably affects all; reported on macOS\n\n### Additional environment information\n\nOriginally reported in the context of MacPorts' python39 and py39-setuptools ports.\n\n### Description\n\nIf you have setuptools installed and attempt to build cpython from source, setuptools' copy of distutils may be used when building the stdlib modules and cause the build to fail due to an incompatibility.\r\n\r\nThis is easily cured by setting `SETUPTOOLS_USE_DISTUTILS=stdlib` if you understand the problem, but if you don't, it's a pretty baffling failure mode.\r\n\r\nDownstream report: https://trac.macports.org/ticket/64352\n\n### Expected behavior\n\nCpython build uses its own distutils and succeeds.\n\n### How to Reproduce\n\n1. Install setuptools into a convenient site-packages directory.\r\n2. Configure cpython to install in a prefix such that it will be using the site-packages directory from step 1.\r\n3. 
Build cpython.\n\n### Output\n\n```console\r\nTraceback (most recent call last):\r\n File \"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py\", line 2598, in <module>\r\n main()\r\n File \"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py\", line 2568, in main\r\n setup(# PyPI Metadata (PEP 301)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/core.py\", line 148, in setup\r\n return run_commands(dist)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/core.py\", line 163, in run_commands\r\n dist.run_commands()\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/dist.py\", line 967, in run_commands\r\n self.run_command(cmd)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/dist.py\", line 986, in run_command\r\n cmd_obj.run()\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build.py\", line 135, in run\r\n self.run_command(cmd_name)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/cmd.py\", line 313, in run_command\r\n self.distribution.run_command(command)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/dist.py\", line 986, in run_command\r\n cmd_obj.run()\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py\", line 339, in run\r\n self.build_extensions()\r\n File \"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py\", line 456, in build_extensions\r\n build_ext.build_extensions(self)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py\", line 446, in build_extensions\r\n self._build_extensions_parallel()\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py\", line 468, in _build_extensions_parallel\r\n fut.result()\r\n File \"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/Lib/concurrent/futures/_base.py\", line 445, in result\r\n return self.__get_result()\r\n File \"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/Lib/concurrent/futures/_base.py\", line 390, in __get_result\r\n raise self._exception\r\n File \"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/Lib/concurrent/futures/thread.py\", line 58, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File 
\"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9/./setup.py\", line 554, in build_extension\r\n build_ext.build_extension(self, ext)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/command/build_ext.py\", line 528, in build_extension\r\n objects = self.compiler.compile(sources,\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/ccompiler.py\", line 574, in compile\r\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/unixccompiler.py\", line 117, in _compile\r\n self.spawn(compiler_so + cc_args + [src, '-o', obj] +\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/ccompiler.py\", line 917, in spawn\r\n spawn(cmd, dry_run=self.dry_run, **kwargs)\r\n File \"/opt/local/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/setuptools/_distutils/spawn.py\", line 38, in spawn\r\n log.info(subprocess.list2cmdline(cmd))\r\nAttributeError: module '_bootsubprocess' has no attribute 'list2cmdline'\r\nmake: *** [sharedmods] Error 1\r\nmake: Leaving directory `/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9'\r\nCommand failed: cd \"/opt/local/var/macports/build/_opt_local_var_macports_sources_rsync.macports.org_macports_release_tarballs_ports_lang_python39/python39/work/Python-3.9.9\" && /usr/bin/make -j2 -w all\r\n```\r\n\n\n### Code of Conduct\n\n- [ ] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "# don't import any costly modules\nimport sys\nimport os\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n import warnings\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n import warnings\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [\n name for name in sys.modules\n if name == \"distutils\" or name.startswith(\"distutils.\")\n ]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n import importlib\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. 
Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass _TrivialRe:\n def __init__(self, *patterns):\n self._patterns = patterns\n\n def match(self, string):\n return all(pat in string for pat in self._patterns)\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib\n import importlib.abc\n import importlib.util\n\n try:\n mod = importlib.import_module('setuptools._distutils')\n except Exception:\n # There are a couple of cases where setuptools._distutils\n # may not be present:\n # - An older Setuptools without a local distutils is\n # taking precedence. Ref #2957.\n # - Path manipulation during sitecustomize removes\n # setuptools from the path but only after the hook\n # has been loaded. Ref #2980.\n # In either case, fall back to stdlib behavior.\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return mod\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'distutils', DistutilsLoader(), origin=mod.__file__\n )\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n def spec_for_setuptools(self):\n \"\"\"\n get-pip imports setuptools solely for the purpose of\n determining if it's installed. In this case, provide\n a stubbed spec to represent setuptools being present\n without invoking any behavior.\n\n Workaround for pypa/get-pip#137. Ref #2993.\n \"\"\"\n if not self.is_script('get-pip'):\n return\n\n import importlib\n\n class StubbedLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n import types\n return types.ModuleType('setuptools')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'setuptools', StubbedLoader(),\n )\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. 
Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @staticmethod\n def is_script(name):\n try:\n import __main__\n return os.path.basename(__main__.__file__) == f'{name}.py'\n except AttributeError:\n pass\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n DISTUTILS_FINDER in sys.meta_path or insert_shim()\n\n\nclass shim:\n def __enter__(self):\n insert_shim()\n\n def __exit__(self, exc, value, tb):\n remove_shim()\n\n\ndef insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}], "after_files": [{"content": "# don't import any costly modules\nimport sys\nimport os\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n import warnings\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n import warnings\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [\n name for name in sys.modules\n if name == \"distutils\" or name.startswith(\"distutils.\")\n ]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n import importlib\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. 
Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass _TrivialRe:\n def __init__(self, *patterns):\n self._patterns = patterns\n\n def match(self, string):\n return all(pat in string for pat in self._patterns)\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n if self.is_cpython():\n return\n\n import importlib\n import importlib.abc\n import importlib.util\n\n try:\n mod = importlib.import_module('setuptools._distutils')\n except Exception:\n # There are a couple of cases where setuptools._distutils\n # may not be present:\n # - An older Setuptools without a local distutils is\n # taking precedence. Ref #2957.\n # - Path manipulation during sitecustomize removes\n # setuptools from the path but only after the hook\n # has been loaded. Ref #2980.\n # In either case, fall back to stdlib behavior.\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return mod\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'distutils', DistutilsLoader(), origin=mod.__file__\n )\n\n @staticmethod\n def is_cpython():\n \"\"\"\n Suppress supplying distutils for CPython (build and tests).\n Ref #2965 and #3007.\n \"\"\"\n return os.path.isfile('pybuilddir.txt')\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n def spec_for_setuptools(self):\n \"\"\"\n get-pip imports setuptools solely for the purpose of\n determining if it's installed. In this case, provide\n a stubbed spec to represent setuptools being present\n without invoking any behavior.\n\n Workaround for pypa/get-pip#137. Ref #2993.\n \"\"\"\n if not self.is_script('get-pip'):\n return\n\n import importlib\n\n class StubbedLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n import types\n return types.ModuleType('setuptools')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'setuptools', StubbedLoader(),\n )\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. 
Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @staticmethod\n def is_script(name):\n try:\n import __main__\n return os.path.basename(__main__.__file__) == f'{name}.py'\n except AttributeError:\n pass\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n DISTUTILS_FINDER in sys.meta_path or insert_shim()\n\n\nclass shim:\n def __enter__(self):\n insert_shim()\n\n def __exit__(self, exc, value, tb):\n remove_shim()\n\n\ndef insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]}
| 3,869 | 220 |
gh_patches_debug_4929
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-1283
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add search by github handle to API persons endpoint
Would it be possible to add the functionality to search for people using their github handle through the API? i.e. `https://amy.software-carpentry.org/api/v1/persons/?github=fmichonneau`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/filters.py`
Content:
```
1 from django_filters import rest_framework as filters
2
3 from workshops.filters import AMYFilterSet
4 from workshops.models import Event, Task, Tag, Person, Badge
5
6
7 def filter_tag_by_name(queryset, name, values):
8 tags = Tag.objects.filter(name__in=values)
9 for tag in tags:
10 queryset = queryset.filter(tags=tag)
11 return queryset
12
13
14 class EventFilter(filters.FilterSet):
15 start_after = filters.DateFilter(name='start', lookup_expr='gte')
16 start_before = filters.DateFilter(name='start', lookup_expr='lte')
17 end_after = filters.DateFilter(name='end', lookup_expr='gte')
18 end_before = filters.DateFilter(name='end', lookup_expr='lte')
19 TAG_CHOICES = Tag.objects.all().values_list('name', 'name')
20 tag = filters.MultipleChoiceFilter(
21 choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,
22 )
23
24 class Meta:
25 model = Event
26 fields = (
27 'completed', 'tag',
28 'start', 'start_before', 'start_after',
29 'end', 'end_before', 'end_after',
30 )
31 order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')
32
33
34 class TaskFilter(filters.FilterSet):
35 role = filters.CharFilter(name='role__name')
36
37 class Meta:
38 model = Task
39 fields = (
40 'role',
41 )
42
43
44 def filter_instructors(queryset, name, value):
45 instructor_badges = Badge.objects.instructor_badges()
46 if value is True:
47 return queryset.filter(badges__in=instructor_badges)
48 elif value is False:
49 return queryset.exclude(badges__in=instructor_badges)
50 else:
51 return queryset
52
53
54 class PersonFilter(filters.FilterSet):
55 is_instructor = filters.BooleanFilter(method=filter_instructors,
56 label='Is instructor?')
57
58 class Meta:
59 model = Person
60 fields = (
61 'badges', 'username', 'personal', 'middle', 'family', 'email',
62 'may_contact', 'publish_profile',
63 )
64 order_by = (
65 'lastname', '-lastname', 'firstname', '-firstname', 'email',
66 '-email',
67 )
68
69 def get_order_by(self, order_value):
70 if order_value == 'firstname':
71 return ['personal', 'middle', 'family']
72 elif order_value == '-firstname':
73 return ['-personal', '-middle', '-family']
74 elif order_value == 'lastname':
75 return ['family', 'middle', 'personal']
76 elif order_value == '-lastname':
77 return ['-family', '-middle', '-personal']
78 return super().get_order_by(order_value)
79
80
81 class InstructorsOverTimeFilter(AMYFilterSet):
82 badges = filters.ModelMultipleChoiceFilter(
83 queryset=Badge.objects.instructor_badges(),
84 label='Badges',
85 lookup_expr='in',
86 )
87
88 class Meta:
89 model = Person
90 fields = [
91 'badges',
92 ]
93
94
95 class WorkshopsOverTimeFilter(AMYFilterSet):
96 tags = filters.ModelMultipleChoiceFilter(
97 queryset=Tag.objects.all(),
98 label='Events with at least one of the following tags:',
99 )
100
101 class Meta:
102 model = Event
103 fields = [
104 'tags',
105 ]
106
107
108 class LearnersOverTimeFilter(AMYFilterSet):
109 tags = filters.ModelMultipleChoiceFilter(
110 queryset=Tag.objects.all(),
111 label='Events with all the following tags:',
112 conjoined=True,
113 )
114
115 class Meta:
116 model = Event
117 fields = [
118 'tags',
119 ]
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/filters.py b/api/filters.py
--- a/api/filters.py
+++ b/api/filters.py
@@ -59,7 +59,7 @@
model = Person
fields = (
'badges', 'username', 'personal', 'middle', 'family', 'email',
- 'may_contact', 'publish_profile',
+ 'may_contact', 'publish_profile', 'github',
)
order_by = (
'lastname', '-lastname', 'firstname', '-firstname', 'email',
|
{"golden_diff": "diff --git a/api/filters.py b/api/filters.py\n--- a/api/filters.py\n+++ b/api/filters.py\n@@ -59,7 +59,7 @@\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n- 'may_contact', 'publish_profile',\n+ 'may_contact', 'publish_profile', 'github',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n", "issue": "Add search by github handle to API persons endpoint\nWould it be possible to add the functionality to search for people using their github handle through the API? i.e. `https://amy.software-carpentry.org/api/v1/persons/?github=fmichonneau`\n", "before_files": [{"content": "from django_filters import rest_framework as filters\n\nfrom workshops.filters import AMYFilterSet\nfrom workshops.models import Event, Task, Tag, Person, Badge\n\n\ndef filter_tag_by_name(queryset, name, values):\n tags = Tag.objects.filter(name__in=values)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n return queryset\n\n\nclass EventFilter(filters.FilterSet):\n start_after = filters.DateFilter(name='start', lookup_expr='gte')\n start_before = filters.DateFilter(name='start', lookup_expr='lte')\n end_after = filters.DateFilter(name='end', lookup_expr='gte')\n end_before = filters.DateFilter(name='end', lookup_expr='lte')\n TAG_CHOICES = Tag.objects.all().values_list('name', 'name')\n tag = filters.MultipleChoiceFilter(\n choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,\n )\n\n class Meta:\n model = Event\n fields = (\n 'completed', 'tag',\n 'start', 'start_before', 'start_after',\n 'end', 'end_before', 'end_after',\n )\n order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')\n\n\nclass TaskFilter(filters.FilterSet):\n role = filters.CharFilter(name='role__name')\n\n class Meta:\n model = Task\n fields = (\n 'role',\n )\n\n\ndef filter_instructors(queryset, name, value):\n instructor_badges = Badge.objects.instructor_badges()\n if value is True:\n return queryset.filter(badges__in=instructor_badges)\n elif value is False:\n return queryset.exclude(badges__in=instructor_badges)\n else:\n return queryset\n\n\nclass PersonFilter(filters.FilterSet):\n is_instructor = filters.BooleanFilter(method=filter_instructors,\n label='Is instructor?')\n\n class Meta:\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n 'may_contact', 'publish_profile',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n '-email',\n )\n\n def get_order_by(self, order_value):\n if order_value == 'firstname':\n return ['personal', 'middle', 'family']\n elif order_value == '-firstname':\n return ['-personal', '-middle', '-family']\n elif order_value == 'lastname':\n return ['family', 'middle', 'personal']\n elif order_value == '-lastname':\n return ['-family', '-middle', '-personal']\n return super().get_order_by(order_value)\n\n\nclass InstructorsOverTimeFilter(AMYFilterSet):\n badges = filters.ModelMultipleChoiceFilter(\n queryset=Badge.objects.instructor_badges(),\n label='Badges',\n lookup_expr='in',\n )\n\n class Meta:\n model = Person\n fields = [\n 'badges',\n ]\n\n\nclass WorkshopsOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with at least one of the following tags:',\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n\n\nclass LearnersOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with all 
the following tags:',\n conjoined=True,\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n", "path": "api/filters.py"}], "after_files": [{"content": "from django_filters import rest_framework as filters\n\nfrom workshops.filters import AMYFilterSet\nfrom workshops.models import Event, Task, Tag, Person, Badge\n\n\ndef filter_tag_by_name(queryset, name, values):\n tags = Tag.objects.filter(name__in=values)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n return queryset\n\n\nclass EventFilter(filters.FilterSet):\n start_after = filters.DateFilter(name='start', lookup_expr='gte')\n start_before = filters.DateFilter(name='start', lookup_expr='lte')\n end_after = filters.DateFilter(name='end', lookup_expr='gte')\n end_before = filters.DateFilter(name='end', lookup_expr='lte')\n TAG_CHOICES = Tag.objects.all().values_list('name', 'name')\n tag = filters.MultipleChoiceFilter(\n choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,\n )\n\n class Meta:\n model = Event\n fields = (\n 'completed', 'tag',\n 'start', 'start_before', 'start_after',\n 'end', 'end_before', 'end_after',\n )\n order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')\n\n\nclass TaskFilter(filters.FilterSet):\n role = filters.CharFilter(name='role__name')\n\n class Meta:\n model = Task\n fields = (\n 'role',\n )\n\n\ndef filter_instructors(queryset, name, value):\n instructor_badges = Badge.objects.instructor_badges()\n if value is True:\n return queryset.filter(badges__in=instructor_badges)\n elif value is False:\n return queryset.exclude(badges__in=instructor_badges)\n else:\n return queryset\n\n\nclass PersonFilter(filters.FilterSet):\n is_instructor = filters.BooleanFilter(method=filter_instructors,\n label='Is instructor?')\n\n class Meta:\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n 'may_contact', 'publish_profile', 'github',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n '-email',\n )\n\n def get_order_by(self, order_value):\n if order_value == 'firstname':\n return ['personal', 'middle', 'family']\n elif order_value == '-firstname':\n return ['-personal', '-middle', '-family']\n elif order_value == 'lastname':\n return ['family', 'middle', 'personal']\n elif order_value == '-lastname':\n return ['-family', '-middle', '-personal']\n return super().get_order_by(order_value)\n\n\nclass InstructorsOverTimeFilter(AMYFilterSet):\n badges = filters.ModelMultipleChoiceFilter(\n queryset=Badge.objects.instructor_badges(),\n label='Badges',\n lookup_expr='in',\n )\n\n class Meta:\n model = Person\n fields = [\n 'badges',\n ]\n\n\nclass WorkshopsOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with at least one of the following tags:',\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n\n\nclass LearnersOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with all the following tags:',\n conjoined=True,\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n", "path": "api/filters.py"}]}
| 1,340 | 115 |
gh_patches_debug_14474
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__aimmo-512
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make logout button on AI:MMO
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `players/urls.py`
Content:
```
1 from django.conf.urls import url
2 from django.contrib.auth import views as auth_views
3 from django.contrib.auth.decorators import login_required
4 from django.views.generic import TemplateView
5 from django.views.generic import RedirectView
6
7 from players import views
8
9 urlpatterns = [
10 url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),
11
12 url(r'^accounts/login/$', auth_views.login),
13
14 url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),
15 url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),
16 url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),
17 url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),
18 url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),
19 url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),
20
21 url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),
22 url(r'^api/games/$', views.list_games, name='aimmo/games'),
23 url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),
24 url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),
25 url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),
26
27 url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls
28 url(r'^games/new/$', views.add_game, name='aimmo/new_game'),
29
30 # TODO: this is a quickfix for redirecting for the Unity resources
31 url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',
32 RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),
33
34 url(r'^socket.io/socket.io.js',
35 RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),
36 ]
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/players/urls.py b/players/urls.py
--- a/players/urls.py
+++ b/players/urls.py
@@ -10,6 +10,8 @@
url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),
url(r'^accounts/login/$', auth_views.login),
+ url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),
+ url(r'^accounts/logout_success/$', TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),
url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),
url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),
|
{"golden_diff": "diff --git a/players/urls.py b/players/urls.py\n--- a/players/urls.py\n+++ b/players/urls.py\n@@ -10,6 +10,8 @@\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n \n url(r'^accounts/login/$', auth_views.login),\n+ url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),\n+ url(r'^accounts/logout_success/$', TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),\n \n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n", "issue": "Make logout button on AI:MMO\n\n", "before_files": [{"content": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\nfrom django.views.generic import RedirectView\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n\n url(r'^accounts/login/$', auth_views.login),\n\n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),\n url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),\n url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),\n url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),\n\n url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),\n url(r'^api/games/$', views.list_games, name='aimmo/games'),\n url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),\n url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),\n url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n url(r'^games/new/$', views.add_game, name='aimmo/new_game'),\n\n # TODO: this is a quickfix for redirecting for the Unity resources\n url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',\n RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),\n\n url(r'^socket.io/socket.io.js',\n RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),\n]\n", "path": "players/urls.py"}], "after_files": [{"content": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\nfrom django.views.generic import RedirectView\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n\n url(r'^accounts/login/$', auth_views.login),\n url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),\n url(r'^accounts/logout_success/$', 
TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),\n\n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),\n url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),\n url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),\n url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),\n\n url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),\n url(r'^api/games/$', views.list_games, name='aimmo/games'),\n url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),\n url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),\n url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n url(r'^games/new/$', views.add_game, name='aimmo/new_game'),\n\n # TODO: this is a quickfix for redirecting for the Unity resources\n url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',\n RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),\n\n url(r'^socket.io/socket.io.js',\n RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),\n]\n", "path": "players/urls.py"}]}
| 887 | 197 |
gh_patches_debug_173
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-2038
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parsl v1.1.0 Release Checklist
## Checklist
Please edit the checklist if I've missed any items.
### Documentation updates :
- [x] Update docs to point at 1.1.0 as the latest
- [x] Make sure docs are not broken on readthedocs, since a broken doc build will stick on as stable till next release.
- [x] Update changelog with summary of changes since 0.9.0 [@benclifford to take a crack at this]
- [ ] Update Parsl tutorial repo with a 1.1.0 branch that folds in changes
- [x] Add `Beta` tags to components/features that are not yet stable.
### Testing :
- [ ] All testing should be green on Travis
- [x] Update all configs in `parsl/parsl/configs` to match current best practices
- [x] Update all test configs in `parsl/parsl/test/configs`
- [x] Test notebooks/tutorials and basic tests on a Mac
- [ ] Post news update on the website about release
- [x] Site testing:
- [x] Bridges2(PSC) [YY]
- [ ] ~~Comet (SDSC)~~ Machine is getting replaced by Expanse
- [x] Cori (NERSC) [YY/Yadu]
- [x] Stampede2 (TACC) [Yadu]
- [ ] ~~Frontera (TACC)~~ [Yadu, no access]
- [x] Theta (ALCF) [YY]
- [x] Bluewaters (NCSA) [ZZ]
- [x] Summit (ORNL) [Yadu]
- [ ] ~~CC-IN2P3 (French Grid)~~ [Yadu]
- [x] Midway (RCC, UChicago) [YY]
- [x] Open Science Grid
- [x] AWS
- [x] Kubernetes [ZZ]
- [x] NSCC Singapore [ZZ]
- [ ] Ad-Hoc clusters [YY]
### Release Tagging and pushing to PyPI
I'll make an updated alpha to smoothen installation and site testing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/version.py`
Content:
```
1 """Set module version.
2
3 <Major>.<Minor>.<maintenance>[alpha/beta/..]
4 Alphas will be numbered like this -> 0.4.0a0
5 """
6 VERSION = '1.1.0a1'
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/version.py b/parsl/version.py
--- a/parsl/version.py
+++ b/parsl/version.py
@@ -3,4 +3,4 @@
<Major>.<Minor>.<maintenance>[alpha/beta/..]
Alphas will be numbered like this -> 0.4.0a0
"""
-VERSION = '1.1.0a1'
+VERSION = '1.1.0'
|
{"golden_diff": "diff --git a/parsl/version.py b/parsl/version.py\n--- a/parsl/version.py\n+++ b/parsl/version.py\n@@ -3,4 +3,4 @@\n <Major>.<Minor>.<maintenance>[alpha/beta/..]\n Alphas will be numbered like this -> 0.4.0a0\n \"\"\"\n-VERSION = '1.1.0a1'\n+VERSION = '1.1.0'\n", "issue": "Parsl v1.1.0 Release Checklist\n## Checklist\r\n\r\nPlease edit the checklist if I've missed any items. \r\n\r\n### Documentation updates :\r\n\r\n- [x] Update docs to point at 1.1.0 as the latest\r\n- [x] Make sure docs are not broken on readthedocs, since a broken doc build will stick on as stable till next release.\r\n- [x] Update changelog with summary of changes since 0.9.0 [@benclifford to take a crack at this]\r\n- [ ] Update Parsl tutorial repo with a 1.1.0 branch that folds in changes \r\n- [x] Add `Beta` tags to components/features that are not yet stable.\r\n\r\n\r\n### Testing :\r\n\r\n- [ ] All testing should be green on Travis\r\n- [x] Update all configs in `parsl/parsl/configs` to match current best practices\r\n- [x] Update all test configs in `parsl/parsl/test/configs`\r\n- [x] Test notebooks/tutorials and basic tests on a Mac\r\n- [ ] Post news update on the website about release\r\n\r\n- [x] Site testing:\r\n - [x] Bridges2(PSC) [YY]\r\n - [ ] ~~Comet (SDSC)~~ Machine is getting replaced by Expanse\r\n - [x] Cori (NERSC) [YY/Yadu]\r\n - [x] Stampede2 (TACC) [Yadu]\r\n - [ ] ~~Frontera (TACC)~~ [Yadu, no access]\r\n - [x] Theta (ALCF) [YY]\r\n - [x] Bluewaters (NCSA) [ZZ]\r\n - [x] Summit (ORNL) [Yadu]\r\n - [ ] ~~CC-IN2P3 (French Grid)~~ [Yadu]\r\n - [x] Midway (RCC, UChicago) [YY]\r\n - [x] Open Science Grid\r\n - [x] AWS\r\n - [x] Kubernetes [ZZ]\r\n - [x] NSCC Singapore [ZZ]\r\n - [ ] Ad-Hoc clusters [YY]\r\n\r\n### Release Tagging and pushing to PyPI\r\n\r\nI'll make an updated alpha to smoothen installation and site testing.\r\n\n", "before_files": [{"content": "\"\"\"Set module version.\n\n<Major>.<Minor>.<maintenance>[alpha/beta/..]\nAlphas will be numbered like this -> 0.4.0a0\n\"\"\"\nVERSION = '1.1.0a1'\n", "path": "parsl/version.py"}], "after_files": [{"content": "\"\"\"Set module version.\n\n<Major>.<Minor>.<maintenance>[alpha/beta/..]\nAlphas will be numbered like this -> 0.4.0a0\n\"\"\"\nVERSION = '1.1.0'\n", "path": "parsl/version.py"}]}
| 799 | 97 |
gh_patches_debug_6931
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-28775
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve integrations documentation in help center
We should:
- Rename /help/bots-and-integrations to "Bots overview" everywhere (sidebar, page title, page URL).
- Add a copy of https://zulip.com/api/integrations-overview as the second page in the Bots & integrations section, titled "Integration overview"
- Cross-link as appropriate, both in related articles and in the content of the pages.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/url_redirects.py`
Content:
```
1 from dataclasses import dataclass
2 from typing import List
3
4
5 @dataclass
6 class URLRedirect:
7 old_url: str
8 new_url: str
9
10
11 API_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
12 # Add URL redirects for REST API documentation here:
13 URLRedirect("/api/delete-stream", "/api/archive-stream"),
14 ]
15
16 POLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
17 # Add URL redirects for policy documentation here:
18 URLRedirect("/privacy/", "/policies/privacy"),
19 URLRedirect("/terms/", "/policies/terms"),
20 ]
21
22 HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
23 # Add URL redirects for help center documentation here:
24 URLRedirect("/help/pm-mention-alert-notifications", "/help/dm-mention-alert-notifications"),
25 URLRedirect("/help/restrict-private-messages", "/help/restrict-direct-messages"),
26 URLRedirect("/help/reading-pms", "/help/reading-dms"),
27 URLRedirect("/help/private-messages", "/help/direct-messages"),
28 URLRedirect("/help/configure-who-can-edit-topics", "/help/restrict-moving-messages"),
29 URLRedirect(
30 "/help/configure-message-editing-and-deletion",
31 "/help/restrict-message-editing-and-deletion",
32 ),
33 URLRedirect("/help/restrict-visibility-of-email-addresses", "/help/configure-email-visibility"),
34 URLRedirect("/help/change-default-view", "/help/configure-default-view"),
35 URLRedirect("/help/recent-topics", "/help/recent-conversations"),
36 URLRedirect(
37 "/help/add-custom-profile-fields",
38 "/help/custom-profile-fields",
39 ),
40 URLRedirect(
41 "/help/enable-enter-to-send",
42 "/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message",
43 ),
44 URLRedirect(
45 "/help/change-the-default-language-for-your-organization",
46 "/help/configure-organization-language",
47 ),
48 URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
49 URLRedirect("/help/change-the-topic-of-a-message", "/help/rename-a-topic"),
50 URLRedirect("/help/configure-missed-message-emails", "/help/email-notifications"),
51 URLRedirect("/help/add-an-alert-word", "/help/dm-mention-alert-notifications#alert-words"),
52 URLRedirect("/help/test-mobile-notifications", "/help/mobile-notifications"),
53 URLRedirect(
54 "/help/troubleshooting-desktop-notifications",
55 "/help/desktop-notifications#troubleshooting-desktop-notifications",
56 ),
57 URLRedirect(
58 "/help/change-notification-sound", "/help/desktop-notifications#change-notification-sound"
59 ),
60 URLRedirect("/help/configure-message-notification-emails", "/help/email-notifications"),
61 URLRedirect("/help/disable-new-login-emails", "/help/email-notifications#new-login-emails"),
62 # The `help/about-streams-and-topics` redirect is particularly important,
63 # because the old URL appears in links from Welcome Bot messages.
64 URLRedirect("/help/about-streams-and-topics", "/help/streams-and-topics"),
65 URLRedirect("/help/community-topic-edits", "/help/restrict-moving-messages"),
66 URLRedirect(
67 "/help/only-allow-admins-to-add-emoji", "/help/custom-emoji#change-who-can-add-custom-emoji"
68 ),
69 URLRedirect(
70 "/help/configure-who-can-add-custom-emoji",
71 "/help/custom-emoji#change-who-can-add-custom-emoji",
72 ),
73 URLRedirect("/help/add-custom-emoji", "/help/custom-emoji"),
74 URLRedirect("/help/night-mode", "/help/dark-theme"),
75 URLRedirect("/help/enable-emoticon-translations", "/help/configure-emoticon-translations"),
76 URLRedirect("/help/web-public-streams", "/help/public-access-option"),
77 URLRedirect("/help/starting-a-new-private-thread", "/help/starting-a-new-direct-message"),
78 URLRedirect("/help/edit-or-delete-a-message", "/help/delete-a-message"),
79 URLRedirect("/help/start-a-new-topic", "/help/starting-a-new-topic"),
80 URLRedirect("/help/configure-default-view", "/help/configure-home-view"),
81 URLRedirect("/help/reading-topics", "/help/reading-conversations"),
82 URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
83 URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
84 ]
85
86 LANDING_PAGE_REDIRECTS = [
87 # Add URL redirects for corporate landing pages here.
88 URLRedirect("/new-user/", "/hello/"),
89 URLRedirect("/developer-community/", "/development-community"),
90 URLRedirect("/for/companies/", "/for/business"),
91 URLRedirect("/for/working-groups-and-communities/", "/for/communities"),
92 ]
93
94 DOCUMENTATION_REDIRECTS = (
95 API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
--- a/zerver/lib/url_redirects.py
+++ b/zerver/lib/url_redirects.py
@@ -81,6 +81,7 @@
URLRedirect("/help/reading-topics", "/help/reading-conversations"),
URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
+ URLRedirect("/help/bots-and-integrations", "/help/bots-overview"),
]
LANDING_PAGE_REDIRECTS = [
|
{"golden_diff": "diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py\n--- a/zerver/lib/url_redirects.py\n+++ b/zerver/lib/url_redirects.py\n@@ -81,6 +81,7 @@\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n+ URLRedirect(\"/help/bots-and-integrations\", \"/help/bots-overview\"),\n ]\n \n LANDING_PAGE_REDIRECTS = [\n", "issue": "Improve integrations documentation in help center\nWe should:\r\n- Rename /help/bots-and-integrations to \"Bots overview\" everywhere (sidebar, page title, page URL).\r\n- Add a copy of https://zulip.com/api/integrations-overview as the second page in the Bots & integrations section, titled \"Integration overview\"\r\n- Cross-link as appropriate, both in related articles and in the content of the pages.\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass\nclass URLRedirect:\n old_url: str\n new_url: str\n\n\nAPI_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for REST API documentation here:\n URLRedirect(\"/api/delete-stream\", \"/api/archive-stream\"),\n]\n\nPOLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for policy documentation here:\n URLRedirect(\"/privacy/\", \"/policies/privacy\"),\n URLRedirect(\"/terms/\", \"/policies/terms\"),\n]\n\nHELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for help center documentation here:\n URLRedirect(\"/help/pm-mention-alert-notifications\", \"/help/dm-mention-alert-notifications\"),\n URLRedirect(\"/help/restrict-private-messages\", \"/help/restrict-direct-messages\"),\n URLRedirect(\"/help/reading-pms\", \"/help/reading-dms\"),\n URLRedirect(\"/help/private-messages\", \"/help/direct-messages\"),\n URLRedirect(\"/help/configure-who-can-edit-topics\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/configure-message-editing-and-deletion\",\n \"/help/restrict-message-editing-and-deletion\",\n ),\n URLRedirect(\"/help/restrict-visibility-of-email-addresses\", \"/help/configure-email-visibility\"),\n URLRedirect(\"/help/change-default-view\", \"/help/configure-default-view\"),\n URLRedirect(\"/help/recent-topics\", \"/help/recent-conversations\"),\n URLRedirect(\n \"/help/add-custom-profile-fields\",\n \"/help/custom-profile-fields\",\n ),\n URLRedirect(\n \"/help/enable-enter-to-send\",\n \"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message\",\n ),\n URLRedirect(\n \"/help/change-the-default-language-for-your-organization\",\n \"/help/configure-organization-language\",\n ),\n URLRedirect(\"/help/delete-a-stream\", \"/help/archive-a-stream\"),\n URLRedirect(\"/help/change-the-topic-of-a-message\", \"/help/rename-a-topic\"),\n URLRedirect(\"/help/configure-missed-message-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/add-an-alert-word\", \"/help/dm-mention-alert-notifications#alert-words\"),\n URLRedirect(\"/help/test-mobile-notifications\", \"/help/mobile-notifications\"),\n URLRedirect(\n \"/help/troubleshooting-desktop-notifications\",\n \"/help/desktop-notifications#troubleshooting-desktop-notifications\",\n ),\n URLRedirect(\n \"/help/change-notification-sound\", \"/help/desktop-notifications#change-notification-sound\"\n ),\n URLRedirect(\"/help/configure-message-notification-emails\", 
\"/help/email-notifications\"),\n URLRedirect(\"/help/disable-new-login-emails\", \"/help/email-notifications#new-login-emails\"),\n # The `help/about-streams-and-topics` redirect is particularly important,\n # because the old URL appears in links from Welcome Bot messages.\n URLRedirect(\"/help/about-streams-and-topics\", \"/help/streams-and-topics\"),\n URLRedirect(\"/help/community-topic-edits\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/only-allow-admins-to-add-emoji\", \"/help/custom-emoji#change-who-can-add-custom-emoji\"\n ),\n URLRedirect(\n \"/help/configure-who-can-add-custom-emoji\",\n \"/help/custom-emoji#change-who-can-add-custom-emoji\",\n ),\n URLRedirect(\"/help/add-custom-emoji\", \"/help/custom-emoji\"),\n URLRedirect(\"/help/night-mode\", \"/help/dark-theme\"),\n URLRedirect(\"/help/enable-emoticon-translations\", \"/help/configure-emoticon-translations\"),\n URLRedirect(\"/help/web-public-streams\", \"/help/public-access-option\"),\n URLRedirect(\"/help/starting-a-new-private-thread\", \"/help/starting-a-new-direct-message\"),\n URLRedirect(\"/help/edit-or-delete-a-message\", \"/help/delete-a-message\"),\n URLRedirect(\"/help/start-a-new-topic\", \"/help/starting-a-new-topic\"),\n URLRedirect(\"/help/configure-default-view\", \"/help/configure-home-view\"),\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n]\n\nLANDING_PAGE_REDIRECTS = [\n # Add URL redirects for corporate landing pages here.\n URLRedirect(\"/new-user/\", \"/hello/\"),\n URLRedirect(\"/developer-community/\", \"/development-community\"),\n URLRedirect(\"/for/companies/\", \"/for/business\"),\n URLRedirect(\"/for/working-groups-and-communities/\", \"/for/communities\"),\n]\n\nDOCUMENTATION_REDIRECTS = (\n API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS\n)\n", "path": "zerver/lib/url_redirects.py"}], "after_files": [{"content": "from dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass\nclass URLRedirect:\n old_url: str\n new_url: str\n\n\nAPI_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for REST API documentation here:\n URLRedirect(\"/api/delete-stream\", \"/api/archive-stream\"),\n]\n\nPOLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for policy documentation here:\n URLRedirect(\"/privacy/\", \"/policies/privacy\"),\n URLRedirect(\"/terms/\", \"/policies/terms\"),\n]\n\nHELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for help center documentation here:\n URLRedirect(\"/help/pm-mention-alert-notifications\", \"/help/dm-mention-alert-notifications\"),\n URLRedirect(\"/help/restrict-private-messages\", \"/help/restrict-direct-messages\"),\n URLRedirect(\"/help/reading-pms\", \"/help/reading-dms\"),\n URLRedirect(\"/help/private-messages\", \"/help/direct-messages\"),\n URLRedirect(\"/help/configure-who-can-edit-topics\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/configure-message-editing-and-deletion\",\n \"/help/restrict-message-editing-and-deletion\",\n ),\n URLRedirect(\"/help/restrict-visibility-of-email-addresses\", \"/help/configure-email-visibility\"),\n URLRedirect(\"/help/change-default-view\", \"/help/configure-default-view\"),\n URLRedirect(\"/help/recent-topics\", \"/help/recent-conversations\"),\n URLRedirect(\n 
\"/help/add-custom-profile-fields\",\n \"/help/custom-profile-fields\",\n ),\n URLRedirect(\n \"/help/enable-enter-to-send\",\n \"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message\",\n ),\n URLRedirect(\n \"/help/change-the-default-language-for-your-organization\",\n \"/help/configure-organization-language\",\n ),\n URLRedirect(\"/help/delete-a-stream\", \"/help/archive-a-stream\"),\n URLRedirect(\"/help/change-the-topic-of-a-message\", \"/help/rename-a-topic\"),\n URLRedirect(\"/help/configure-missed-message-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/add-an-alert-word\", \"/help/dm-mention-alert-notifications#alert-words\"),\n URLRedirect(\"/help/test-mobile-notifications\", \"/help/mobile-notifications\"),\n URLRedirect(\n \"/help/troubleshooting-desktop-notifications\",\n \"/help/desktop-notifications#troubleshooting-desktop-notifications\",\n ),\n URLRedirect(\n \"/help/change-notification-sound\", \"/help/desktop-notifications#change-notification-sound\"\n ),\n URLRedirect(\"/help/configure-message-notification-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/disable-new-login-emails\", \"/help/email-notifications#new-login-emails\"),\n # The `help/about-streams-and-topics` redirect is particularly important,\n # because the old URL appears in links from Welcome Bot messages.\n URLRedirect(\"/help/about-streams-and-topics\", \"/help/streams-and-topics\"),\n URLRedirect(\"/help/community-topic-edits\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/only-allow-admins-to-add-emoji\", \"/help/custom-emoji#change-who-can-add-custom-emoji\"\n ),\n URLRedirect(\n \"/help/configure-who-can-add-custom-emoji\",\n \"/help/custom-emoji#change-who-can-add-custom-emoji\",\n ),\n URLRedirect(\"/help/add-custom-emoji\", \"/help/custom-emoji\"),\n URLRedirect(\"/help/night-mode\", \"/help/dark-theme\"),\n URLRedirect(\"/help/enable-emoticon-translations\", \"/help/configure-emoticon-translations\"),\n URLRedirect(\"/help/web-public-streams\", \"/help/public-access-option\"),\n URLRedirect(\"/help/starting-a-new-private-thread\", \"/help/starting-a-new-direct-message\"),\n URLRedirect(\"/help/edit-or-delete-a-message\", \"/help/delete-a-message\"),\n URLRedirect(\"/help/start-a-new-topic\", \"/help/starting-a-new-topic\"),\n URLRedirect(\"/help/configure-default-view\", \"/help/configure-home-view\"),\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n URLRedirect(\"/help/bots-and-integrations\", \"/help/bots-overview\"),\n]\n\nLANDING_PAGE_REDIRECTS = [\n # Add URL redirects for corporate landing pages here.\n URLRedirect(\"/new-user/\", \"/hello/\"),\n URLRedirect(\"/developer-community/\", \"/development-community\"),\n URLRedirect(\"/for/companies/\", \"/for/business\"),\n URLRedirect(\"/for/working-groups-and-communities/\", \"/for/communities\"),\n]\n\nDOCUMENTATION_REDIRECTS = (\n API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS\n)\n", "path": "zerver/lib/url_redirects.py"}]}
| 1,574 | 139 |
gh_patches_debug_20189
|
rasdani/github-patches
|
git_diff
|
plotly__dash-2856
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] set_props called multiple times only keep the last props.
For regular callbacks, when multiple call of `set_props` to the same component id, only the last call is saved.
Example:
```
from dash import Dash, Input, html, set_props
app = Dash()
app.layout = [
html.Button("start", id="start"),
html.Div("initial", id="output"),
]
@app.callback(
Input("start", "n_clicks"),
)
def on_click(_):
set_props("output", {"children": "changed"})
set_props("output", {"style": {"background": "red"}})
if __name__ == "__main__":
app.run(debug=True)
```
Clicking on the start button only set the background red, the text stays at "initial". The props should be merged and both updated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/long_callback/_proxy_set_props.py`
Content:
```
1 class ProxySetProps(dict):
2 """
3 Defer dictionary item setter to run a custom function on change.
4 Used by background callback manager to save the `set_props` data.
5 """
6
7 def __init__(self, on_change):
8 super().__init__()
9 self.on_change = on_change
10
11 def __setitem__(self, key, value):
12 self.on_change(key, value)
13
```
Path: `dash/_callback_context.py`
Content:
```
1 import functools
2 import warnings
3 import json
4 import contextvars
5 import typing
6
7 import flask
8
9 from . import exceptions
10 from ._utils import AttributeDict, stringify_id
11
12
13 context_value = contextvars.ContextVar("callback_context")
14 context_value.set({})
15
16
17 def has_context(func):
18 @functools.wraps(func)
19 def assert_context(*args, **kwargs):
20 if not context_value.get():
21 raise exceptions.MissingCallbackContextException(
22 f"dash.callback_context.{getattr(func, '__name__')} is only available from a callback!"
23 )
24 return func(*args, **kwargs)
25
26 return assert_context
27
28
29 def _get_context_value():
30 return context_value.get()
31
32
33 class FalsyList(list):
34 def __bool__(self):
35 # for Python 3
36 return False
37
38 def __nonzero__(self):
39 # for Python 2
40 return False
41
42
43 falsy_triggered = FalsyList([{"prop_id": ".", "value": None}])
44
45
46 # pylint: disable=no-init
47 class CallbackContext:
48 @property
49 @has_context
50 def inputs(self):
51 return getattr(_get_context_value(), "input_values", {})
52
53 @property
54 @has_context
55 def states(self):
56 return getattr(_get_context_value(), "state_values", {})
57
58 @property
59 @has_context
60 def triggered(self):
61 """
62 Returns a list of all the Input props that changed and caused the callback to execute. It is empty when the
63 callback is called on initial load, unless an Input prop got its value from another initial callback.
64 Callbacks triggered by user actions typically have one item in triggered, unless the same action changes
65 two props at once or the callback has several Input props that are all modified by another callback based on
66 a single user action.
67
68 Example: To get the id of the component that triggered the callback:
69 `component_id = ctx.triggered[0]['prop_id'].split('.')[0]`
70
71 Example: To detect initial call, empty triggered is not really empty, it's falsy so that you can use:
72 `if ctx.triggered:`
73 """
74 # For backward compatibility: previously `triggered` always had a
75 # value - to avoid breaking existing apps, add a dummy item but
76 # make the list still look falsy. So `if ctx.triggered` will make it
77 # look empty, but you can still do `triggered[0]["prop_id"].split(".")`
78 return getattr(_get_context_value(), "triggered_inputs", []) or falsy_triggered
79
80 @property
81 @has_context
82 def triggered_prop_ids(self):
83 """
84 Returns a dictionary of all the Input props that changed and caused the callback to execute. It is empty when
85 the callback is called on initial load, unless an Input prop got its value from another initial callback.
86 Callbacks triggered by user actions typically have one item in triggered, unless the same action changes
87 two props at once or the callback has several Input props that are all modified by another callback based
88 on a single user action.
89
90 triggered_prop_ids (dict):
91 - keys (str) : the triggered "prop_id" composed of "component_id.component_property"
92 - values (str or dict): the id of the component that triggered the callback. Will be the dict id for pattern matching callbacks
93
94 Example - regular callback
95 {"btn-1.n_clicks": "btn-1"}
96
97 Example - pattern matching callbacks:
98 {'{"index":0,"type":"filter-dropdown"}.value': {"index":0,"type":"filter-dropdown"}}
99
100 Example usage:
101 `if "btn-1.n_clicks" in ctx.triggered_prop_ids:
102 do_something()`
103 """
104 triggered = getattr(_get_context_value(), "triggered_inputs", [])
105 ids = AttributeDict({})
106 for item in triggered:
107 component_id, _, _ = item["prop_id"].rpartition(".")
108 ids[item["prop_id"]] = component_id
109 if component_id.startswith("{"):
110 ids[item["prop_id"]] = AttributeDict(json.loads(component_id))
111 return ids
112
113 @property
114 @has_context
115 def triggered_id(self):
116 """
117 Returns the component id (str or dict) of the Input component that triggered the callback.
118
119 Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if
120 multiple Inputs triggered the callback.
121
122 Example usage:
123 `if "btn-1" == ctx.triggered_id:
124 do_something()`
125
126 """
127 component_id = None
128 if self.triggered:
129 prop_id = self.triggered_prop_ids.first()
130 component_id = self.triggered_prop_ids[prop_id]
131 return component_id
132
133 @property
134 @has_context
135 def args_grouping(self):
136 """
137 args_grouping is a dict of the inputs used with flexible callback signatures. The keys are the variable names
138 and the values are dictionaries containing:
139 - “id”: (string or dict) the component id. If it’s a pattern matching id, it will be a dict.
140 - “id_str”: (str) for pattern matching ids, it’s the stringified dict id with no white spaces.
141 - “property”: (str) The component property used in the callback.
142 - “value”: the value of the component property at the time the callback was fired.
143 - “triggered”: (bool)Whether this input triggered the callback.
144
145 Example usage:
146 @app.callback(
147 Output("container", "children"),
148 inputs=dict(btn1=Input("btn-1", "n_clicks"), btn2=Input("btn-2", "n_clicks")),
149 )
150 def display(btn1, btn2):
151 c = ctx.args_grouping
152 if c.btn1.triggered:
153 return f"Button 1 clicked {btn1} times"
154 elif c.btn2.triggered:
155 return f"Button 2 clicked {btn2} times"
156 else:
157 return "No clicks yet"
158
159 """
160 return getattr(_get_context_value(), "args_grouping", [])
161
162 @property
163 @has_context
164 def outputs_grouping(self):
165 return getattr(_get_context_value(), "outputs_grouping", [])
166
167 @property
168 @has_context
169 def outputs_list(self):
170 if self.using_outputs_grouping:
171 warnings.warn(
172 "outputs_list is deprecated, use outputs_grouping instead",
173 DeprecationWarning,
174 )
175
176 return getattr(_get_context_value(), "outputs_list", [])
177
178 @property
179 @has_context
180 def inputs_list(self):
181 if self.using_args_grouping:
182 warnings.warn(
183 "inputs_list is deprecated, use args_grouping instead",
184 DeprecationWarning,
185 )
186
187 return getattr(_get_context_value(), "inputs_list", [])
188
189 @property
190 @has_context
191 def states_list(self):
192 if self.using_args_grouping:
193 warnings.warn(
194 "states_list is deprecated, use args_grouping instead",
195 DeprecationWarning,
196 )
197 return getattr(_get_context_value(), "states_list", [])
198
199 @property
200 @has_context
201 def response(self):
202 return getattr(_get_context_value(), "dash_response")
203
204 @staticmethod
205 @has_context
206 def record_timing(name, duration=None, description=None):
207 """Records timing information for a server resource.
208
209 :param name: The name of the resource.
210 :type name: string
211
212 :param duration: The time in seconds to report. Internally, this
213 is rounded to the nearest millisecond.
214 :type duration: float or None
215
216 :param description: A description of the resource.
217 :type description: string or None
218 """
219 timing_information = getattr(flask.g, "timing_information", {})
220
221 if name in timing_information:
222 raise KeyError(f'Duplicate resource name "{name}" found.')
223
224 timing_information[name] = {"dur": round(duration * 1000), "desc": description}
225
226 setattr(flask.g, "timing_information", timing_information)
227
228 @property
229 @has_context
230 def using_args_grouping(self):
231 """
232 Return True if this callback is using dictionary or nested groupings for
233 Input/State dependencies, or if Input and State dependencies are interleaved
234 """
235 return getattr(_get_context_value(), "using_args_grouping", [])
236
237 @property
238 @has_context
239 def using_outputs_grouping(self):
240 """
241 Return True if this callback is using dictionary or nested groupings for
242 Output dependencies.
243 """
244 return getattr(_get_context_value(), "using_outputs_grouping", [])
245
246 @property
247 @has_context
248 def timing_information(self):
249 return getattr(flask.g, "timing_information", {})
250
251 @has_context
252 def set_props(self, component_id: typing.Union[str, dict], props: dict):
253 ctx_value = _get_context_value()
254 _id = stringify_id(component_id)
255 ctx_value.updated_props[_id] = props
256
257
258 callback_context = CallbackContext()
259
260
261 def set_props(component_id: typing.Union[str, dict], props: dict):
262 """
263 Set the props for a component not included in the callback outputs.
264 """
265 callback_context.set_props(component_id, props)
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dash/_callback_context.py b/dash/_callback_context.py
--- a/dash/_callback_context.py
+++ b/dash/_callback_context.py
@@ -252,7 +252,11 @@
def set_props(self, component_id: typing.Union[str, dict], props: dict):
ctx_value = _get_context_value()
_id = stringify_id(component_id)
- ctx_value.updated_props[_id] = props
+ existing = ctx_value.updated_props.get(_id)
+ if existing is not None:
+ ctx_value.updated_props[_id] = {**existing, **props}
+ else:
+ ctx_value.updated_props[_id] = props
callback_context = CallbackContext()
diff --git a/dash/long_callback/_proxy_set_props.py b/dash/long_callback/_proxy_set_props.py
--- a/dash/long_callback/_proxy_set_props.py
+++ b/dash/long_callback/_proxy_set_props.py
@@ -7,6 +7,12 @@
def __init__(self, on_change):
super().__init__()
self.on_change = on_change
+ self._data = {}
def __setitem__(self, key, value):
self.on_change(key, value)
+ self._data.setdefault(key, {})
+ self._data[key] = {**self._data[key], **value}
+
+ def get(self, key):
+ return self._data.get(key)
|
{"golden_diff": "diff --git a/dash/_callback_context.py b/dash/_callback_context.py\n--- a/dash/_callback_context.py\n+++ b/dash/_callback_context.py\n@@ -252,7 +252,11 @@\n def set_props(self, component_id: typing.Union[str, dict], props: dict):\n ctx_value = _get_context_value()\n _id = stringify_id(component_id)\n- ctx_value.updated_props[_id] = props\n+ existing = ctx_value.updated_props.get(_id)\n+ if existing is not None:\n+ ctx_value.updated_props[_id] = {**existing, **props}\n+ else:\n+ ctx_value.updated_props[_id] = props\n \n \n callback_context = CallbackContext()\ndiff --git a/dash/long_callback/_proxy_set_props.py b/dash/long_callback/_proxy_set_props.py\n--- a/dash/long_callback/_proxy_set_props.py\n+++ b/dash/long_callback/_proxy_set_props.py\n@@ -7,6 +7,12 @@\n def __init__(self, on_change):\n super().__init__()\n self.on_change = on_change\n+ self._data = {}\n \n def __setitem__(self, key, value):\n self.on_change(key, value)\n+ self._data.setdefault(key, {})\n+ self._data[key] = {**self._data[key], **value}\n+\n+ def get(self, key):\n+ return self._data.get(key)\n", "issue": "[BUG] set_props called multiple times only keep the last props.\nFor regular callbacks, when multiple call of `set_props` to the same component id, only the last call is saved.\r\n\r\nExample:\r\n```\r\nfrom dash import Dash, Input, html, set_props\r\n\r\napp = Dash()\r\n\r\napp.layout = [\r\n html.Button(\"start\", id=\"start\"),\r\n html.Div(\"initial\", id=\"output\"),\r\n]\r\n\r\[email protected](\r\n Input(\"start\", \"n_clicks\"),\r\n)\r\ndef on_click(_):\r\n set_props(\"output\", {\"children\": \"changed\"})\r\n set_props(\"output\", {\"style\": {\"background\": \"red\"}})\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n\r\n```\r\n\r\nClicking on the start button only set the background red, the text stays at \"initial\". The props should be merged and both updated.\r\n\r\n\n", "before_files": [{"content": "class ProxySetProps(dict):\n \"\"\"\n Defer dictionary item setter to run a custom function on change.\n Used by background callback manager to save the `set_props` data.\n \"\"\"\n\n def __init__(self, on_change):\n super().__init__()\n self.on_change = on_change\n\n def __setitem__(self, key, value):\n self.on_change(key, value)\n", "path": "dash/long_callback/_proxy_set_props.py"}, {"content": "import functools\nimport warnings\nimport json\nimport contextvars\nimport typing\n\nimport flask\n\nfrom . 
import exceptions\nfrom ._utils import AttributeDict, stringify_id\n\n\ncontext_value = contextvars.ContextVar(\"callback_context\")\ncontext_value.set({})\n\n\ndef has_context(func):\n @functools.wraps(func)\n def assert_context(*args, **kwargs):\n if not context_value.get():\n raise exceptions.MissingCallbackContextException(\n f\"dash.callback_context.{getattr(func, '__name__')} is only available from a callback!\"\n )\n return func(*args, **kwargs)\n\n return assert_context\n\n\ndef _get_context_value():\n return context_value.get()\n\n\nclass FalsyList(list):\n def __bool__(self):\n # for Python 3\n return False\n\n def __nonzero__(self):\n # for Python 2\n return False\n\n\nfalsy_triggered = FalsyList([{\"prop_id\": \".\", \"value\": None}])\n\n\n# pylint: disable=no-init\nclass CallbackContext:\n @property\n @has_context\n def inputs(self):\n return getattr(_get_context_value(), \"input_values\", {})\n\n @property\n @has_context\n def states(self):\n return getattr(_get_context_value(), \"state_values\", {})\n\n @property\n @has_context\n def triggered(self):\n \"\"\"\n Returns a list of all the Input props that changed and caused the callback to execute. It is empty when the\n callback is called on initial load, unless an Input prop got its value from another initial callback.\n Callbacks triggered by user actions typically have one item in triggered, unless the same action changes\n two props at once or the callback has several Input props that are all modified by another callback based on\n a single user action.\n\n Example: To get the id of the component that triggered the callback:\n `component_id = ctx.triggered[0]['prop_id'].split('.')[0]`\n\n Example: To detect initial call, empty triggered is not really empty, it's falsy so that you can use:\n `if ctx.triggered:`\n \"\"\"\n # For backward compatibility: previously `triggered` always had a\n # value - to avoid breaking existing apps, add a dummy item but\n # make the list still look falsy. So `if ctx.triggered` will make it\n # look empty, but you can still do `triggered[0][\"prop_id\"].split(\".\")`\n return getattr(_get_context_value(), \"triggered_inputs\", []) or falsy_triggered\n\n @property\n @has_context\n def triggered_prop_ids(self):\n \"\"\"\n Returns a dictionary of all the Input props that changed and caused the callback to execute. It is empty when\n the callback is called on initial load, unless an Input prop got its value from another initial callback.\n Callbacks triggered by user actions typically have one item in triggered, unless the same action changes\n two props at once or the callback has several Input props that are all modified by another callback based\n on a single user action.\n\n triggered_prop_ids (dict):\n - keys (str) : the triggered \"prop_id\" composed of \"component_id.component_property\"\n - values (str or dict): the id of the component that triggered the callback. 
Will be the dict id for pattern matching callbacks\n\n Example - regular callback\n {\"btn-1.n_clicks\": \"btn-1\"}\n\n Example - pattern matching callbacks:\n {'{\"index\":0,\"type\":\"filter-dropdown\"}.value': {\"index\":0,\"type\":\"filter-dropdown\"}}\n\n Example usage:\n `if \"btn-1.n_clicks\" in ctx.triggered_prop_ids:\n do_something()`\n \"\"\"\n triggered = getattr(_get_context_value(), \"triggered_inputs\", [])\n ids = AttributeDict({})\n for item in triggered:\n component_id, _, _ = item[\"prop_id\"].rpartition(\".\")\n ids[item[\"prop_id\"]] = component_id\n if component_id.startswith(\"{\"):\n ids[item[\"prop_id\"]] = AttributeDict(json.loads(component_id))\n return ids\n\n @property\n @has_context\n def triggered_id(self):\n \"\"\"\n Returns the component id (str or dict) of the Input component that triggered the callback.\n\n Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if\n multiple Inputs triggered the callback.\n\n Example usage:\n `if \"btn-1\" == ctx.triggered_id:\n do_something()`\n\n \"\"\"\n component_id = None\n if self.triggered:\n prop_id = self.triggered_prop_ids.first()\n component_id = self.triggered_prop_ids[prop_id]\n return component_id\n\n @property\n @has_context\n def args_grouping(self):\n \"\"\"\n args_grouping is a dict of the inputs used with flexible callback signatures. The keys are the variable names\n and the values are dictionaries containing:\n - \u201cid\u201d: (string or dict) the component id. If it\u2019s a pattern matching id, it will be a dict.\n - \u201cid_str\u201d: (str) for pattern matching ids, it\u2019s the stringified dict id with no white spaces.\n - \u201cproperty\u201d: (str) The component property used in the callback.\n - \u201cvalue\u201d: the value of the component property at the time the callback was fired.\n - \u201ctriggered\u201d: (bool)Whether this input triggered the callback.\n\n Example usage:\n @app.callback(\n Output(\"container\", \"children\"),\n inputs=dict(btn1=Input(\"btn-1\", \"n_clicks\"), btn2=Input(\"btn-2\", \"n_clicks\")),\n )\n def display(btn1, btn2):\n c = ctx.args_grouping\n if c.btn1.triggered:\n return f\"Button 1 clicked {btn1} times\"\n elif c.btn2.triggered:\n return f\"Button 2 clicked {btn2} times\"\n else:\n return \"No clicks yet\"\n\n \"\"\"\n return getattr(_get_context_value(), \"args_grouping\", [])\n\n @property\n @has_context\n def outputs_grouping(self):\n return getattr(_get_context_value(), \"outputs_grouping\", [])\n\n @property\n @has_context\n def outputs_list(self):\n if self.using_outputs_grouping:\n warnings.warn(\n \"outputs_list is deprecated, use outputs_grouping instead\",\n DeprecationWarning,\n )\n\n return getattr(_get_context_value(), \"outputs_list\", [])\n\n @property\n @has_context\n def inputs_list(self):\n if self.using_args_grouping:\n warnings.warn(\n \"inputs_list is deprecated, use args_grouping instead\",\n DeprecationWarning,\n )\n\n return getattr(_get_context_value(), \"inputs_list\", [])\n\n @property\n @has_context\n def states_list(self):\n if self.using_args_grouping:\n warnings.warn(\n \"states_list is deprecated, use args_grouping instead\",\n DeprecationWarning,\n )\n return getattr(_get_context_value(), \"states_list\", [])\n\n @property\n @has_context\n def response(self):\n return getattr(_get_context_value(), \"dash_response\")\n\n @staticmethod\n @has_context\n def record_timing(name, duration=None, description=None):\n \"\"\"Records timing information for a server resource.\n\n 
:param name: The name of the resource.\n :type name: string\n\n :param duration: The time in seconds to report. Internally, this\n is rounded to the nearest millisecond.\n :type duration: float or None\n\n :param description: A description of the resource.\n :type description: string or None\n \"\"\"\n timing_information = getattr(flask.g, \"timing_information\", {})\n\n if name in timing_information:\n raise KeyError(f'Duplicate resource name \"{name}\" found.')\n\n timing_information[name] = {\"dur\": round(duration * 1000), \"desc\": description}\n\n setattr(flask.g, \"timing_information\", timing_information)\n\n @property\n @has_context\n def using_args_grouping(self):\n \"\"\"\n Return True if this callback is using dictionary or nested groupings for\n Input/State dependencies, or if Input and State dependencies are interleaved\n \"\"\"\n return getattr(_get_context_value(), \"using_args_grouping\", [])\n\n @property\n @has_context\n def using_outputs_grouping(self):\n \"\"\"\n Return True if this callback is using dictionary or nested groupings for\n Output dependencies.\n \"\"\"\n return getattr(_get_context_value(), \"using_outputs_grouping\", [])\n\n @property\n @has_context\n def timing_information(self):\n return getattr(flask.g, \"timing_information\", {})\n\n @has_context\n def set_props(self, component_id: typing.Union[str, dict], props: dict):\n ctx_value = _get_context_value()\n _id = stringify_id(component_id)\n ctx_value.updated_props[_id] = props\n\n\ncallback_context = CallbackContext()\n\n\ndef set_props(component_id: typing.Union[str, dict], props: dict):\n \"\"\"\n Set the props for a component not included in the callback outputs.\n \"\"\"\n callback_context.set_props(component_id, props)\n", "path": "dash/_callback_context.py"}], "after_files": [{"content": "class ProxySetProps(dict):\n \"\"\"\n Defer dictionary item setter to run a custom function on change.\n Used by background callback manager to save the `set_props` data.\n \"\"\"\n\n def __init__(self, on_change):\n super().__init__()\n self.on_change = on_change\n self._data = {}\n\n def __setitem__(self, key, value):\n self.on_change(key, value)\n self._data.setdefault(key, {})\n self._data[key] = {**self._data[key], **value}\n\n def get(self, key):\n return self._data.get(key)\n", "path": "dash/long_callback/_proxy_set_props.py"}, {"content": "import functools\nimport warnings\nimport json\nimport contextvars\nimport typing\n\nimport flask\n\nfrom . 
import exceptions\nfrom ._utils import AttributeDict, stringify_id\n\n\ncontext_value = contextvars.ContextVar(\"callback_context\")\ncontext_value.set({})\n\n\ndef has_context(func):\n @functools.wraps(func)\n def assert_context(*args, **kwargs):\n if not context_value.get():\n raise exceptions.MissingCallbackContextException(\n f\"dash.callback_context.{getattr(func, '__name__')} is only available from a callback!\"\n )\n return func(*args, **kwargs)\n\n return assert_context\n\n\ndef _get_context_value():\n return context_value.get()\n\n\nclass FalsyList(list):\n def __bool__(self):\n # for Python 3\n return False\n\n def __nonzero__(self):\n # for Python 2\n return False\n\n\nfalsy_triggered = FalsyList([{\"prop_id\": \".\", \"value\": None}])\n\n\n# pylint: disable=no-init\nclass CallbackContext:\n @property\n @has_context\n def inputs(self):\n return getattr(_get_context_value(), \"input_values\", {})\n\n @property\n @has_context\n def states(self):\n return getattr(_get_context_value(), \"state_values\", {})\n\n @property\n @has_context\n def triggered(self):\n \"\"\"\n Returns a list of all the Input props that changed and caused the callback to execute. It is empty when the\n callback is called on initial load, unless an Input prop got its value from another initial callback.\n Callbacks triggered by user actions typically have one item in triggered, unless the same action changes\n two props at once or the callback has several Input props that are all modified by another callback based on\n a single user action.\n\n Example: To get the id of the component that triggered the callback:\n `component_id = ctx.triggered[0]['prop_id'].split('.')[0]`\n\n Example: To detect initial call, empty triggered is not really empty, it's falsy so that you can use:\n `if ctx.triggered:`\n \"\"\"\n # For backward compatibility: previously `triggered` always had a\n # value - to avoid breaking existing apps, add a dummy item but\n # make the list still look falsy. So `if ctx.triggered` will make it\n # look empty, but you can still do `triggered[0][\"prop_id\"].split(\".\")`\n return getattr(_get_context_value(), \"triggered_inputs\", []) or falsy_triggered\n\n @property\n @has_context\n def triggered_prop_ids(self):\n \"\"\"\n Returns a dictionary of all the Input props that changed and caused the callback to execute. It is empty when\n the callback is called on initial load, unless an Input prop got its value from another initial callback.\n Callbacks triggered by user actions typically have one item in triggered, unless the same action changes\n two props at once or the callback has several Input props that are all modified by another callback based\n on a single user action.\n\n triggered_prop_ids (dict):\n - keys (str) : the triggered \"prop_id\" composed of \"component_id.component_property\"\n - values (str or dict): the id of the component that triggered the callback. 
Will be the dict id for pattern matching callbacks\n\n Example - regular callback\n {\"btn-1.n_clicks\": \"btn-1\"}\n\n Example - pattern matching callbacks:\n {'{\"index\":0,\"type\":\"filter-dropdown\"}.value': {\"index\":0,\"type\":\"filter-dropdown\"}}\n\n Example usage:\n `if \"btn-1.n_clicks\" in ctx.triggered_prop_ids:\n do_something()`\n \"\"\"\n triggered = getattr(_get_context_value(), \"triggered_inputs\", [])\n ids = AttributeDict({})\n for item in triggered:\n component_id, _, _ = item[\"prop_id\"].rpartition(\".\")\n ids[item[\"prop_id\"]] = component_id\n if component_id.startswith(\"{\"):\n ids[item[\"prop_id\"]] = AttributeDict(json.loads(component_id))\n return ids\n\n @property\n @has_context\n def triggered_id(self):\n \"\"\"\n Returns the component id (str or dict) of the Input component that triggered the callback.\n\n Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if\n multiple Inputs triggered the callback.\n\n Example usage:\n `if \"btn-1\" == ctx.triggered_id:\n do_something()`\n\n \"\"\"\n component_id = None\n if self.triggered:\n prop_id = self.triggered_prop_ids.first()\n component_id = self.triggered_prop_ids[prop_id]\n return component_id\n\n @property\n @has_context\n def args_grouping(self):\n \"\"\"\n args_grouping is a dict of the inputs used with flexible callback signatures. The keys are the variable names\n and the values are dictionaries containing:\n - \u201cid\u201d: (string or dict) the component id. If it\u2019s a pattern matching id, it will be a dict.\n - \u201cid_str\u201d: (str) for pattern matching ids, it\u2019s the stringified dict id with no white spaces.\n - \u201cproperty\u201d: (str) The component property used in the callback.\n - \u201cvalue\u201d: the value of the component property at the time the callback was fired.\n - \u201ctriggered\u201d: (bool)Whether this input triggered the callback.\n\n Example usage:\n @app.callback(\n Output(\"container\", \"children\"),\n inputs=dict(btn1=Input(\"btn-1\", \"n_clicks\"), btn2=Input(\"btn-2\", \"n_clicks\")),\n )\n def display(btn1, btn2):\n c = ctx.args_grouping\n if c.btn1.triggered:\n return f\"Button 1 clicked {btn1} times\"\n elif c.btn2.triggered:\n return f\"Button 2 clicked {btn2} times\"\n else:\n return \"No clicks yet\"\n\n \"\"\"\n return getattr(_get_context_value(), \"args_grouping\", [])\n\n @property\n @has_context\n def outputs_grouping(self):\n return getattr(_get_context_value(), \"outputs_grouping\", [])\n\n @property\n @has_context\n def outputs_list(self):\n if self.using_outputs_grouping:\n warnings.warn(\n \"outputs_list is deprecated, use outputs_grouping instead\",\n DeprecationWarning,\n )\n\n return getattr(_get_context_value(), \"outputs_list\", [])\n\n @property\n @has_context\n def inputs_list(self):\n if self.using_args_grouping:\n warnings.warn(\n \"inputs_list is deprecated, use args_grouping instead\",\n DeprecationWarning,\n )\n\n return getattr(_get_context_value(), \"inputs_list\", [])\n\n @property\n @has_context\n def states_list(self):\n if self.using_args_grouping:\n warnings.warn(\n \"states_list is deprecated, use args_grouping instead\",\n DeprecationWarning,\n )\n return getattr(_get_context_value(), \"states_list\", [])\n\n @property\n @has_context\n def response(self):\n return getattr(_get_context_value(), \"dash_response\")\n\n @staticmethod\n @has_context\n def record_timing(name, duration=None, description=None):\n \"\"\"Records timing information for a server resource.\n\n 
:param name: The name of the resource.\n :type name: string\n\n :param duration: The time in seconds to report. Internally, this\n is rounded to the nearest millisecond.\n :type duration: float or None\n\n :param description: A description of the resource.\n :type description: string or None\n \"\"\"\n timing_information = getattr(flask.g, \"timing_information\", {})\n\n if name in timing_information:\n raise KeyError(f'Duplicate resource name \"{name}\" found.')\n\n timing_information[name] = {\"dur\": round(duration * 1000), \"desc\": description}\n\n setattr(flask.g, \"timing_information\", timing_information)\n\n @property\n @has_context\n def using_args_grouping(self):\n \"\"\"\n Return True if this callback is using dictionary or nested groupings for\n Input/State dependencies, or if Input and State dependencies are interleaved\n \"\"\"\n return getattr(_get_context_value(), \"using_args_grouping\", [])\n\n @property\n @has_context\n def using_outputs_grouping(self):\n \"\"\"\n Return True if this callback is using dictionary or nested groupings for\n Output dependencies.\n \"\"\"\n return getattr(_get_context_value(), \"using_outputs_grouping\", [])\n\n @property\n @has_context\n def timing_information(self):\n return getattr(flask.g, \"timing_information\", {})\n\n @has_context\n def set_props(self, component_id: typing.Union[str, dict], props: dict):\n ctx_value = _get_context_value()\n _id = stringify_id(component_id)\n existing = ctx_value.updated_props.get(_id)\n if existing is not None:\n ctx_value.updated_props[_id] = {**existing, **props}\n else:\n ctx_value.updated_props[_id] = props\n\n\ncallback_context = CallbackContext()\n\n\ndef set_props(component_id: typing.Union[str, dict], props: dict):\n \"\"\"\n Set the props for a component not included in the callback outputs.\n \"\"\"\n callback_context.set_props(component_id, props)\n", "path": "dash/_callback_context.py"}]}
| 3,267 | 320 |
gh_patches_debug_3281
|
rasdani/github-patches
|
git_diff
|
bentoml__BentoML-951
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When I built a H2O-based image, the following error occurred
**Describe the bug**
When I built a H2O-based image, the following error occurred. From the Dockerfile generated by BentoML, the base image is bentoml/model-server:0.8.1, and JRE is indeed not installed
I used service.py to build a bentoML bundle
```python
import bentoml
from bentoml.adapters import DataframeInput
from bentoml.artifact import H2oModelArtifact
from customize import preprocessing, postprocessing
@bentoml.env(auto_pip_dependencies=True)
@bentoml.artifacts([H2oModelArtifact('model')])
class LoanDefaultPrediction(bentoml.BentoService):
@bentoml.api(input=DataframeInput())
def predict(self, df):
model_input = preprocessing(df)
model_output = self.artifacts.model.predict(model_input)
return postprocessing(model_output)
if __name__ == "__main__":
import h2o
h2o.init(strict_version_check=False)
model = h2o.load_model('H2O_AutoML_20200720_153457')
model_service = LoanDefaultPrediction()
model_service.pack("model", model)
saved_path = model_service.save()
```
And another customize.py to pre process
```python
import h2o
def preprocessing(model_input):
return h2o.H2OFrame(model_input, na_strings=['NaN'])
def postprocessing(model_output):
return model_output.as_data_frame()
```
Finally, After I build service image with this bundle, I run this docker image with below log:
```
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/gunicorn/arbiter.py", line 583, in spawn_worker
worker.init_process()
File "/opt/conda/lib/python3.7/site-packages/gunicorn/workers/base.py", line 119, in init_process
self.load_wsgi()
File "/opt/conda/lib/python3.7/site-packages/gunicorn/workers/base.py", line 144, in load_wsgi
self.wsgi = self.app.wsgi()
File "/opt/conda/lib/python3.7/site-packages/gunicorn/app/base.py", line 67, in wsgi
self.callable = self.load()
File "/opt/conda/lib/python3.7/site-packages/bentoml/server/gunicorn_server.py", line 92, in load
bento_service = load(self.bento_service_bundle_path)
File "/opt/conda/lib/python3.7/site-packages/bentoml/saved_bundle/loader.py", line 179, in load
svc = svc_cls()
File "/opt/conda/lib/python3.7/site-packages/bentoml/service.py", line 526, in __init__
self._load_artifacts(self._bento_service_bundle_path)
File "/opt/conda/lib/python3.7/site-packages/bentoml/service.py", line 734, in _load_artifacts
packed_artifact = artifact.load(artifacts_path)
File "/opt/conda/lib/python3.7/site-packages/bentoml/artifact/h2o_model_artifact.py", line 79, in load
h2o.init()
File "/opt/conda/lib/python3.7/site-packages/h2o/h2o.py", line 307, in init
bind_to_localhost=bind_to_localhost)
File "/opt/conda/lib/python3.7/site-packages/h2o/backend/server.py", line 143, in start
bind_to_localhost=bind_to_localhost, log_dir=log_dir, log_level=log_level, max_log_file_size=max_log_file_size)
File "/opt/conda/lib/python3.7/site-packages/h2o/backend/server.py", line 276, in _launch_server
java = self._find_java()
File "/opt/conda/lib/python3.7/site-packages/h2o/backend/server.py", line 441, in _find_java
raise H2OStartupError("Cannot find Java. Please install the latest JRE from\n"
h2o.exceptions.H2OStartupError: Cannot find Java. Please install the latest JRE from
http://www.oracle.com/technetwork/java/javase/downloads/index.html
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bentoml/artifact/h2o_model_artifact.py`
Content:
```
1 # Copyright 2019 Atalaya Tech, Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import shutil
17
18 from bentoml.artifact import BentoServiceArtifact
19 from bentoml.exceptions import MissingDependencyException
20 from bentoml.service_env import BentoServiceEnv
21
22
23 class H2oModelArtifact(BentoServiceArtifact):
24 """Abstraction for saving/loading objects with h2o.save_model and h2o.load_model
25
26 Args:
27 name (str): Name for this h2o artifact..
28
29 Raises:
30 MissingDependencyException: h2o package is required to use H2o model artifact
31
32 Example usage:
33
34 >>> import h2o
35 >>> h2o.init()
36 >>>
37 >>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator
38 >>> model_to_save = H2ODeepLearningEstimator(...)
39 >>> # train model with data
40 >>> data = h2o.import_file(...)
41 >>> model_to_save.train(...)
42 >>>
43 >>> import bentoml
44 >>> from bentoml.artifact import H2oModelArtifact
45 >>> from bentoml.adapters import DataframeInput
46 >>>
47 >>> @bentoml.artifacts([H2oModelArtifact('model')])
48 >>> @bentoml.env(auto_pip_dependencies=True)
49 >>> class H2oModelService(bentoml.BentoService):
50 >>>
51 >>> @bentoml.api(input=DataframeInput())
52 >>> def predict(self, df):
53 >>> hf = h2o.H2OFrame(df)
54 >>> predictions = self.artifacts.model.predict(hf)
55 >>> return predictions.as_data_frame()
56 >>>
57 >>> svc = H2oModelService()
58 >>>
59 >>> svc.pack('model', model_to_save)
60 """
61
62 def __init__(self, name):
63 super(H2oModelArtifact, self).__init__(name)
64
65 self._model = None
66
67 def set_dependencies(self, env: BentoServiceEnv):
68 env.add_pip_dependencies_if_missing(['h2o'])
69
70 def _model_file_path(self, base_path):
71 return os.path.join(base_path, self.name)
72
73 def pack(self, model): # pylint:disable=arguments-differ
74 self._model = model
75 return self
76
77 def load(self, path):
78 try:
79 import h2o
80 except ImportError:
81 raise MissingDependencyException(
82 "h2o package is required to use H2oModelArtifact"
83 )
84
85 h2o.init()
86 model = h2o.load_model(self._model_file_path(path))
87 self._model = model
88 return self
89
90 def save(self, dst):
91 try:
92 import h2o
93 except ImportError:
94 raise MissingDependencyException(
95 "h2o package is required to use H2oModelArtifact"
96 )
97
98 h2o_saved_path = h2o.save_model(model=self._model, path=dst, force=True)
99 shutil.move(h2o_saved_path, self._model_file_path(dst))
100 return
101
102 def get(self):
103 return self._model
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bentoml/artifact/h2o_model_artifact.py b/bentoml/artifact/h2o_model_artifact.py
--- a/bentoml/artifact/h2o_model_artifact.py
+++ b/bentoml/artifact/h2o_model_artifact.py
@@ -66,6 +66,7 @@
def set_dependencies(self, env: BentoServiceEnv):
env.add_pip_dependencies_if_missing(['h2o'])
+ env.add_conda_dependencies(['openjdk'])
def _model_file_path(self, base_path):
return os.path.join(base_path, self.name)
|
{"golden_diff": "diff --git a/bentoml/artifact/h2o_model_artifact.py b/bentoml/artifact/h2o_model_artifact.py\n--- a/bentoml/artifact/h2o_model_artifact.py\n+++ b/bentoml/artifact/h2o_model_artifact.py\n@@ -66,6 +66,7 @@\n \n def set_dependencies(self, env: BentoServiceEnv):\n env.add_pip_dependencies_if_missing(['h2o'])\n+ env.add_conda_dependencies(['openjdk'])\n \n def _model_file_path(self, base_path):\n return os.path.join(base_path, self.name)\n", "issue": "When I built a H2O-based image, the following error occurred\n**Describe the bug**\r\nWhen I built a H2O-based image, the following error occurred. From the Dockerfile generated by BentoML, the base image is bentoml/model-server:0.8.1, and JRE is indeed not installed\r\n\r\n\r\nI used service.py to build a bentoML bundle\r\n```python\r\nimport bentoml\r\nfrom bentoml.adapters import DataframeInput\r\nfrom bentoml.artifact import H2oModelArtifact\r\nfrom customize import preprocessing, postprocessing\r\n\r\n\r\[email protected](auto_pip_dependencies=True)\r\[email protected]([H2oModelArtifact('model')])\r\nclass LoanDefaultPrediction(bentoml.BentoService):\r\n @bentoml.api(input=DataframeInput())\r\n def predict(self, df):\r\n model_input = preprocessing(df)\r\n model_output = self.artifacts.model.predict(model_input)\r\n return postprocessing(model_output)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import h2o\r\n h2o.init(strict_version_check=False)\r\n model = h2o.load_model('H2O_AutoML_20200720_153457')\r\n\r\n model_service = LoanDefaultPrediction()\r\n model_service.pack(\"model\", model)\r\n\r\n saved_path = model_service.save()\r\n```\r\nAnd another customize.py to pre process\r\n```python\r\nimport h2o\r\n\r\ndef preprocessing(model_input):\r\n return h2o.H2OFrame(model_input, na_strings=['NaN'])\r\n\r\n\r\ndef postprocessing(model_output):\r\n return model_output.as_data_frame()\r\n```\r\n\r\nFinally, After I build service image with this bundle, I run this docker image with below log:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.7/site-packages/gunicorn/arbiter.py\", line 583, in spawn_worker\r\n worker.init_process()\r\n File \"/opt/conda/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 119, in init_process\r\n self.load_wsgi()\r\n File \"/opt/conda/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 144, in load_wsgi\r\n self.wsgi = self.app.wsgi()\r\n File \"/opt/conda/lib/python3.7/site-packages/gunicorn/app/base.py\", line 67, in wsgi\r\n self.callable = self.load()\r\n File \"/opt/conda/lib/python3.7/site-packages/bentoml/server/gunicorn_server.py\", line 92, in load\r\n bento_service = load(self.bento_service_bundle_path)\r\n File \"/opt/conda/lib/python3.7/site-packages/bentoml/saved_bundle/loader.py\", line 179, in load\r\n svc = svc_cls()\r\n File \"/opt/conda/lib/python3.7/site-packages/bentoml/service.py\", line 526, in __init__\r\n self._load_artifacts(self._bento_service_bundle_path)\r\n File \"/opt/conda/lib/python3.7/site-packages/bentoml/service.py\", line 734, in _load_artifacts\r\n packed_artifact = artifact.load(artifacts_path)\r\n File \"/opt/conda/lib/python3.7/site-packages/bentoml/artifact/h2o_model_artifact.py\", line 79, in load\r\n h2o.init()\r\n File \"/opt/conda/lib/python3.7/site-packages/h2o/h2o.py\", line 307, in init\r\n bind_to_localhost=bind_to_localhost)\r\n File \"/opt/conda/lib/python3.7/site-packages/h2o/backend/server.py\", line 143, in start\r\n bind_to_localhost=bind_to_localhost, log_dir=log_dir, log_level=log_level, 
max_log_file_size=max_log_file_size)\r\n File \"/opt/conda/lib/python3.7/site-packages/h2o/backend/server.py\", line 276, in _launch_server\r\n java = self._find_java()\r\n File \"/opt/conda/lib/python3.7/site-packages/h2o/backend/server.py\", line 441, in _find_java\r\n raise H2OStartupError(\"Cannot find Java. Please install the latest JRE from\\n\"\r\nh2o.exceptions.H2OStartupError: Cannot find Java. Please install the latest JRE from\r\nhttp://www.oracle.com/technetwork/java/javase/downloads/index.html\r\n```\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nfrom bentoml.artifact import BentoServiceArtifact\nfrom bentoml.exceptions import MissingDependencyException\nfrom bentoml.service_env import BentoServiceEnv\n\n\nclass H2oModelArtifact(BentoServiceArtifact):\n \"\"\"Abstraction for saving/loading objects with h2o.save_model and h2o.load_model\n\n Args:\n name (str): Name for this h2o artifact..\n\n Raises:\n MissingDependencyException: h2o package is required to use H2o model artifact\n\n Example usage:\n\n >>> import h2o\n >>> h2o.init()\n >>>\n >>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator\n >>> model_to_save = H2ODeepLearningEstimator(...)\n >>> # train model with data\n >>> data = h2o.import_file(...)\n >>> model_to_save.train(...)\n >>>\n >>> import bentoml\n >>> from bentoml.artifact import H2oModelArtifact\n >>> from bentoml.adapters import DataframeInput\n >>>\n >>> @bentoml.artifacts([H2oModelArtifact('model')])\n >>> @bentoml.env(auto_pip_dependencies=True)\n >>> class H2oModelService(bentoml.BentoService):\n >>>\n >>> @bentoml.api(input=DataframeInput())\n >>> def predict(self, df):\n >>> hf = h2o.H2OFrame(df)\n >>> predictions = self.artifacts.model.predict(hf)\n >>> return predictions.as_data_frame()\n >>>\n >>> svc = H2oModelService()\n >>>\n >>> svc.pack('model', model_to_save)\n \"\"\"\n\n def __init__(self, name):\n super(H2oModelArtifact, self).__init__(name)\n\n self._model = None\n\n def set_dependencies(self, env: BentoServiceEnv):\n env.add_pip_dependencies_if_missing(['h2o'])\n\n def _model_file_path(self, base_path):\n return os.path.join(base_path, self.name)\n\n def pack(self, model): # pylint:disable=arguments-differ\n self._model = model\n return self\n\n def load(self, path):\n try:\n import h2o\n except ImportError:\n raise MissingDependencyException(\n \"h2o package is required to use H2oModelArtifact\"\n )\n\n h2o.init()\n model = h2o.load_model(self._model_file_path(path))\n self._model = model\n return self\n\n def save(self, dst):\n try:\n import h2o\n except ImportError:\n raise MissingDependencyException(\n \"h2o package is required to use H2oModelArtifact\"\n )\n\n h2o_saved_path = h2o.save_model(model=self._model, path=dst, force=True)\n shutil.move(h2o_saved_path, self._model_file_path(dst))\n return\n\n def get(self):\n return self._model\n", "path": "bentoml/artifact/h2o_model_artifact.py"}], "after_files": [{"content": "# 
Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nfrom bentoml.artifact import BentoServiceArtifact\nfrom bentoml.exceptions import MissingDependencyException\nfrom bentoml.service_env import BentoServiceEnv\n\n\nclass H2oModelArtifact(BentoServiceArtifact):\n \"\"\"Abstraction for saving/loading objects with h2o.save_model and h2o.load_model\n\n Args:\n name (str): Name for this h2o artifact..\n\n Raises:\n MissingDependencyException: h2o package is required to use H2o model artifact\n\n Example usage:\n\n >>> import h2o\n >>> h2o.init()\n >>>\n >>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator\n >>> model_to_save = H2ODeepLearningEstimator(...)\n >>> # train model with data\n >>> data = h2o.import_file(...)\n >>> model_to_save.train(...)\n >>>\n >>> import bentoml\n >>> from bentoml.artifact import H2oModelArtifact\n >>> from bentoml.adapters import DataframeInput\n >>>\n >>> @bentoml.artifacts([H2oModelArtifact('model')])\n >>> @bentoml.env(auto_pip_dependencies=True)\n >>> class H2oModelService(bentoml.BentoService):\n >>>\n >>> @bentoml.api(input=DataframeInput())\n >>> def predict(self, df):\n >>> hf = h2o.H2OFrame(df)\n >>> predictions = self.artifacts.model.predict(hf)\n >>> return predictions.as_data_frame()\n >>>\n >>> svc = H2oModelService()\n >>>\n >>> svc.pack('model', model_to_save)\n \"\"\"\n\n def __init__(self, name):\n super(H2oModelArtifact, self).__init__(name)\n\n self._model = None\n\n def set_dependencies(self, env: BentoServiceEnv):\n env.add_pip_dependencies_if_missing(['h2o'])\n env.add_conda_dependencies(['openjdk'])\n\n def _model_file_path(self, base_path):\n return os.path.join(base_path, self.name)\n\n def pack(self, model): # pylint:disable=arguments-differ\n self._model = model\n return self\n\n def load(self, path):\n try:\n import h2o\n except ImportError:\n raise MissingDependencyException(\n \"h2o package is required to use H2oModelArtifact\"\n )\n\n h2o.init()\n model = h2o.load_model(self._model_file_path(path))\n self._model = model\n return self\n\n def save(self, dst):\n try:\n import h2o\n except ImportError:\n raise MissingDependencyException(\n \"h2o package is required to use H2oModelArtifact\"\n )\n\n h2o_saved_path = h2o.save_model(model=self._model, path=dst, force=True)\n shutil.move(h2o_saved_path, self._model_file_path(dst))\n return\n\n def get(self):\n return self._model\n", "path": "bentoml/artifact/h2o_model_artifact.py"}]}
| 2,237 | 137 |
gh_patches_debug_5130
|
rasdani/github-patches
|
git_diff
|
obspy__obspy-1916
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid version schema "0.0.0.dev+.gXXX" - may not work with newer versions of setuptools, pip, and PyPI
```C:\Miniconda-x64\envs\test\lib\site-packages\setuptools\dist.py:350: UserWarning: The version specified (u'0.0.0.dev+.g5c641b2cb1') is an invalid version, this may not work as expected with newer versions of setuptools, pip, and PyPI. Please see PEP 440 for more details.```
see also https://ci.appveyor.com/project/obspy/obspy/build/1.0.5140-warnings/job/rxhusf826vk5atia
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/core/util/version.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Author: Douglas Creager <[email protected]>
3 # This file is placed into the public domain.
4
5 # Calculates the current version number. If possible, this is the
6 # output of “git describe”, modified to conform to the versioning
7 # scheme that setuptools uses. If “git describe” returns an error
8 # (most likely because we're in an unpacked copy of a release tarball,
9 # rather than in a git working copy), then we fall back on reading the
10 # contents of the RELEASE-VERSION file.
11 #
12 # To use this script, simply import it your setup.py file, and use the
13 # results of get_git_version() as your package version:
14 #
15 # from version import *
16 #
17 # setup(
18 # version=get_git_version(),
19 # .
20 # .
21 # .
22 # )
23 #
24 # This will automatically update the RELEASE-VERSION file, if
25 # necessary. Note that the RELEASE-VERSION file should *not* be
26 # checked into git; please add it to your top-level .gitignore file.
27 #
28 # You'll probably want to distribute the RELEASE-VERSION file in your
29 # sdist tarballs; to do this, just create a MANIFEST.in file that
30 # contains the following line:
31 #
32 # include RELEASE-VERSION
33
34 # NO IMPORTS FROM OBSPY OR FUTURE IN THIS FILE! (file gets used at
35 # installation time)
36 import inspect
37 import io
38 import os
39 import re
40 from subprocess import STDOUT, CalledProcessError, check_output
41
42
43 __all__ = ("get_git_version")
44
45 script_dir = os.path.abspath(os.path.dirname(inspect.getfile(
46 inspect.currentframe())))
47 OBSPY_ROOT = os.path.abspath(os.path.join(script_dir, os.pardir,
48 os.pardir, os.pardir))
49 VERSION_FILE = os.path.join(OBSPY_ROOT, "obspy", "RELEASE-VERSION")
50
51
52 def call_git_describe(abbrev=10, dirty=True,
53 append_remote_tracking_branch=True):
54 try:
55 p = check_output(['git', 'rev-parse', '--show-toplevel'],
56 cwd=OBSPY_ROOT, stderr=STDOUT)
57 path = p.decode().strip()
58 except (OSError, CalledProcessError):
59 return None
60
61 if os.path.normpath(path) != OBSPY_ROOT:
62 return None
63
64 command = ['git', 'describe', '--abbrev=%d' % abbrev, '--always', '--tags']
65 if dirty:
66 command.append("--dirty")
67 try:
68 p = check_output(['git', 'describe', '--dirty', '--abbrev=%d' % abbrev,
69 '--always', '--tags'],
70 cwd=OBSPY_ROOT, stderr=STDOUT)
71 line = p.decode().strip()
72 except (OSError, CalledProcessError):
73 return None
74
75 remote_tracking_branch = None
76 if append_remote_tracking_branch:
77 try:
78 # find out local alias of remote and name of remote tracking branch
79 p = check_output(['git', 'branch', '-vv'],
80 cwd=OBSPY_ROOT, stderr=STDOUT)
81 remote_info = [line_.rstrip()
82 for line_ in p.decode().splitlines()]
83 remote_info = [line_ for line_ in remote_info
84 if line_.startswith('*')][0]
85 remote_info = re.sub(r".*? \[([^ :]*).*?\] .*", r"\1", remote_info)
86 remote, branch = remote_info.split("/")
87 # find out real name of remote
88 p = check_output(['git', 'remote', '-v'],
89 cwd=OBSPY_ROOT, stderr=STDOUT)
90 stdout = [line_.strip() for line_ in p.decode().splitlines()]
91 remote = [line_ for line_ in stdout
92 if line_.startswith(remote)][0].split()[1]
93 if remote.startswith("[email protected]:"):
94 remote = re.sub(r"[email protected]:(.*?)/.*", r"\1", remote)
95 elif remote.startswith("https://github.com/"):
96 remote = re.sub(r"https://github.com/(.*?)/.*", r"\1", remote)
97 elif remote.startswith("git://github.com"):
98 remote = re.sub(r"git://github.com/(.*?)/.*", r"\1", remote)
99 else:
100 remote = None
101 if remote is not None:
102 remote_tracking_branch = re.sub(r'[^A-Za-z0-9._-]', r'_',
103 '%s-%s' % (remote, branch))
104 except (IndexError, OSError, ValueError, CalledProcessError):
105 pass
106
107 # (this line prevents official releases)
108 # should work again now, see #482 and obspy/obspy@b437f31
109 if "-" not in line and "." not in line:
110 version = "0.0.0.dev+.g%s" % line
111 else:
112 parts = line.split('-', 1)
113 version = parts[0]
114 try:
115 version += '.post+' + parts[1]
116 if remote_tracking_branch is not None:
117 version += '.' + remote_tracking_branch
118 # IndexError means we are at a release version tag cleanly,
119 # add nothing additional
120 except IndexError:
121 pass
122 return version
123
124
125 def read_release_version():
126 try:
127 with io.open(VERSION_FILE, "rt") as fh:
128 version = fh.readline()
129 return version.strip()
130 except IOError:
131 return None
132
133
134 def write_release_version(version):
135 with io.open(VERSION_FILE, "wb") as fh:
136 fh.write(("%s\n" % version).encode('ascii', 'strict'))
137
138
139 def get_git_version(abbrev=10, dirty=True, append_remote_tracking_branch=True):
140 # Read in the version that's currently in RELEASE-VERSION.
141 release_version = read_release_version()
142
143 # First try to get the current version using “git describe”.
144 version = call_git_describe(
145 abbrev, dirty=dirty,
146 append_remote_tracking_branch=append_remote_tracking_branch)
147
148 # If that doesn't work, fall back on the value that's in
149 # RELEASE-VERSION.
150 if version is None:
151 version = release_version
152
153 # If we still don't have anything, that's an error.
154 if version is None:
155 return '0.0.0+archive'
156
157 # pip uses its normalized version number (strict PEP440) instead of our
158 # original version number, so we bow to pip and use the normalized version
159 # number internally, too, to avoid discrepancies.
160 version = _normalize_version(version)
161
162 # If the current version is different from what's in the
163 # RELEASE-VERSION file, update the file to be current.
164 if version != release_version:
165 write_release_version(version)
166
167 # Finally, return the current version.
168 return version
169
170
171 def _normalize_version(version):
172 """
173 Normalize version number string to adhere with PEP440 strictly.
174 """
175 # we have a clean release version:
176 if re.match(r'^[0-9]+?\.[0-9]+?\.[0-9]+?$', version):
177 return version
178 # we have a release candidate version:
179 elif re.match(r'^[0-9]+?\.[0-9]+?\.[0-9]+?rc[0-9]+?$', version):
180 return version
181 # we have an old-style version (i.e. a git describe string), prepare it for
182 # the rest of clean up, i.e. put the '.post+' as separator for the local
183 # version number part
184 elif re.match(r'^[0-9]+?\.[0-9]+?\.[0-9]+?-[0-9]+?-g[0-9a-z]+?$', version):
185 version = re.sub(r'-', '.post+', version, count=1)
186 # only adapt local version part right
187 version = re.match(r'(.*?\+)(.*)', version)
188 # no upper case letters
189 local_version = version.group(2).lower()
190 # only alphanumeric and "." in local part
191 local_version = re.sub(r'[^A-Za-z0-9.]', r'.', local_version)
192 version = version.group(1) + local_version
193 # make sure there's a "0" after ".post"
194 version = re.sub(r'\.post\+', r'.post0+', version)
195 return version
196
197
198 if __name__ == "__main__":
199 print(get_git_version())
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/obspy/core/util/version.py b/obspy/core/util/version.py
--- a/obspy/core/util/version.py
+++ b/obspy/core/util/version.py
@@ -107,7 +107,7 @@
# (this line prevents official releases)
# should work again now, see #482 and obspy/obspy@b437f31
if "-" not in line and "." not in line:
- version = "0.0.0.dev+.g%s" % line
+ version = "0.0.0.dev+0.g%s" % line
else:
parts = line.split('-', 1)
version = parts[0]
|
{"golden_diff": "diff --git a/obspy/core/util/version.py b/obspy/core/util/version.py\n--- a/obspy/core/util/version.py\n+++ b/obspy/core/util/version.py\n@@ -107,7 +107,7 @@\n # (this line prevents official releases)\n # should work again now, see #482 and obspy/obspy@b437f31\n if \"-\" not in line and \".\" not in line:\n- version = \"0.0.0.dev+.g%s\" % line\n+ version = \"0.0.0.dev+0.g%s\" % line\n else:\n parts = line.split('-', 1)\n version = parts[0]\n", "issue": "Invalid version schema \"0.0.0.dev+.gXXX\" - may not work with newer versions of setuptools, pip, and PyPI\n```C:\\Miniconda-x64\\envs\\test\\lib\\site-packages\\setuptools\\dist.py:350: UserWarning: The version specified (u'0.0.0.dev+.g5c641b2cb1') is an invalid version, this may not work as expected with newer versions of setuptools, pip, and PyPI. Please see PEP 440 for more details.```\r\n\r\nsee also https://ci.appveyor.com/project/obspy/obspy/build/1.0.5140-warnings/job/rxhusf826vk5atia\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Author: Douglas Creager <[email protected]>\n# This file is placed into the public domain.\n\n# Calculates the current version number. If possible, this is the\n# output of \u201cgit describe\u201d, modified to conform to the versioning\n# scheme that setuptools uses. If \u201cgit describe\u201d returns an error\n# (most likely because we're in an unpacked copy of a release tarball,\n# rather than in a git working copy), then we fall back on reading the\n# contents of the RELEASE-VERSION file.\n#\n# To use this script, simply import it your setup.py file, and use the\n# results of get_git_version() as your package version:\n#\n# from version import *\n#\n# setup(\n# version=get_git_version(),\n# .\n# .\n# .\n# )\n#\n# This will automatically update the RELEASE-VERSION file, if\n# necessary. Note that the RELEASE-VERSION file should *not* be\n# checked into git; please add it to your top-level .gitignore file.\n#\n# You'll probably want to distribute the RELEASE-VERSION file in your\n# sdist tarballs; to do this, just create a MANIFEST.in file that\n# contains the following line:\n#\n# include RELEASE-VERSION\n\n# NO IMPORTS FROM OBSPY OR FUTURE IN THIS FILE! 
(file gets used at\n# installation time)\nimport inspect\nimport io\nimport os\nimport re\nfrom subprocess import STDOUT, CalledProcessError, check_output\n\n\n__all__ = (\"get_git_version\")\n\nscript_dir = os.path.abspath(os.path.dirname(inspect.getfile(\n inspect.currentframe())))\nOBSPY_ROOT = os.path.abspath(os.path.join(script_dir, os.pardir,\n os.pardir, os.pardir))\nVERSION_FILE = os.path.join(OBSPY_ROOT, \"obspy\", \"RELEASE-VERSION\")\n\n\ndef call_git_describe(abbrev=10, dirty=True,\n append_remote_tracking_branch=True):\n try:\n p = check_output(['git', 'rev-parse', '--show-toplevel'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n path = p.decode().strip()\n except (OSError, CalledProcessError):\n return None\n\n if os.path.normpath(path) != OBSPY_ROOT:\n return None\n\n command = ['git', 'describe', '--abbrev=%d' % abbrev, '--always', '--tags']\n if dirty:\n command.append(\"--dirty\")\n try:\n p = check_output(['git', 'describe', '--dirty', '--abbrev=%d' % abbrev,\n '--always', '--tags'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n line = p.decode().strip()\n except (OSError, CalledProcessError):\n return None\n\n remote_tracking_branch = None\n if append_remote_tracking_branch:\n try:\n # find out local alias of remote and name of remote tracking branch\n p = check_output(['git', 'branch', '-vv'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n remote_info = [line_.rstrip()\n for line_ in p.decode().splitlines()]\n remote_info = [line_ for line_ in remote_info\n if line_.startswith('*')][0]\n remote_info = re.sub(r\".*? \\[([^ :]*).*?\\] .*\", r\"\\1\", remote_info)\n remote, branch = remote_info.split(\"/\")\n # find out real name of remote\n p = check_output(['git', 'remote', '-v'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n stdout = [line_.strip() for line_ in p.decode().splitlines()]\n remote = [line_ for line_ in stdout\n if line_.startswith(remote)][0].split()[1]\n if remote.startswith(\"[email protected]:\"):\n remote = re.sub(r\"[email protected]:(.*?)/.*\", r\"\\1\", remote)\n elif remote.startswith(\"https://github.com/\"):\n remote = re.sub(r\"https://github.com/(.*?)/.*\", r\"\\1\", remote)\n elif remote.startswith(\"git://github.com\"):\n remote = re.sub(r\"git://github.com/(.*?)/.*\", r\"\\1\", remote)\n else:\n remote = None\n if remote is not None:\n remote_tracking_branch = re.sub(r'[^A-Za-z0-9._-]', r'_',\n '%s-%s' % (remote, branch))\n except (IndexError, OSError, ValueError, CalledProcessError):\n pass\n\n # (this line prevents official releases)\n # should work again now, see #482 and obspy/obspy@b437f31\n if \"-\" not in line and \".\" not in line:\n version = \"0.0.0.dev+.g%s\" % line\n else:\n parts = line.split('-', 1)\n version = parts[0]\n try:\n version += '.post+' + parts[1]\n if remote_tracking_branch is not None:\n version += '.' 
+ remote_tracking_branch\n # IndexError means we are at a release version tag cleanly,\n # add nothing additional\n except IndexError:\n pass\n return version\n\n\ndef read_release_version():\n try:\n with io.open(VERSION_FILE, \"rt\") as fh:\n version = fh.readline()\n return version.strip()\n except IOError:\n return None\n\n\ndef write_release_version(version):\n with io.open(VERSION_FILE, \"wb\") as fh:\n fh.write((\"%s\\n\" % version).encode('ascii', 'strict'))\n\n\ndef get_git_version(abbrev=10, dirty=True, append_remote_tracking_branch=True):\n # Read in the version that's currently in RELEASE-VERSION.\n release_version = read_release_version()\n\n # First try to get the current version using \u201cgit describe\u201d.\n version = call_git_describe(\n abbrev, dirty=dirty,\n append_remote_tracking_branch=append_remote_tracking_branch)\n\n # If that doesn't work, fall back on the value that's in\n # RELEASE-VERSION.\n if version is None:\n version = release_version\n\n # If we still don't have anything, that's an error.\n if version is None:\n return '0.0.0+archive'\n\n # pip uses its normalized version number (strict PEP440) instead of our\n # original version number, so we bow to pip and use the normalized version\n # number internally, too, to avoid discrepancies.\n version = _normalize_version(version)\n\n # If the current version is different from what's in the\n # RELEASE-VERSION file, update the file to be current.\n if version != release_version:\n write_release_version(version)\n\n # Finally, return the current version.\n return version\n\n\ndef _normalize_version(version):\n \"\"\"\n Normalize version number string to adhere with PEP440 strictly.\n \"\"\"\n # we have a clean release version:\n if re.match(r'^[0-9]+?\\.[0-9]+?\\.[0-9]+?$', version):\n return version\n # we have a release candidate version:\n elif re.match(r'^[0-9]+?\\.[0-9]+?\\.[0-9]+?rc[0-9]+?$', version):\n return version\n # we have an old-style version (i.e. a git describe string), prepare it for\n # the rest of clean up, i.e. put the '.post+' as separator for the local\n # version number part\n elif re.match(r'^[0-9]+?\\.[0-9]+?\\.[0-9]+?-[0-9]+?-g[0-9a-z]+?$', version):\n version = re.sub(r'-', '.post+', version, count=1)\n # only adapt local version part right\n version = re.match(r'(.*?\\+)(.*)', version)\n # no upper case letters\n local_version = version.group(2).lower()\n # only alphanumeric and \".\" in local part\n local_version = re.sub(r'[^A-Za-z0-9.]', r'.', local_version)\n version = version.group(1) + local_version\n # make sure there's a \"0\" after \".post\"\n version = re.sub(r'\\.post\\+', r'.post0+', version)\n return version\n\n\nif __name__ == \"__main__\":\n print(get_git_version())\n", "path": "obspy/core/util/version.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Author: Douglas Creager <[email protected]>\n# This file is placed into the public domain.\n\n# Calculates the current version number. If possible, this is the\n# output of \u201cgit describe\u201d, modified to conform to the versioning\n# scheme that setuptools uses. 
If \u201cgit describe\u201d returns an error\n# (most likely because we're in an unpacked copy of a release tarball,\n# rather than in a git working copy), then we fall back on reading the\n# contents of the RELEASE-VERSION file.\n#\n# To use this script, simply import it your setup.py file, and use the\n# results of get_git_version() as your package version:\n#\n# from version import *\n#\n# setup(\n# version=get_git_version(),\n# .\n# .\n# .\n# )\n#\n# This will automatically update the RELEASE-VERSION file, if\n# necessary. Note that the RELEASE-VERSION file should *not* be\n# checked into git; please add it to your top-level .gitignore file.\n#\n# You'll probably want to distribute the RELEASE-VERSION file in your\n# sdist tarballs; to do this, just create a MANIFEST.in file that\n# contains the following line:\n#\n# include RELEASE-VERSION\n\n# NO IMPORTS FROM OBSPY OR FUTURE IN THIS FILE! (file gets used at\n# installation time)\nimport inspect\nimport io\nimport os\nimport re\nfrom subprocess import STDOUT, CalledProcessError, check_output\n\n\n__all__ = (\"get_git_version\")\n\nscript_dir = os.path.abspath(os.path.dirname(inspect.getfile(\n inspect.currentframe())))\nOBSPY_ROOT = os.path.abspath(os.path.join(script_dir, os.pardir,\n os.pardir, os.pardir))\nVERSION_FILE = os.path.join(OBSPY_ROOT, \"obspy\", \"RELEASE-VERSION\")\n\n\ndef call_git_describe(abbrev=10, dirty=True,\n append_remote_tracking_branch=True):\n try:\n p = check_output(['git', 'rev-parse', '--show-toplevel'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n path = p.decode().strip()\n except (OSError, CalledProcessError):\n return None\n\n if os.path.normpath(path) != OBSPY_ROOT:\n return None\n\n command = ['git', 'describe', '--abbrev=%d' % abbrev, '--always', '--tags']\n if dirty:\n command.append(\"--dirty\")\n try:\n p = check_output(['git', 'describe', '--dirty', '--abbrev=%d' % abbrev,\n '--always', '--tags'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n line = p.decode().strip()\n except (OSError, CalledProcessError):\n return None\n\n remote_tracking_branch = None\n if append_remote_tracking_branch:\n try:\n # find out local alias of remote and name of remote tracking branch\n p = check_output(['git', 'branch', '-vv'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n remote_info = [line_.rstrip()\n for line_ in p.decode().splitlines()]\n remote_info = [line_ for line_ in remote_info\n if line_.startswith('*')][0]\n remote_info = re.sub(r\".*? 
\\[([^ :]*).*?\\] .*\", r\"\\1\", remote_info)\n remote, branch = remote_info.split(\"/\")\n # find out real name of remote\n p = check_output(['git', 'remote', '-v'],\n cwd=OBSPY_ROOT, stderr=STDOUT)\n stdout = [line_.strip() for line_ in p.decode().splitlines()]\n remote = [line_ for line_ in stdout\n if line_.startswith(remote)][0].split()[1]\n if remote.startswith(\"[email protected]:\"):\n remote = re.sub(r\"[email protected]:(.*?)/.*\", r\"\\1\", remote)\n elif remote.startswith(\"https://github.com/\"):\n remote = re.sub(r\"https://github.com/(.*?)/.*\", r\"\\1\", remote)\n elif remote.startswith(\"git://github.com\"):\n remote = re.sub(r\"git://github.com/(.*?)/.*\", r\"\\1\", remote)\n else:\n remote = None\n if remote is not None:\n remote_tracking_branch = re.sub(r'[^A-Za-z0-9._-]', r'_',\n '%s-%s' % (remote, branch))\n except (IndexError, OSError, ValueError, CalledProcessError):\n pass\n\n # (this line prevents official releases)\n # should work again now, see #482 and obspy/obspy@b437f31\n if \"-\" not in line and \".\" not in line:\n version = \"0.0.0.dev+0.g%s\" % line\n else:\n parts = line.split('-', 1)\n version = parts[0]\n try:\n version += '.post+' + parts[1]\n if remote_tracking_branch is not None:\n version += '.' + remote_tracking_branch\n # IndexError means we are at a release version tag cleanly,\n # add nothing additional\n except IndexError:\n pass\n return version\n\n\ndef read_release_version():\n try:\n with io.open(VERSION_FILE, \"rt\") as fh:\n version = fh.readline()\n return version.strip()\n except IOError:\n return None\n\n\ndef write_release_version(version):\n with io.open(VERSION_FILE, \"wb\") as fh:\n fh.write((\"%s\\n\" % version).encode('ascii', 'strict'))\n\n\ndef get_git_version(abbrev=10, dirty=True, append_remote_tracking_branch=True):\n # Read in the version that's currently in RELEASE-VERSION.\n release_version = read_release_version()\n\n # First try to get the current version using \u201cgit describe\u201d.\n version = call_git_describe(\n abbrev, dirty=dirty,\n append_remote_tracking_branch=append_remote_tracking_branch)\n\n # If that doesn't work, fall back on the value that's in\n # RELEASE-VERSION.\n if version is None:\n version = release_version\n\n # If we still don't have anything, that's an error.\n if version is None:\n return '0.0.0+archive'\n\n # pip uses its normalized version number (strict PEP440) instead of our\n # original version number, so we bow to pip and use the normalized version\n # number internally, too, to avoid discrepancies.\n version = _normalize_version(version)\n\n # If the current version is different from what's in the\n # RELEASE-VERSION file, update the file to be current.\n if version != release_version:\n write_release_version(version)\n\n # Finally, return the current version.\n return version\n\n\ndef _normalize_version(version):\n \"\"\"\n Normalize version number string to adhere with PEP440 strictly.\n \"\"\"\n # we have a clean release version:\n if re.match(r'^[0-9]+?\\.[0-9]+?\\.[0-9]+?$', version):\n return version\n # we have a release candidate version:\n elif re.match(r'^[0-9]+?\\.[0-9]+?\\.[0-9]+?rc[0-9]+?$', version):\n return version\n # we have an old-style version (i.e. a git describe string), prepare it for\n # the rest of clean up, i.e. 
put the '.post+' as separator for the local\n # version number part\n elif re.match(r'^[0-9]+?\\.[0-9]+?\\.[0-9]+?-[0-9]+?-g[0-9a-z]+?$', version):\n version = re.sub(r'-', '.post+', version, count=1)\n # only adapt local version part right\n version = re.match(r'(.*?\\+)(.*)', version)\n # no upper case letters\n local_version = version.group(2).lower()\n # only alphanumeric and \".\" in local part\n local_version = re.sub(r'[^A-Za-z0-9.]', r'.', local_version)\n version = version.group(1) + local_version\n # make sure there's a \"0\" after \".post\"\n version = re.sub(r'\\.post\\+', r'.post0+', version)\n return version\n\n\nif __name__ == \"__main__\":\n print(get_git_version())\n", "path": "obspy/core/util/version.py"}]}
| 2,767 | 158 |
gh_patches_debug_62126
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-1975
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/about/stats.json is broken
[stats.json](https://www.gittip.com/about/stats.json) is broken, I guess because of commit https://github.com/gittip/www.gittip.com/commit/892b1c28d127a18858032c88bd3d065cae973c34 by @clone1018.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `configure-aspen.py`
Content:
```
1 from __future__ import division
2
3 from importlib import import_module
4 import os
5 import sys
6 import threading
7 import time
8 import traceback
9
10 import gittip
11 import gittip.wireup
12 from gittip import canonize, configure_payments
13 from gittip.security import authentication, csrf, x_frame_options
14 from gittip.utils import cache_static, timer
15 from gittip.elsewhere import platform_classes
16
17
18 from aspen import log_dammit
19
20 # Wireup Algorithm
21 # ================
22
23 version_file = os.path.join(website.www_root, 'version.txt')
24 __version__ = open(version_file).read().strip()
25 website.version = os.environ['__VERSION__'] = __version__
26
27
28 website.renderer_default = "jinja2"
29 website.default_renderers_by_media_type['application/json'] = 'stdlib_format'
30
31 website.renderer_factories['jinja2'].Renderer.global_context = {
32 'range': range,
33 'unicode': unicode,
34 'enumerate': enumerate,
35 'len': len,
36 'float': float,
37 'type': type,
38 'str': str
39 }
40
41
42 gittip.wireup.canonical()
43 website.db = gittip.wireup.db()
44 gittip.wireup.billing()
45 gittip.wireup.username_restrictions(website)
46 gittip.wireup.nanswers()
47 gittip.wireup.envvars(website)
48 tell_sentry = gittip.wireup.make_sentry_teller(website)
49
50 # this serves two purposes:
51 # 1) ensure all platform classes are created (and thus added to platform_classes)
52 # 2) keep the platform modules around to be added to the context below
53 platform_modules = {platform: import_module("gittip.elsewhere.%s" % platform)
54 for platform in platform_classes}
55
56 # The homepage wants expensive queries. Let's periodically select into an
57 # intermediate table.
58
59 UPDATE_HOMEPAGE_EVERY = int(os.environ['UPDATE_HOMEPAGE_EVERY'])
60 def update_homepage_queries():
61 from gittip import utils
62 while 1:
63 try:
64 utils.update_global_stats(website)
65 utils.update_homepage_queries_once(website.db)
66 website.db.self_check()
67 except:
68 exception = sys.exc_info()[0]
69 tell_sentry(exception)
70 tb = traceback.format_exc().strip()
71 log_dammit(tb)
72 time.sleep(UPDATE_HOMEPAGE_EVERY)
73
74 if UPDATE_HOMEPAGE_EVERY > 0:
75 homepage_updater = threading.Thread(target=update_homepage_queries)
76 homepage_updater.daemon = True
77 homepage_updater.start()
78 else:
79 from gittip import utils
80 utils.update_global_stats(website)
81
82
83 # Server Algorithm
84 # ================
85
86 def up_minthreads(website):
87 # https://github.com/gittip/www.gittip.com/issues/1098
88 # Discovered the following API by inspecting in pdb and browsing source.
89 # This requires network_engine.bind to have already been called.
90 request_queue = website.network_engine.cheroot_server.requests
91 request_queue.min = website.min_threads
92
93
94 def setup_busy_threads_logging(website):
95 # https://github.com/gittip/www.gittip.com/issues/1572
96 log_every = website.log_busy_threads_every
97 if log_every == 0:
98 return
99
100 pool = website.network_engine.cheroot_server.requests
101 def log_busy_threads():
102 time.sleep(0.5) # without this we get a single log message where all threads are busy
103 while 1:
104
105 # Use pool.min and not pool.max because of the semantics of these
106 # inside of Cheroot. (Max is a hard limit used only when pool.grow
107 # is called, and it's never called except when the pool starts up,
108 # when it's called with pool.min.)
109
110 nbusy_threads = pool.min - pool.idle
111 print("sample#aspen.busy_threads={}".format(nbusy_threads))
112 time.sleep(log_every)
113
114 thread = threading.Thread(target=log_busy_threads)
115 thread.daemon = True
116 thread.start()
117
118
119 website.server_algorithm.insert_before('start', up_minthreads)
120 website.server_algorithm.insert_before('start', setup_busy_threads_logging)
121
122
123 # Website Algorithm
124 # =================
125
126 def add_stuff_to_context(request):
127 request.context['username'] = None
128 request.context.update(platform_modules)
129
130 def scab_body_onto_response(response):
131
132 # This is a workaround for a Cheroot bug, where the connection is closed
133 # too early if there is no body:
134 #
135 # https://bitbucket.org/cherrypy/cheroot/issue/1/fail-if-passed-zero-bytes
136 #
137 # This Cheroot bug is manifesting because of a change in Aspen's behavior
138 # with the algorithm.py refactor in 0.27+: Aspen no longer sets a body for
139 # 302s as it used to. This means that all redirects are breaking
140 # intermittently (sometimes the client seems not to care that the
141 # connection is closed too early, so I guess there's some timing
142 # involved?), which is affecting a number of parts of Gittip, notably
143 # around logging in (#1859).
144
145 if not response.body:
146 response.body = '*sigh*'
147
148
149 algorithm = website.algorithm
150 algorithm.functions = [ timer.start
151 , algorithm['parse_environ_into_request']
152 , algorithm['tack_website_onto_request']
153 , algorithm['raise_200_for_OPTIONS']
154
155 , canonize
156 , configure_payments
157 , authentication.inbound
158 , csrf.inbound
159 , add_stuff_to_context
160
161 , algorithm['dispatch_request_to_filesystem']
162 , algorithm['apply_typecasters_to_path']
163
164 , cache_static.inbound
165
166 , algorithm['get_response_for_socket']
167 , algorithm['get_resource_for_request']
168 , algorithm['get_response_for_resource']
169
170 , tell_sentry
171 , algorithm['get_response_for_exception']
172
173 , gittip.outbound
174 , authentication.outbound
175 , csrf.outbound
176 , cache_static.outbound
177 , x_frame_options
178
179 , algorithm['log_traceback_for_5xx']
180 , algorithm['delegate_error_to_simplate']
181 , tell_sentry
182 , algorithm['log_traceback_for_exception']
183 , algorithm['log_result_of_request']
184
185 , scab_body_onto_response
186 , timer.end
187 , tell_sentry
188 ]
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/configure-aspen.py b/configure-aspen.py
--- a/configure-aspen.py
+++ b/configure-aspen.py
@@ -26,7 +26,6 @@
website.renderer_default = "jinja2"
-website.default_renderers_by_media_type['application/json'] = 'stdlib_format'
website.renderer_factories['jinja2'].Renderer.global_context = {
'range': range,
|
{"golden_diff": "diff --git a/configure-aspen.py b/configure-aspen.py\n--- a/configure-aspen.py\n+++ b/configure-aspen.py\n@@ -26,7 +26,6 @@\n \n \n website.renderer_default = \"jinja2\"\n-website.default_renderers_by_media_type['application/json'] = 'stdlib_format'\n \n website.renderer_factories['jinja2'].Renderer.global_context = {\n 'range': range,\n", "issue": "/about/stats.json is broken\n[stats.json](https://www.gittip.com/about/stats.json) is broken, I guess because of commit https://github.com/gittip/www.gittip.com/commit/892b1c28d127a18858032c88bd3d065cae973c34 by @clone1018.\n\n", "before_files": [{"content": "from __future__ import division\n\nfrom importlib import import_module\nimport os\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport gittip\nimport gittip.wireup\nfrom gittip import canonize, configure_payments\nfrom gittip.security import authentication, csrf, x_frame_options\nfrom gittip.utils import cache_static, timer\nfrom gittip.elsewhere import platform_classes\n\n\nfrom aspen import log_dammit\n\n# Wireup Algorithm\n# ================\n\nversion_file = os.path.join(website.www_root, 'version.txt')\n__version__ = open(version_file).read().strip()\nwebsite.version = os.environ['__VERSION__'] = __version__\n\n\nwebsite.renderer_default = \"jinja2\"\nwebsite.default_renderers_by_media_type['application/json'] = 'stdlib_format'\n\nwebsite.renderer_factories['jinja2'].Renderer.global_context = {\n 'range': range,\n 'unicode': unicode,\n 'enumerate': enumerate,\n 'len': len,\n 'float': float,\n 'type': type,\n 'str': str\n}\n\n\ngittip.wireup.canonical()\nwebsite.db = gittip.wireup.db()\ngittip.wireup.billing()\ngittip.wireup.username_restrictions(website)\ngittip.wireup.nanswers()\ngittip.wireup.envvars(website)\ntell_sentry = gittip.wireup.make_sentry_teller(website)\n\n# this serves two purposes:\n# 1) ensure all platform classes are created (and thus added to platform_classes)\n# 2) keep the platform modules around to be added to the context below\nplatform_modules = {platform: import_module(\"gittip.elsewhere.%s\" % platform)\n for platform in platform_classes}\n\n# The homepage wants expensive queries. 
Let's periodically select into an\n# intermediate table.\n\nUPDATE_HOMEPAGE_EVERY = int(os.environ['UPDATE_HOMEPAGE_EVERY'])\ndef update_homepage_queries():\n from gittip import utils\n while 1:\n try:\n utils.update_global_stats(website)\n utils.update_homepage_queries_once(website.db)\n website.db.self_check()\n except:\n exception = sys.exc_info()[0]\n tell_sentry(exception)\n tb = traceback.format_exc().strip()\n log_dammit(tb)\n time.sleep(UPDATE_HOMEPAGE_EVERY)\n\nif UPDATE_HOMEPAGE_EVERY > 0:\n homepage_updater = threading.Thread(target=update_homepage_queries)\n homepage_updater.daemon = True\n homepage_updater.start()\nelse:\n from gittip import utils\n utils.update_global_stats(website)\n\n\n# Server Algorithm\n# ================\n\ndef up_minthreads(website):\n # https://github.com/gittip/www.gittip.com/issues/1098\n # Discovered the following API by inspecting in pdb and browsing source.\n # This requires network_engine.bind to have already been called.\n request_queue = website.network_engine.cheroot_server.requests\n request_queue.min = website.min_threads\n\n\ndef setup_busy_threads_logging(website):\n # https://github.com/gittip/www.gittip.com/issues/1572\n log_every = website.log_busy_threads_every\n if log_every == 0:\n return\n\n pool = website.network_engine.cheroot_server.requests\n def log_busy_threads():\n time.sleep(0.5) # without this we get a single log message where all threads are busy\n while 1:\n\n # Use pool.min and not pool.max because of the semantics of these\n # inside of Cheroot. (Max is a hard limit used only when pool.grow\n # is called, and it's never called except when the pool starts up,\n # when it's called with pool.min.)\n\n nbusy_threads = pool.min - pool.idle\n print(\"sample#aspen.busy_threads={}\".format(nbusy_threads))\n time.sleep(log_every)\n\n thread = threading.Thread(target=log_busy_threads)\n thread.daemon = True\n thread.start()\n\n\nwebsite.server_algorithm.insert_before('start', up_minthreads)\nwebsite.server_algorithm.insert_before('start', setup_busy_threads_logging)\n\n\n# Website Algorithm\n# =================\n\ndef add_stuff_to_context(request):\n request.context['username'] = None\n request.context.update(platform_modules)\n\ndef scab_body_onto_response(response):\n\n # This is a workaround for a Cheroot bug, where the connection is closed\n # too early if there is no body:\n #\n # https://bitbucket.org/cherrypy/cheroot/issue/1/fail-if-passed-zero-bytes\n #\n # This Cheroot bug is manifesting because of a change in Aspen's behavior\n # with the algorithm.py refactor in 0.27+: Aspen no longer sets a body for\n # 302s as it used to. 
This means that all redirects are breaking\n # intermittently (sometimes the client seems not to care that the\n # connection is closed too early, so I guess there's some timing\n # involved?), which is affecting a number of parts of Gittip, notably\n # around logging in (#1859).\n\n if not response.body:\n response.body = '*sigh*'\n\n\nalgorithm = website.algorithm\nalgorithm.functions = [ timer.start\n , algorithm['parse_environ_into_request']\n , algorithm['tack_website_onto_request']\n , algorithm['raise_200_for_OPTIONS']\n\n , canonize\n , configure_payments\n , authentication.inbound\n , csrf.inbound\n , add_stuff_to_context\n\n , algorithm['dispatch_request_to_filesystem']\n , algorithm['apply_typecasters_to_path']\n\n , cache_static.inbound\n\n , algorithm['get_response_for_socket']\n , algorithm['get_resource_for_request']\n , algorithm['get_response_for_resource']\n\n , tell_sentry\n , algorithm['get_response_for_exception']\n\n , gittip.outbound\n , authentication.outbound\n , csrf.outbound\n , cache_static.outbound\n , x_frame_options\n\n , algorithm['log_traceback_for_5xx']\n , algorithm['delegate_error_to_simplate']\n , tell_sentry\n , algorithm['log_traceback_for_exception']\n , algorithm['log_result_of_request']\n\n , scab_body_onto_response\n , timer.end\n , tell_sentry\n ]\n", "path": "configure-aspen.py"}], "after_files": [{"content": "from __future__ import division\n\nfrom importlib import import_module\nimport os\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport gittip\nimport gittip.wireup\nfrom gittip import canonize, configure_payments\nfrom gittip.security import authentication, csrf, x_frame_options\nfrom gittip.utils import cache_static, timer\nfrom gittip.elsewhere import platform_classes\n\n\nfrom aspen import log_dammit\n\n# Wireup Algorithm\n# ================\n\nversion_file = os.path.join(website.www_root, 'version.txt')\n__version__ = open(version_file).read().strip()\nwebsite.version = os.environ['__VERSION__'] = __version__\n\n\nwebsite.renderer_default = \"jinja2\"\n\nwebsite.renderer_factories['jinja2'].Renderer.global_context = {\n 'range': range,\n 'unicode': unicode,\n 'enumerate': enumerate,\n 'len': len,\n 'float': float,\n 'type': type,\n 'str': str\n}\n\n\ngittip.wireup.canonical()\nwebsite.db = gittip.wireup.db()\ngittip.wireup.billing()\ngittip.wireup.username_restrictions(website)\ngittip.wireup.nanswers()\ngittip.wireup.envvars(website)\ntell_sentry = gittip.wireup.make_sentry_teller(website)\n\n# this serves two purposes:\n# 1) ensure all platform classes are created (and thus added to platform_classes)\n# 2) keep the platform modules around to be added to the context below\nplatform_modules = {platform: import_module(\"gittip.elsewhere.%s\" % platform)\n for platform in platform_classes}\n\n# The homepage wants expensive queries. 
Let's periodically select into an\n# intermediate table.\n\nUPDATE_HOMEPAGE_EVERY = int(os.environ['UPDATE_HOMEPAGE_EVERY'])\ndef update_homepage_queries():\n from gittip import utils\n while 1:\n try:\n utils.update_global_stats(website)\n utils.update_homepage_queries_once(website.db)\n website.db.self_check()\n except:\n exception = sys.exc_info()[0]\n tell_sentry(exception)\n tb = traceback.format_exc().strip()\n log_dammit(tb)\n time.sleep(UPDATE_HOMEPAGE_EVERY)\n\nif UPDATE_HOMEPAGE_EVERY > 0:\n homepage_updater = threading.Thread(target=update_homepage_queries)\n homepage_updater.daemon = True\n homepage_updater.start()\nelse:\n from gittip import utils\n utils.update_global_stats(website)\n\n\n# Server Algorithm\n# ================\n\ndef up_minthreads(website):\n # https://github.com/gittip/www.gittip.com/issues/1098\n # Discovered the following API by inspecting in pdb and browsing source.\n # This requires network_engine.bind to have already been called.\n request_queue = website.network_engine.cheroot_server.requests\n request_queue.min = website.min_threads\n\n\ndef setup_busy_threads_logging(website):\n # https://github.com/gittip/www.gittip.com/issues/1572\n log_every = website.log_busy_threads_every\n if log_every == 0:\n return\n\n pool = website.network_engine.cheroot_server.requests\n def log_busy_threads():\n time.sleep(0.5) # without this we get a single log message where all threads are busy\n while 1:\n\n # Use pool.min and not pool.max because of the semantics of these\n # inside of Cheroot. (Max is a hard limit used only when pool.grow\n # is called, and it's never called except when the pool starts up,\n # when it's called with pool.min.)\n\n nbusy_threads = pool.min - pool.idle\n print(\"sample#aspen.busy_threads={}\".format(nbusy_threads))\n time.sleep(log_every)\n\n thread = threading.Thread(target=log_busy_threads)\n thread.daemon = True\n thread.start()\n\n\nwebsite.server_algorithm.insert_before('start', up_minthreads)\nwebsite.server_algorithm.insert_before('start', setup_busy_threads_logging)\n\n\n# Website Algorithm\n# =================\n\ndef add_stuff_to_context(request):\n request.context['username'] = None\n request.context.update(platform_modules)\n\ndef scab_body_onto_response(response):\n\n # This is a workaround for a Cheroot bug, where the connection is closed\n # too early if there is no body:\n #\n # https://bitbucket.org/cherrypy/cheroot/issue/1/fail-if-passed-zero-bytes\n #\n # This Cheroot bug is manifesting because of a change in Aspen's behavior\n # with the algorithm.py refactor in 0.27+: Aspen no longer sets a body for\n # 302s as it used to. 
This means that all redirects are breaking\n # intermittently (sometimes the client seems not to care that the\n # connection is closed too early, so I guess there's some timing\n # involved?), which is affecting a number of parts of Gittip, notably\n # around logging in (#1859).\n\n if not response.body:\n response.body = '*sigh*'\n\n\nalgorithm = website.algorithm\nalgorithm.functions = [ timer.start\n , algorithm['parse_environ_into_request']\n , algorithm['tack_website_onto_request']\n , algorithm['raise_200_for_OPTIONS']\n\n , canonize\n , configure_payments\n , authentication.inbound\n , csrf.inbound\n , add_stuff_to_context\n\n , algorithm['dispatch_request_to_filesystem']\n , algorithm['apply_typecasters_to_path']\n\n , cache_static.inbound\n\n , algorithm['get_response_for_socket']\n , algorithm['get_resource_for_request']\n , algorithm['get_response_for_resource']\n\n , tell_sentry\n , algorithm['get_response_for_exception']\n\n , gittip.outbound\n , authentication.outbound\n , csrf.outbound\n , cache_static.outbound\n , x_frame_options\n\n , algorithm['log_traceback_for_5xx']\n , algorithm['delegate_error_to_simplate']\n , tell_sentry\n , algorithm['log_traceback_for_exception']\n , algorithm['log_result_of_request']\n\n , scab_body_onto_response\n , timer.end\n , tell_sentry\n ]\n", "path": "configure-aspen.py"}]}
| 2,221 | 92 |
gh_patches_debug_11405
|
rasdani/github-patches
|
git_diff
|
aio-libs-abandoned__aioredis-py-1017
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[2.0] Lock class uses blocking sleep
aioredis/lock.py calls `time.sleep` instead of `asyncio.sleep`. Pull request on its way...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aioredis/lock.py`
Content:
```
1 import threading
2 import time as mod_time
3 import uuid
4 from types import SimpleNamespace
5 from typing import TYPE_CHECKING, Awaitable, NoReturn, Union
6
7 from aioredis.exceptions import LockError, LockNotOwnedError
8
9 if TYPE_CHECKING:
10 from aioredis import Redis
11
12
13 class Lock:
14 """
15 A shared, distributed Lock. Using Redis for locking allows the Lock
16 to be shared across processes and/or machines.
17
18 It's left to the user to resolve deadlock issues and make sure
19 multiple clients play nicely together.
20 """
21
22 lua_release = None
23 lua_extend = None
24 lua_reacquire = None
25
26 # KEYS[1] - lock name
27 # ARGV[1] - token
28 # return 1 if the lock was released, otherwise 0
29 LUA_RELEASE_SCRIPT = """
30 local token = redis.call('get', KEYS[1])
31 if not token or token ~= ARGV[1] then
32 return 0
33 end
34 redis.call('del', KEYS[1])
35 return 1
36 """
37
38 # KEYS[1] - lock name
39 # ARGV[1] - token
40 # ARGV[2] - additional milliseconds
41 # ARGV[3] - "0" if the additional time should be added to the lock's
42 # existing ttl or "1" if the existing ttl should be replaced
43 # return 1 if the locks time was extended, otherwise 0
44 LUA_EXTEND_SCRIPT = """
45 local token = redis.call('get', KEYS[1])
46 if not token or token ~= ARGV[1] then
47 return 0
48 end
49 local expiration = redis.call('pttl', KEYS[1])
50 if not expiration then
51 expiration = 0
52 end
53 if expiration < 0 then
54 return 0
55 end
56
57 local newttl = ARGV[2]
58 if ARGV[3] == "0" then
59 newttl = ARGV[2] + expiration
60 end
61 redis.call('pexpire', KEYS[1], newttl)
62 return 1
63 """
64
65 # KEYS[1] - lock name
66 # ARGV[1] - token
67 # ARGV[2] - milliseconds
68 # return 1 if the locks time was reacquired, otherwise 0
69 LUA_REACQUIRE_SCRIPT = """
70 local token = redis.call('get', KEYS[1])
71 if not token or token ~= ARGV[1] then
72 return 0
73 end
74 redis.call('pexpire', KEYS[1], ARGV[2])
75 return 1
76 """
77
78 def __init__(
79 self,
80 redis: "Redis",
81 name: str,
82 timeout: float = None,
83 sleep: float = 0.1,
84 blocking: bool = True,
85 blocking_timeout: float = None,
86 thread_local: bool = True,
87 ):
88 """
89 Create a new Lock instance named ``name`` using the Redis client
90 supplied by ``redis``.
91
92 ``timeout`` indicates a maximum life for the lock.
93 By default, it will remain locked until release() is called.
94 ``timeout`` can be specified as a float or integer, both representing
95 the number of seconds to wait.
96
97 ``sleep`` indicates the amount of time to sleep per loop iteration
98 when the lock is in blocking mode and another client is currently
99 holding the lock.
100
101 ``blocking`` indicates whether calling ``acquire`` should block until
102 the lock has been acquired or to fail immediately, causing ``acquire``
103 to return False and the lock not being acquired. Defaults to True.
104 Note this value can be overridden by passing a ``blocking``
105 argument to ``acquire``.
106
107 ``blocking_timeout`` indicates the maximum amount of time in seconds to
108 spend trying to acquire the lock. A value of ``None`` indicates
109 continue trying forever. ``blocking_timeout`` can be specified as a
110 float or integer, both representing the number of seconds to wait.
111
112 ``thread_local`` indicates whether the lock token is placed in
113 thread-local storage. By default, the token is placed in thread local
114 storage so that a thread only sees its token, not a token set by
115 another thread. Consider the following timeline:
116
117 time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
118 thread-1 sets the token to "abc"
119 time: 1, thread-2 blocks trying to acquire `my-lock` using the
120 Lock instance.
121 time: 5, thread-1 has not yet completed. redis expires the lock
122 key.
123 time: 5, thread-2 acquired `my-lock` now that it's available.
124 thread-2 sets the token to "xyz"
125 time: 6, thread-1 finishes its work and calls release(). if the
126 token is *not* stored in thread local storage, then
127 thread-1 would see the token value as "xyz" and would be
128 able to successfully release the thread-2's lock.
129
130 In some use cases it's necessary to disable thread local storage. For
131 example, if you have code where one thread acquires a lock and passes
132 that lock instance to a worker thread to release later. If thread
133 local storage isn't disabled in this case, the worker thread won't see
134 the token set by the thread that acquired the lock. Our assumption
135 is that these cases aren't common and as such default to using
136 thread local storage.
137 """
138 self.redis = redis
139 self.name = name
140 self.timeout = timeout
141 self.sleep = sleep
142 self.blocking = blocking
143 self.blocking_timeout = blocking_timeout
144 self.thread_local = bool(thread_local)
145 self.local = threading.local() if self.thread_local else SimpleNamespace()
146 self.local.token = None
147 self.register_scripts()
148
149 def register_scripts(self):
150 cls = self.__class__
151 client = self.redis
152 if cls.lua_release is None:
153 cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
154 if cls.lua_extend is None:
155 cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
156 if cls.lua_reacquire is None:
157 cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)
158
159 async def __aenter__(self):
160 # force blocking, as otherwise the user would have to check whether
161 # the lock was actually acquired or not.
162 if await self.acquire(blocking=True):
163 return self
164 raise LockError("Unable to acquire lock within the time specified")
165
166 async def __aexit__(self, exc_type, exc_value, traceback):
167 await self.release()
168
169 async def acquire(
170 self,
171 blocking: bool = None,
172 blocking_timeout: float = None,
173 token: Union[str, bytes] = None,
174 ):
175 """
176 Use Redis to hold a shared, distributed lock named ``name``.
177 Returns True once the lock is acquired.
178
179 If ``blocking`` is False, always return immediately. If the lock
180 was acquired, return True, otherwise return False.
181
182 ``blocking_timeout`` specifies the maximum number of seconds to
183 wait trying to acquire the lock.
184
185 ``token`` specifies the token value to be used. If provided, token
186 must be a bytes object or a string that can be encoded to a bytes
187 object with the default encoding. If a token isn't specified, a UUID
188 will be generated.
189 """
190 sleep = self.sleep
191 if token is None:
192 token = uuid.uuid1().hex.encode()
193 else:
194 encoder = self.redis.connection_pool.get_encoder()
195 token = encoder.encode(token)
196 if blocking is None:
197 blocking = self.blocking
198 if blocking_timeout is None:
199 blocking_timeout = self.blocking_timeout
200 stop_trying_at = None
201 if blocking_timeout is not None:
202 stop_trying_at = mod_time.monotonic() + blocking_timeout
203 while True:
204 if await self.do_acquire(token):
205 self.local.token = token
206 return True
207 if not blocking:
208 return False
209 next_try_at = mod_time.monotonic() + sleep
210 if stop_trying_at is not None and next_try_at > stop_trying_at:
211 return False
212 mod_time.sleep(sleep)
213
214 async def do_acquire(self, token: Union[str, bytes]) -> bool:
215 if self.timeout:
216 # convert to milliseconds
217 timeout = int(self.timeout * 1000)
218 else:
219 timeout = None
220 if await self.redis.set(self.name, token, nx=True, px=timeout):
221 return True
222 return False
223
224 async def locked(self) -> bool:
225 """
226 Returns True if this key is locked by any process, otherwise False.
227 """
228 return await self.redis.get(self.name) is not None
229
230 async def owned(self) -> bool:
231 """
232 Returns True if this key is locked by this lock, otherwise False.
233 """
234 stored_token = await self.redis.get(self.name)
235 # need to always compare bytes to bytes
236 # TODO: this can be simplified when the context manager is finished
237 if stored_token and not isinstance(stored_token, bytes):
238 encoder = self.redis.connection_pool.get_encoder()
239 stored_token = encoder.encode(stored_token)
240 return self.local.token is not None and stored_token == self.local.token
241
242 def release(self) -> Awaitable[NoReturn]:
243 """Releases the already acquired lock"""
244 expected_token = self.local.token
245 if expected_token is None:
246 raise LockError("Cannot release an unlocked lock")
247 self.local.token = None
248 return self.do_release(expected_token)
249
250 async def do_release(self, expected_token: bytes):
251 if not bool(
252 await self.lua_release(
253 keys=[self.name], args=[expected_token], client=self.redis
254 )
255 ):
256 raise LockNotOwnedError("Cannot release a lock" " that's no longer owned")
257
258 def extend(
259 self, additional_time: float, replace_ttl: bool = False
260 ) -> Awaitable[bool]:
261 """
262 Adds more time to an already acquired lock.
263
264 ``additional_time`` can be specified as an integer or a float, both
265 representing the number of seconds to add.
266
267 ``replace_ttl`` if False (the default), add `additional_time` to
268 the lock's existing ttl. If True, replace the lock's ttl with
269 `additional_time`.
270 """
271 if self.local.token is None:
272 raise LockError("Cannot extend an unlocked lock")
273 if self.timeout is None:
274 raise LockError("Cannot extend a lock with no timeout")
275 return self.do_extend(additional_time, replace_ttl)
276
277 async def do_extend(self, additional_time, replace_ttl) -> bool:
278 additional_time = int(additional_time * 1000)
279 if not bool(
280 await self.lua_extend(
281 keys=[self.name],
282 args=[self.local.token, additional_time, replace_ttl and "1" or "0"],
283 client=self.redis,
284 )
285 ):
286 raise LockNotOwnedError("Cannot extend a lock that's" " no longer owned")
287 return True
288
289 def reacquire(self) -> Awaitable[bool]:
290 """
291 Resets a TTL of an already acquired lock back to a timeout value.
292 """
293 if self.local.token is None:
294 raise LockError("Cannot reacquire an unlocked lock")
295 if self.timeout is None:
296 raise LockError("Cannot reacquire a lock with no timeout")
297 return self.do_reacquire()
298
299 async def do_reacquire(self) -> bool:
300 timeout = int(self.timeout * 1000)
301 if not bool(
302 await self.lua_reacquire(
303 keys=[self.name], args=[self.local.token, timeout], client=self.redis
304 )
305 ):
306 raise LockNotOwnedError("Cannot reacquire a lock that's" " no longer owned")
307 return True
308
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/aioredis/lock.py b/aioredis/lock.py
--- a/aioredis/lock.py
+++ b/aioredis/lock.py
@@ -1,3 +1,4 @@
+import asyncio
import threading
import time as mod_time
import uuid
@@ -209,7 +210,7 @@
next_try_at = mod_time.monotonic() + sleep
if stop_trying_at is not None and next_try_at > stop_trying_at:
return False
- mod_time.sleep(sleep)
+ await asyncio.sleep(sleep)
async def do_acquire(self, token: Union[str, bytes]) -> bool:
if self.timeout:
|
{"golden_diff": "diff --git a/aioredis/lock.py b/aioredis/lock.py\n--- a/aioredis/lock.py\n+++ b/aioredis/lock.py\n@@ -1,3 +1,4 @@\n+import asyncio\n import threading\n import time as mod_time\n import uuid\n@@ -209,7 +210,7 @@\n next_try_at = mod_time.monotonic() + sleep\n if stop_trying_at is not None and next_try_at > stop_trying_at:\n return False\n- mod_time.sleep(sleep)\n+ await asyncio.sleep(sleep)\n \n async def do_acquire(self, token: Union[str, bytes]) -> bool:\n if self.timeout:\n", "issue": "[2.0] Lock class uses blocking sleep\naioredis/lock.py calls `time.sleep` instead of `asyncio.sleep`. Pull request on its way...\n", "before_files": [{"content": "import threading\nimport time as mod_time\nimport uuid\nfrom types import SimpleNamespace\nfrom typing import TYPE_CHECKING, Awaitable, NoReturn, Union\n\nfrom aioredis.exceptions import LockError, LockNotOwnedError\n\nif TYPE_CHECKING:\n from aioredis import Redis\n\n\nclass Lock:\n \"\"\"\n A shared, distributed Lock. Using Redis for locking allows the Lock\n to be shared across processes and/or machines.\n\n It's left to the user to resolve deadlock issues and make sure\n multiple clients play nicely together.\n \"\"\"\n\n lua_release = None\n lua_extend = None\n lua_reacquire = None\n\n # KEYS[1] - lock name\n # ARGV[1] - token\n # return 1 if the lock was released, otherwise 0\n LUA_RELEASE_SCRIPT = \"\"\"\n local token = redis.call('get', KEYS[1])\n if not token or token ~= ARGV[1] then\n return 0\n end\n redis.call('del', KEYS[1])\n return 1\n \"\"\"\n\n # KEYS[1] - lock name\n # ARGV[1] - token\n # ARGV[2] - additional milliseconds\n # ARGV[3] - \"0\" if the additional time should be added to the lock's\n # existing ttl or \"1\" if the existing ttl should be replaced\n # return 1 if the locks time was extended, otherwise 0\n LUA_EXTEND_SCRIPT = \"\"\"\n local token = redis.call('get', KEYS[1])\n if not token or token ~= ARGV[1] then\n return 0\n end\n local expiration = redis.call('pttl', KEYS[1])\n if not expiration then\n expiration = 0\n end\n if expiration < 0 then\n return 0\n end\n\n local newttl = ARGV[2]\n if ARGV[3] == \"0\" then\n newttl = ARGV[2] + expiration\n end\n redis.call('pexpire', KEYS[1], newttl)\n return 1\n \"\"\"\n\n # KEYS[1] - lock name\n # ARGV[1] - token\n # ARGV[2] - milliseconds\n # return 1 if the locks time was reacquired, otherwise 0\n LUA_REACQUIRE_SCRIPT = \"\"\"\n local token = redis.call('get', KEYS[1])\n if not token or token ~= ARGV[1] then\n return 0\n end\n redis.call('pexpire', KEYS[1], ARGV[2])\n return 1\n \"\"\"\n\n def __init__(\n self,\n redis: \"Redis\",\n name: str,\n timeout: float = None,\n sleep: float = 0.1,\n blocking: bool = True,\n blocking_timeout: float = None,\n thread_local: bool = True,\n ):\n \"\"\"\n Create a new Lock instance named ``name`` using the Redis client\n supplied by ``redis``.\n\n ``timeout`` indicates a maximum life for the lock.\n By default, it will remain locked until release() is called.\n ``timeout`` can be specified as a float or integer, both representing\n the number of seconds to wait.\n\n ``sleep`` indicates the amount of time to sleep per loop iteration\n when the lock is in blocking mode and another client is currently\n holding the lock.\n\n ``blocking`` indicates whether calling ``acquire`` should block until\n the lock has been acquired or to fail immediately, causing ``acquire``\n to return False and the lock not being acquired. 
Defaults to True.\n Note this value can be overridden by passing a ``blocking``\n argument to ``acquire``.\n\n ``blocking_timeout`` indicates the maximum amount of time in seconds to\n spend trying to acquire the lock. A value of ``None`` indicates\n continue trying forever. ``blocking_timeout`` can be specified as a\n float or integer, both representing the number of seconds to wait.\n\n ``thread_local`` indicates whether the lock token is placed in\n thread-local storage. By default, the token is placed in thread local\n storage so that a thread only sees its token, not a token set by\n another thread. Consider the following timeline:\n\n time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.\n thread-1 sets the token to \"abc\"\n time: 1, thread-2 blocks trying to acquire `my-lock` using the\n Lock instance.\n time: 5, thread-1 has not yet completed. redis expires the lock\n key.\n time: 5, thread-2 acquired `my-lock` now that it's available.\n thread-2 sets the token to \"xyz\"\n time: 6, thread-1 finishes its work and calls release(). if the\n token is *not* stored in thread local storage, then\n thread-1 would see the token value as \"xyz\" and would be\n able to successfully release the thread-2's lock.\n\n In some use cases it's necessary to disable thread local storage. For\n example, if you have code where one thread acquires a lock and passes\n that lock instance to a worker thread to release later. If thread\n local storage isn't disabled in this case, the worker thread won't see\n the token set by the thread that acquired the lock. Our assumption\n is that these cases aren't common and as such default to using\n thread local storage.\n \"\"\"\n self.redis = redis\n self.name = name\n self.timeout = timeout\n self.sleep = sleep\n self.blocking = blocking\n self.blocking_timeout = blocking_timeout\n self.thread_local = bool(thread_local)\n self.local = threading.local() if self.thread_local else SimpleNamespace()\n self.local.token = None\n self.register_scripts()\n\n def register_scripts(self):\n cls = self.__class__\n client = self.redis\n if cls.lua_release is None:\n cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)\n if cls.lua_extend is None:\n cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)\n if cls.lua_reacquire is None:\n cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)\n\n async def __aenter__(self):\n # force blocking, as otherwise the user would have to check whether\n # the lock was actually acquired or not.\n if await self.acquire(blocking=True):\n return self\n raise LockError(\"Unable to acquire lock within the time specified\")\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n await self.release()\n\n async def acquire(\n self,\n blocking: bool = None,\n blocking_timeout: float = None,\n token: Union[str, bytes] = None,\n ):\n \"\"\"\n Use Redis to hold a shared, distributed lock named ``name``.\n Returns True once the lock is acquired.\n\n If ``blocking`` is False, always return immediately. If the lock\n was acquired, return True, otherwise return False.\n\n ``blocking_timeout`` specifies the maximum number of seconds to\n wait trying to acquire the lock.\n\n ``token`` specifies the token value to be used. If provided, token\n must be a bytes object or a string that can be encoded to a bytes\n object with the default encoding. 
If a token isn't specified, a UUID\n will be generated.\n \"\"\"\n sleep = self.sleep\n if token is None:\n token = uuid.uuid1().hex.encode()\n else:\n encoder = self.redis.connection_pool.get_encoder()\n token = encoder.encode(token)\n if blocking is None:\n blocking = self.blocking\n if blocking_timeout is None:\n blocking_timeout = self.blocking_timeout\n stop_trying_at = None\n if blocking_timeout is not None:\n stop_trying_at = mod_time.monotonic() + blocking_timeout\n while True:\n if await self.do_acquire(token):\n self.local.token = token\n return True\n if not blocking:\n return False\n next_try_at = mod_time.monotonic() + sleep\n if stop_trying_at is not None and next_try_at > stop_trying_at:\n return False\n mod_time.sleep(sleep)\n\n async def do_acquire(self, token: Union[str, bytes]) -> bool:\n if self.timeout:\n # convert to milliseconds\n timeout = int(self.timeout * 1000)\n else:\n timeout = None\n if await self.redis.set(self.name, token, nx=True, px=timeout):\n return True\n return False\n\n async def locked(self) -> bool:\n \"\"\"\n Returns True if this key is locked by any process, otherwise False.\n \"\"\"\n return await self.redis.get(self.name) is not None\n\n async def owned(self) -> bool:\n \"\"\"\n Returns True if this key is locked by this lock, otherwise False.\n \"\"\"\n stored_token = await self.redis.get(self.name)\n # need to always compare bytes to bytes\n # TODO: this can be simplified when the context manager is finished\n if stored_token and not isinstance(stored_token, bytes):\n encoder = self.redis.connection_pool.get_encoder()\n stored_token = encoder.encode(stored_token)\n return self.local.token is not None and stored_token == self.local.token\n\n def release(self) -> Awaitable[NoReturn]:\n \"\"\"Releases the already acquired lock\"\"\"\n expected_token = self.local.token\n if expected_token is None:\n raise LockError(\"Cannot release an unlocked lock\")\n self.local.token = None\n return self.do_release(expected_token)\n\n async def do_release(self, expected_token: bytes):\n if not bool(\n await self.lua_release(\n keys=[self.name], args=[expected_token], client=self.redis\n )\n ):\n raise LockNotOwnedError(\"Cannot release a lock\" \" that's no longer owned\")\n\n def extend(\n self, additional_time: float, replace_ttl: bool = False\n ) -> Awaitable[bool]:\n \"\"\"\n Adds more time to an already acquired lock.\n\n ``additional_time`` can be specified as an integer or a float, both\n representing the number of seconds to add.\n\n ``replace_ttl`` if False (the default), add `additional_time` to\n the lock's existing ttl. 
If True, replace the lock's ttl with\n `additional_time`.\n \"\"\"\n if self.local.token is None:\n raise LockError(\"Cannot extend an unlocked lock\")\n if self.timeout is None:\n raise LockError(\"Cannot extend a lock with no timeout\")\n return self.do_extend(additional_time, replace_ttl)\n\n async def do_extend(self, additional_time, replace_ttl) -> bool:\n additional_time = int(additional_time * 1000)\n if not bool(\n await self.lua_extend(\n keys=[self.name],\n args=[self.local.token, additional_time, replace_ttl and \"1\" or \"0\"],\n client=self.redis,\n )\n ):\n raise LockNotOwnedError(\"Cannot extend a lock that's\" \" no longer owned\")\n return True\n\n def reacquire(self) -> Awaitable[bool]:\n \"\"\"\n Resets a TTL of an already acquired lock back to a timeout value.\n \"\"\"\n if self.local.token is None:\n raise LockError(\"Cannot reacquire an unlocked lock\")\n if self.timeout is None:\n raise LockError(\"Cannot reacquire a lock with no timeout\")\n return self.do_reacquire()\n\n async def do_reacquire(self) -> bool:\n timeout = int(self.timeout * 1000)\n if not bool(\n await self.lua_reacquire(\n keys=[self.name], args=[self.local.token, timeout], client=self.redis\n )\n ):\n raise LockNotOwnedError(\"Cannot reacquire a lock that's\" \" no longer owned\")\n return True\n", "path": "aioredis/lock.py"}], "after_files": [{"content": "import asyncio\nimport threading\nimport time as mod_time\nimport uuid\nfrom types import SimpleNamespace\nfrom typing import TYPE_CHECKING, Awaitable, NoReturn, Union\n\nfrom aioredis.exceptions import LockError, LockNotOwnedError\n\nif TYPE_CHECKING:\n from aioredis import Redis\n\n\nclass Lock:\n \"\"\"\n A shared, distributed Lock. Using Redis for locking allows the Lock\n to be shared across processes and/or machines.\n\n It's left to the user to resolve deadlock issues and make sure\n multiple clients play nicely together.\n \"\"\"\n\n lua_release = None\n lua_extend = None\n lua_reacquire = None\n\n # KEYS[1] - lock name\n # ARGV[1] - token\n # return 1 if the lock was released, otherwise 0\n LUA_RELEASE_SCRIPT = \"\"\"\n local token = redis.call('get', KEYS[1])\n if not token or token ~= ARGV[1] then\n return 0\n end\n redis.call('del', KEYS[1])\n return 1\n \"\"\"\n\n # KEYS[1] - lock name\n # ARGV[1] - token\n # ARGV[2] - additional milliseconds\n # ARGV[3] - \"0\" if the additional time should be added to the lock's\n # existing ttl or \"1\" if the existing ttl should be replaced\n # return 1 if the locks time was extended, otherwise 0\n LUA_EXTEND_SCRIPT = \"\"\"\n local token = redis.call('get', KEYS[1])\n if not token or token ~= ARGV[1] then\n return 0\n end\n local expiration = redis.call('pttl', KEYS[1])\n if not expiration then\n expiration = 0\n end\n if expiration < 0 then\n return 0\n end\n\n local newttl = ARGV[2]\n if ARGV[3] == \"0\" then\n newttl = ARGV[2] + expiration\n end\n redis.call('pexpire', KEYS[1], newttl)\n return 1\n \"\"\"\n\n # KEYS[1] - lock name\n # ARGV[1] - token\n # ARGV[2] - milliseconds\n # return 1 if the locks time was reacquired, otherwise 0\n LUA_REACQUIRE_SCRIPT = \"\"\"\n local token = redis.call('get', KEYS[1])\n if not token or token ~= ARGV[1] then\n return 0\n end\n redis.call('pexpire', KEYS[1], ARGV[2])\n return 1\n \"\"\"\n\n def __init__(\n self,\n redis: \"Redis\",\n name: str,\n timeout: float = None,\n sleep: float = 0.1,\n blocking: bool = True,\n blocking_timeout: float = None,\n thread_local: bool = True,\n ):\n \"\"\"\n Create a new Lock instance named ``name`` using the Redis 
client\n supplied by ``redis``.\n\n ``timeout`` indicates a maximum life for the lock.\n By default, it will remain locked until release() is called.\n ``timeout`` can be specified as a float or integer, both representing\n the number of seconds to wait.\n\n ``sleep`` indicates the amount of time to sleep per loop iteration\n when the lock is in blocking mode and another client is currently\n holding the lock.\n\n ``blocking`` indicates whether calling ``acquire`` should block until\n the lock has been acquired or to fail immediately, causing ``acquire``\n to return False and the lock not being acquired. Defaults to True.\n Note this value can be overridden by passing a ``blocking``\n argument to ``acquire``.\n\n ``blocking_timeout`` indicates the maximum amount of time in seconds to\n spend trying to acquire the lock. A value of ``None`` indicates\n continue trying forever. ``blocking_timeout`` can be specified as a\n float or integer, both representing the number of seconds to wait.\n\n ``thread_local`` indicates whether the lock token is placed in\n thread-local storage. By default, the token is placed in thread local\n storage so that a thread only sees its token, not a token set by\n another thread. Consider the following timeline:\n\n time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.\n thread-1 sets the token to \"abc\"\n time: 1, thread-2 blocks trying to acquire `my-lock` using the\n Lock instance.\n time: 5, thread-1 has not yet completed. redis expires the lock\n key.\n time: 5, thread-2 acquired `my-lock` now that it's available.\n thread-2 sets the token to \"xyz\"\n time: 6, thread-1 finishes its work and calls release(). if the\n token is *not* stored in thread local storage, then\n thread-1 would see the token value as \"xyz\" and would be\n able to successfully release the thread-2's lock.\n\n In some use cases it's necessary to disable thread local storage. For\n example, if you have code where one thread acquires a lock and passes\n that lock instance to a worker thread to release later. If thread\n local storage isn't disabled in this case, the worker thread won't see\n the token set by the thread that acquired the lock. 
Our assumption\n is that these cases aren't common and as such default to using\n thread local storage.\n \"\"\"\n self.redis = redis\n self.name = name\n self.timeout = timeout\n self.sleep = sleep\n self.blocking = blocking\n self.blocking_timeout = blocking_timeout\n self.thread_local = bool(thread_local)\n self.local = threading.local() if self.thread_local else SimpleNamespace()\n self.local.token = None\n self.register_scripts()\n\n def register_scripts(self):\n cls = self.__class__\n client = self.redis\n if cls.lua_release is None:\n cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)\n if cls.lua_extend is None:\n cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)\n if cls.lua_reacquire is None:\n cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)\n\n async def __aenter__(self):\n # force blocking, as otherwise the user would have to check whether\n # the lock was actually acquired or not.\n if await self.acquire(blocking=True):\n return self\n raise LockError(\"Unable to acquire lock within the time specified\")\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n await self.release()\n\n async def acquire(\n self,\n blocking: bool = None,\n blocking_timeout: float = None,\n token: Union[str, bytes] = None,\n ):\n \"\"\"\n Use Redis to hold a shared, distributed lock named ``name``.\n Returns True once the lock is acquired.\n\n If ``blocking`` is False, always return immediately. If the lock\n was acquired, return True, otherwise return False.\n\n ``blocking_timeout`` specifies the maximum number of seconds to\n wait trying to acquire the lock.\n\n ``token`` specifies the token value to be used. If provided, token\n must be a bytes object or a string that can be encoded to a bytes\n object with the default encoding. 
If a token isn't specified, a UUID\n will be generated.\n \"\"\"\n sleep = self.sleep\n if token is None:\n token = uuid.uuid1().hex.encode()\n else:\n encoder = self.redis.connection_pool.get_encoder()\n token = encoder.encode(token)\n if blocking is None:\n blocking = self.blocking\n if blocking_timeout is None:\n blocking_timeout = self.blocking_timeout\n stop_trying_at = None\n if blocking_timeout is not None:\n stop_trying_at = mod_time.monotonic() + blocking_timeout\n while True:\n if await self.do_acquire(token):\n self.local.token = token\n return True\n if not blocking:\n return False\n next_try_at = mod_time.monotonic() + sleep\n if stop_trying_at is not None and next_try_at > stop_trying_at:\n return False\n await asyncio.sleep(sleep)\n\n async def do_acquire(self, token: Union[str, bytes]) -> bool:\n if self.timeout:\n # convert to milliseconds\n timeout = int(self.timeout * 1000)\n else:\n timeout = None\n if await self.redis.set(self.name, token, nx=True, px=timeout):\n return True\n return False\n\n async def locked(self) -> bool:\n \"\"\"\n Returns True if this key is locked by any process, otherwise False.\n \"\"\"\n return await self.redis.get(self.name) is not None\n\n async def owned(self) -> bool:\n \"\"\"\n Returns True if this key is locked by this lock, otherwise False.\n \"\"\"\n stored_token = await self.redis.get(self.name)\n # need to always compare bytes to bytes\n # TODO: this can be simplified when the context manager is finished\n if stored_token and not isinstance(stored_token, bytes):\n encoder = self.redis.connection_pool.get_encoder()\n stored_token = encoder.encode(stored_token)\n return self.local.token is not None and stored_token == self.local.token\n\n def release(self) -> Awaitable[NoReturn]:\n \"\"\"Releases the already acquired lock\"\"\"\n expected_token = self.local.token\n if expected_token is None:\n raise LockError(\"Cannot release an unlocked lock\")\n self.local.token = None\n return self.do_release(expected_token)\n\n async def do_release(self, expected_token: bytes):\n if not bool(\n await self.lua_release(\n keys=[self.name], args=[expected_token], client=self.redis\n )\n ):\n raise LockNotOwnedError(\"Cannot release a lock\" \" that's no longer owned\")\n\n def extend(\n self, additional_time: float, replace_ttl: bool = False\n ) -> Awaitable[bool]:\n \"\"\"\n Adds more time to an already acquired lock.\n\n ``additional_time`` can be specified as an integer or a float, both\n representing the number of seconds to add.\n\n ``replace_ttl`` if False (the default), add `additional_time` to\n the lock's existing ttl. 
If True, replace the lock's ttl with\n `additional_time`.\n \"\"\"\n if self.local.token is None:\n raise LockError(\"Cannot extend an unlocked lock\")\n if self.timeout is None:\n raise LockError(\"Cannot extend a lock with no timeout\")\n return self.do_extend(additional_time, replace_ttl)\n\n async def do_extend(self, additional_time, replace_ttl) -> bool:\n additional_time = int(additional_time * 1000)\n if not bool(\n await self.lua_extend(\n keys=[self.name],\n args=[self.local.token, additional_time, replace_ttl and \"1\" or \"0\"],\n client=self.redis,\n )\n ):\n raise LockNotOwnedError(\"Cannot extend a lock that's\" \" no longer owned\")\n return True\n\n def reacquire(self) -> Awaitable[bool]:\n \"\"\"\n Resets a TTL of an already acquired lock back to a timeout value.\n \"\"\"\n if self.local.token is None:\n raise LockError(\"Cannot reacquire an unlocked lock\")\n if self.timeout is None:\n raise LockError(\"Cannot reacquire a lock with no timeout\")\n return self.do_reacquire()\n\n async def do_reacquire(self) -> bool:\n timeout = int(self.timeout * 1000)\n if not bool(\n await self.lua_reacquire(\n keys=[self.name], args=[self.local.token, timeout], client=self.redis\n )\n ):\n raise LockNotOwnedError(\"Cannot reacquire a lock that's\" \" no longer owned\")\n return True\n", "path": "aioredis/lock.py"}]}
| 3,734 | 156 |
gh_patches_debug_9141
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-720
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support multi-line feedback
### Motivation
<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->
When users send multiple lines of feedback, the lines are shown wrapped in the backend list. It would make the feedback more readable if the feedback were shown exactly as the user submitted it.
### Proposed Solution
<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->
- Change `CharField` to `TextField`
- Use the css class `whitespace-pre-line` when rendering feedback
- Collapse feedback which has multiple lines
### Alternatives
<!-- A clear and concise description of any alternative solutions or features you've considered, and why your proposed solution is better. -->
### Additional Context
<!-- Add any other information or screenshots about the feature request here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/models/feedback/feedback.py`
Content:
```
1 from django.conf import settings
2 from django.db import models
3 from django.utils.text import capfirst
4 from django.utils.translation import ugettext_lazy as _
5
6 from ...constants import feedback_ratings
7 from ...utils.translation_utils import ugettext_many_lazy as __
8 from ..languages.language import Language
9 from ..regions.region import Region
10
11
12 class Feedback(models.Model):
13 """
14 Database model representing feedback from app-users.
15 Do not directly create instances of this base model, but of the submodels (e.g. PageFeedback) instead.
16 """
17
18 region = models.ForeignKey(
19 Region,
20 on_delete=models.CASCADE,
21 related_name="feedback",
22 verbose_name=_("region"),
23 )
24 language = models.ForeignKey(
25 Language,
26 on_delete=models.CASCADE,
27 related_name="feedback",
28 verbose_name=_("language"),
29 )
30 #: Manage choices in :mod:`cms.constants.feedback_ratings`
31 rating = models.BooleanField(
32 null=True,
33 blank=True,
34 default=feedback_ratings.NOT_STATED,
35 choices=feedback_ratings.CHOICES,
36 verbose_name=_("rating"),
37 help_text=_("Whether the feedback is positive or negative"),
38 )
39 comment = models.CharField(max_length=1000, blank=True, verbose_name=_("comment"))
40 is_technical = models.BooleanField(
41 verbose_name=_("technical"),
42 help_text=_("Whether or not the feedback is targeted at the developers"),
43 )
44 read_by = models.ForeignKey(
45 settings.AUTH_USER_MODEL,
46 null=True,
47 blank=True,
48 on_delete=models.SET_NULL,
49 related_name="feedback",
50 verbose_name=_("marked as read by"),
51 help_text=__(
52 _("The user who marked this feedback as read."),
53 _("If the feedback is unread, this field is empty."),
54 ),
55 )
56 created_date = models.DateTimeField(
57 auto_now_add=True,
58 verbose_name=_("creation date"),
59 )
60
61 @property
62 def submodel_instance(self):
63 """
64 This property returns the submodel instance (e.g. PageFeedback) of a Feedback instance.
65 """
66 # In this case we need type() instead of isinstance(), because we want to differ between inherited models
67 # pylint: disable=unidiomatic-typecheck
68 if type(self) != Feedback:
69 raise NotImplementedError(
70 "Use submodel_instance only on instances of the base Feedback model, not on submodels."
71 )
72 for submodel in Feedback.__subclasses__():
73 # Inherited models automatically get their name as lowercase assigned as reverse relationship from the base class
74 reverse_related_name = submodel.__name__.lower()
75 if hasattr(self, reverse_related_name):
76 return getattr(self, reverse_related_name)
77 raise TypeError(
78 "Do not directly create instances of the Feedback base model, but of the submodels (e.g. PageFeedback) instead."
79 )
80
81 @property
82 def category(self):
83 """
84 This property returns the category (verbose name of the submodel) of this feedback object.
85 """
86 return capfirst(type(self.submodel_instance)._meta.verbose_name)
87
88 @property
89 def object_name(self):
90 """
91 This property returns the name of the object this feedback comments on.
92 To be implemented in the inheriting model.
93 """
94 return self.submodel_instance.object_name
95
96 @property
97 def object_url(self):
98 """
99 This property returns the url to the object this feedback comments on.
100 To be implemented in the inheriting model.
101 """
102 return self.submodel_instance.object_url
103
104 @property
105 def related_feedback(self):
106 """
107 This property returns all feedback entries which relate to the same object and have the same is_technical value.
108
109 :return: The queryset of related feedback
110 :rtype: ~django.db.models.query.QuerySet [ ~cms.models.feedback.feedback.Feedback ]
111 """
112 return self.submodel_instance.related_feedback
113
114 @property
115 def rating_sum_positive(self):
116 """
117 This property returns the sum of the up-ratings of this object.
118
119 :return: The number of positive ratings on this feedback object
120 :rtype: int
121 """
122 # Enable this property on instances of the base Feedback model
123 # In this case we need type() instead of isinstance(), because we want to differ between inherited models
124 # pylint: disable=unidiomatic-typecheck
125 if type(self) == Feedback:
126 instance = self.submodel_instance
127 else:
128 instance = self
129 return instance.related_feedback.filter(
130 rating=feedback_ratings.POSITIVE
131 ).count()
132
133 @property
134 def rating_sum_negative(self):
135 """
136 This property returns the sum of the down-ratings of this object.
137
138 :return: The number of negative ratings on this feedback object
139 :rtype: int
140 """
141 # Enable this property on instances of the base Feedback model
142 # In this case we need type() instead of isinstance(), because we want to differ between inherited models
143 # pylint: disable=unidiomatic-typecheck
144 if type(self) == Feedback:
145 instance = self.submodel_instance
146 else:
147 instance = self
148 return instance.related_feedback.filter(
149 rating=feedback_ratings.NEGATIVE
150 ).count()
151
152 @property
153 def read(self):
154 """
155 This property returns whether or not the feedback is marked as read or not.
156 It is ``True`` if :attr:`~cms.models.feedback.feedback.Feedback.read_by` is set and ``False`` otherwise.
157 """
158 return bool(self.read_by)
159
160 class Meta:
161 #: The verbose name of the model
162 verbose_name = _("feedback")
163 #: The plural verbose name of the model
164 verbose_name_plural = _("feedback")
165 #: The fields which are used to sort the returned objects of a QuerySet
166 ordering = ["-created_date"]
167 #: The default permissions for this model
168 default_permissions = ()
169 #: The custom permissions for this model
170 permissions = (("manage_feedback", "Can manage feedback"),)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py
--- a/src/cms/models/feedback/feedback.py
+++ b/src/cms/models/feedback/feedback.py
@@ -36,7 +36,7 @@
verbose_name=_("rating"),
help_text=_("Whether the feedback is positive or negative"),
)
- comment = models.CharField(max_length=1000, blank=True, verbose_name=_("comment"))
+ comment = models.TextField(blank=True, verbose_name=_("comment"))
is_technical = models.BooleanField(
verbose_name=_("technical"),
help_text=_("Whether or not the feedback is targeted at the developers"),
|
{"golden_diff": "diff --git a/src/cms/models/feedback/feedback.py b/src/cms/models/feedback/feedback.py\n--- a/src/cms/models/feedback/feedback.py\n+++ b/src/cms/models/feedback/feedback.py\n@@ -36,7 +36,7 @@\n verbose_name=_(\"rating\"),\n help_text=_(\"Whether the feedback is positive or negative\"),\n )\n- comment = models.CharField(max_length=1000, blank=True, verbose_name=_(\"comment\"))\n+ comment = models.TextField(blank=True, verbose_name=_(\"comment\"))\n is_technical = models.BooleanField(\n verbose_name=_(\"technical\"),\n help_text=_(\"Whether or not the feedback is targeted at the developers\"),\n", "issue": "Support multi-line feedback\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\nWhen users sent multiple lines of feedback, the lines are shown wrapped in the backend list. It would make the feedback more readable if the feedback was shown exactly like the user submitted it.\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\n- Change `CharField` to `TextField`\r\n- Use the css class `whitespace-pre-line` when rendering feedback\r\n- Collapse feedback which has multiple lines\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->\r\n\r\n\r\n### Additional Context\r\n<!-- Add any other information or screenshots about the feature request here. -->\r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ...constants import feedback_ratings\nfrom ...utils.translation_utils import ugettext_many_lazy as __\nfrom ..languages.language import Language\nfrom ..regions.region import Region\n\n\nclass Feedback(models.Model):\n \"\"\"\n Database model representing feedback from app-users.\n Do not directly create instances of this base model, but of the submodels (e.g. PageFeedback) instead.\n \"\"\"\n\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"feedback\",\n verbose_name=_(\"region\"),\n )\n language = models.ForeignKey(\n Language,\n on_delete=models.CASCADE,\n related_name=\"feedback\",\n verbose_name=_(\"language\"),\n )\n #: Manage choices in :mod:`cms.constants.feedback_ratings`\n rating = models.BooleanField(\n null=True,\n blank=True,\n default=feedback_ratings.NOT_STATED,\n choices=feedback_ratings.CHOICES,\n verbose_name=_(\"rating\"),\n help_text=_(\"Whether the feedback is positive or negative\"),\n )\n comment = models.CharField(max_length=1000, blank=True, verbose_name=_(\"comment\"))\n is_technical = models.BooleanField(\n verbose_name=_(\"technical\"),\n help_text=_(\"Whether or not the feedback is targeted at the developers\"),\n )\n read_by = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name=\"feedback\",\n verbose_name=_(\"marked as read by\"),\n help_text=__(\n _(\"The user who marked this feedback as read.\"),\n _(\"If the feedback is unread, this field is empty.\"),\n ),\n )\n created_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name=_(\"creation date\"),\n )\n\n @property\n def submodel_instance(self):\n \"\"\"\n This property returns the submodel instance (e.g. 
PageFeedback) of a Feedback instance.\n \"\"\"\n # In this case we need type() instead of isinstance(), because we want to differ between inherited models\n # pylint: disable=unidiomatic-typecheck\n if type(self) != Feedback:\n raise NotImplementedError(\n \"Use submodel_instance only on instances of the base Feedback model, not on submodels.\"\n )\n for submodel in Feedback.__subclasses__():\n # Inherited models automatically get their name as lowercase assigned as reverse relationship from the base class\n reverse_related_name = submodel.__name__.lower()\n if hasattr(self, reverse_related_name):\n return getattr(self, reverse_related_name)\n raise TypeError(\n \"Do not directly create instances of the Feedback base model, but of the submodels (e.g. PageFeedback) instead.\"\n )\n\n @property\n def category(self):\n \"\"\"\n This property returns the category (verbose name of the submodel) of this feedback object.\n \"\"\"\n return capfirst(type(self.submodel_instance)._meta.verbose_name)\n\n @property\n def object_name(self):\n \"\"\"\n This property returns the name of the object this feedback comments on.\n To be implemented in the inheriting model.\n \"\"\"\n return self.submodel_instance.object_name\n\n @property\n def object_url(self):\n \"\"\"\n This property returns the url to the object this feedback comments on.\n To be implemented in the inheriting model.\n \"\"\"\n return self.submodel_instance.object_url\n\n @property\n def related_feedback(self):\n \"\"\"\n This property returns all feedback entries which relate to the same object and have the same is_technical value.\n\n :return: The queryset of related feedback\n :rtype: ~django.db.models.query.QuerySet [ ~cms.models.feedback.feedback.Feedback ]\n \"\"\"\n return self.submodel_instance.related_feedback\n\n @property\n def rating_sum_positive(self):\n \"\"\"\n This property returns the sum of the up-ratings of this object.\n\n :return: The number of positive ratings on this feedback object\n :rtype: int\n \"\"\"\n # Enable this property on instances of the base Feedback model\n # In this case we need type() instead of isinstance(), because we want to differ between inherited models\n # pylint: disable=unidiomatic-typecheck\n if type(self) == Feedback:\n instance = self.submodel_instance\n else:\n instance = self\n return instance.related_feedback.filter(\n rating=feedback_ratings.POSITIVE\n ).count()\n\n @property\n def rating_sum_negative(self):\n \"\"\"\n This property returns the sum of the down-ratings of this object.\n\n :return: The number of negative ratings on this feedback object\n :rtype: int\n \"\"\"\n # Enable this property on instances of the base Feedback model\n # In this case we need type() instead of isinstance(), because we want to differ between inherited models\n # pylint: disable=unidiomatic-typecheck\n if type(self) == Feedback:\n instance = self.submodel_instance\n else:\n instance = self\n return instance.related_feedback.filter(\n rating=feedback_ratings.NEGATIVE\n ).count()\n\n @property\n def read(self):\n \"\"\"\n This property returns whether or not the feedback is marked as read or not.\n It is ``True`` if :attr:`~cms.models.feedback.feedback.Feedback.read_by` is set and ``False`` otherwise.\n \"\"\"\n return bool(self.read_by)\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"feedback\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"feedback\")\n #: The fields which are used to sort the returned objects of a QuerySet\n ordering = 
[\"-created_date\"]\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_feedback\", \"Can manage feedback\"),)\n", "path": "src/cms/models/feedback/feedback.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ...constants import feedback_ratings\nfrom ...utils.translation_utils import ugettext_many_lazy as __\nfrom ..languages.language import Language\nfrom ..regions.region import Region\n\n\nclass Feedback(models.Model):\n \"\"\"\n Database model representing feedback from app-users.\n Do not directly create instances of this base model, but of the submodels (e.g. PageFeedback) instead.\n \"\"\"\n\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"feedback\",\n verbose_name=_(\"region\"),\n )\n language = models.ForeignKey(\n Language,\n on_delete=models.CASCADE,\n related_name=\"feedback\",\n verbose_name=_(\"language\"),\n )\n #: Manage choices in :mod:`cms.constants.feedback_ratings`\n rating = models.BooleanField(\n null=True,\n blank=True,\n default=feedback_ratings.NOT_STATED,\n choices=feedback_ratings.CHOICES,\n verbose_name=_(\"rating\"),\n help_text=_(\"Whether the feedback is positive or negative\"),\n )\n comment = models.TextField(blank=True, verbose_name=_(\"comment\"))\n is_technical = models.BooleanField(\n verbose_name=_(\"technical\"),\n help_text=_(\"Whether or not the feedback is targeted at the developers\"),\n )\n read_by = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name=\"feedback\",\n verbose_name=_(\"marked as read by\"),\n help_text=__(\n _(\"The user who marked this feedback as read.\"),\n _(\"If the feedback is unread, this field is empty.\"),\n ),\n )\n created_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name=_(\"creation date\"),\n )\n\n @property\n def submodel_instance(self):\n \"\"\"\n This property returns the submodel instance (e.g. PageFeedback) of a Feedback instance.\n \"\"\"\n # In this case we need type() instead of isinstance(), because we want to differ between inherited models\n # pylint: disable=unidiomatic-typecheck\n if type(self) != Feedback:\n raise NotImplementedError(\n \"Use submodel_instance only on instances of the base Feedback model, not on submodels.\"\n )\n for submodel in Feedback.__subclasses__():\n # Inherited models automatically get their name as lowercase assigned as reverse relationship from the base class\n reverse_related_name = submodel.__name__.lower()\n if hasattr(self, reverse_related_name):\n return getattr(self, reverse_related_name)\n raise TypeError(\n \"Do not directly create instances of the Feedback base model, but of the submodels (e.g. 
PageFeedback) instead.\"\n )\n\n @property\n def category(self):\n \"\"\"\n This property returns the category (verbose name of the submodel) of this feedback object.\n \"\"\"\n return capfirst(type(self.submodel_instance)._meta.verbose_name)\n\n @property\n def object_name(self):\n \"\"\"\n This property returns the name of the object this feedback comments on.\n To be implemented in the inheriting model.\n \"\"\"\n return self.submodel_instance.object_name\n\n @property\n def object_url(self):\n \"\"\"\n This property returns the url to the object this feedback comments on.\n To be implemented in the inheriting model.\n \"\"\"\n return self.submodel_instance.object_url\n\n @property\n def related_feedback(self):\n \"\"\"\n This property returns all feedback entries which relate to the same object and have the same is_technical value.\n\n :return: The queryset of related feedback\n :rtype: ~django.db.models.query.QuerySet [ ~cms.models.feedback.feedback.Feedback ]\n \"\"\"\n return self.submodel_instance.related_feedback\n\n @property\n def rating_sum_positive(self):\n \"\"\"\n This property returns the sum of the up-ratings of this object.\n\n :return: The number of positive ratings on this feedback object\n :rtype: int\n \"\"\"\n # Enable this property on instances of the base Feedback model\n # In this case we need type() instead of isinstance(), because we want to differ between inherited models\n # pylint: disable=unidiomatic-typecheck\n if type(self) == Feedback:\n instance = self.submodel_instance\n else:\n instance = self\n return instance.related_feedback.filter(\n rating=feedback_ratings.POSITIVE\n ).count()\n\n @property\n def rating_sum_negative(self):\n \"\"\"\n This property returns the sum of the down-ratings of this object.\n\n :return: The number of negative ratings on this feedback object\n :rtype: int\n \"\"\"\n # Enable this property on instances of the base Feedback model\n # In this case we need type() instead of isinstance(), because we want to differ between inherited models\n # pylint: disable=unidiomatic-typecheck\n if type(self) == Feedback:\n instance = self.submodel_instance\n else:\n instance = self\n return instance.related_feedback.filter(\n rating=feedback_ratings.NEGATIVE\n ).count()\n\n @property\n def read(self):\n \"\"\"\n This property returns whether or not the feedback is marked as read or not.\n It is ``True`` if :attr:`~cms.models.feedback.feedback.Feedback.read_by` is set and ``False`` otherwise.\n \"\"\"\n return bool(self.read_by)\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"feedback\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"feedback\")\n #: The fields which are used to sort the returned objects of a QuerySet\n ordering = [\"-created_date\"]\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_feedback\", \"Can manage feedback\"),)\n", "path": "src/cms/models/feedback/feedback.py"}]}
| 2,103 | 142 |
gh_patches_debug_6451
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-5630
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Validator throws KeyError 'table.head' while interactively creating Expectation Suite on a BigQuery datasource
**Describe the bug**
I am getting the same error as described in #3540 when interactively creating an Expectation Suite on a BigQuery datasource via CLI. As requested in the discussion, I am opening a new issue for this.
In the "Edit Your Expectation Suite" notebook provided by `great_expectations suite new`, the following function call throws an error:
```python
validator.head(n_rows=5, fetch_all=False)
```
Thrown error:
```text
KeyError Traceback (most recent call last)
Input In [11], in <cell line: 1>()
----> 1 validator.head(n_rows=5, fetch_all=False)
File some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:2146, in Validator.head(self, n_rows, domain_kwargs, fetch_all)
2141 if domain_kwargs is None:
2142 domain_kwargs = {
2143 "batch_id": self.execution_engine.active_batch_data_id,
2144 }
-> 2146 data: Any = self.get_metric(
2147 metric=MetricConfiguration(
2148 metric_name="table.head",
2149 metric_domain_kwargs=domain_kwargs,
2150 metric_value_kwargs={
2151 "n_rows": n_rows,
2152 "fetch_all": fetch_all,
2153 },
2154 )
2155 )
2157 df: pd.DataFrame
2158 if isinstance(
2159 self.execution_engine, (PandasExecutionEngine, SqlAlchemyExecutionEngine)
2160 ):
File some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:891, in Validator.get_metric(self, metric)
889 def get_metric(self, metric: MetricConfiguration) -> Any:
890 """return the value of the requested metric."""
--> 891 return self.get_metrics(metrics={metric.metric_name: metric})[
892 metric.metric_name
893 ]
File some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:856, in Validator.get_metrics(self, metrics)
848 """
849 metrics: Dictionary of desired metrics to be resolved, with metric_name as key and MetricConfiguration as value.
850 Return Dictionary with requested metrics resolved, with metric_name as key and computed metric as value.
851 """
852 resolved_metrics: Dict[Tuple[str, str, str], Any] = self.compute_metrics(
853 metric_configurations=list(metrics.values())
854 )
--> 856 return {
857 metric_configuration.metric_name: resolved_metrics[metric_configuration.id]
858 for metric_configuration in metrics.values()
859 }
File some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:857, in <dictcomp>(.0)
848 """
849 metrics: Dictionary of desired metrics to be resolved, with metric_name as key and MetricConfiguration as value.
850 Return Dictionary with requested metrics resolved, with metric_name as key and computed metric as value.
851 """
852 resolved_metrics: Dict[Tuple[str, str, str], Any] = self.compute_metrics(
853 metric_configurations=list(metrics.values())
854 )
856 return {
--> 857 metric_configuration.metric_name: resolved_metrics[metric_configuration.id]
858 for metric_configuration in metrics.values()
859 }
KeyError: ('table.head', 'batch_id=15a077d486452b3e1c894458758b7972', '04166707abe073177c1dd922d3584468')
```
**To Reproduce**
Steps to reproduce the behavior:
1. Initialize GE project
2. Add a BigQuery datasource via `great_expectations datasource new`
3. Create a new Expectation Suite via `great_expectations suite new`
4. Choose Interactively and select your datasource and data asset
5. Execute notebook code including the _validator.head()_ call
6. See error above
**Expected behavior**
Calling validator.head() should not raise a KeyError.
**Environment**
- Operating System: MacOS 12.3.1
- Great Expectations Version: 0.15.11
**Additional context**
I have examined the GCP logs for the period covering the validator.head() call. I can rule out a permission error, because the service account used had full rights on the GCP project during debugging. However, errors do occur in the BigQuery service for the JobService.InsertJob method, and they are not due to insufficient permissions:
```json
"serviceName": "bigquery.googleapis.com",
"methodName": "google.cloud.bigquery.v2.JobService.InsertJob",
"authorizationInfo": [
{
"resource": "projects/my-project",
"permission": "bigquery.jobs.create",
"granted": true,
"resourceAttributes": {}
}
],
```
The error itself is reported in the response object _jobStatus_:
```json
"jobStatus": {
"errors": [
{
"code": 3,
"message": "Cannot access field id on a value with type ARRAY<STRUCT<id STRING>> at [1:4656]"
}
],
"errorResult": {
"message": "Cannot access field id on a value with type ARRAY<STRUCT<id STRING>> at [1:4656]",
"code": 3
},
"jobState": "DONE"
},
```
Some fields of the table I use are nested fields. Does the validator have problems with these?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/metrics/table_metrics/table_head.py`
Content:
```
1 from typing import Any, Dict
2
3 import pandas as pd
4
5 from great_expectations.core.metric_domain_types import MetricDomainTypes
6 from great_expectations.execution_engine import (
7 PandasExecutionEngine,
8 SparkDFExecutionEngine,
9 SqlAlchemyExecutionEngine,
10 )
11 from great_expectations.expectations.metrics.import_manager import sa
12 from great_expectations.expectations.metrics.metric_provider import metric_value
13 from great_expectations.expectations.metrics.table_metric_provider import (
14 TableMetricProvider,
15 )
16 from great_expectations.validator.metric_configuration import MetricConfiguration
17 from great_expectations.validator.validator import Validator
18
19
20 class TableHead(TableMetricProvider):
21 metric_name = "table.head"
22 value_keys = ("n_rows", "fetch_all")
23 default_kwarg_values = {"n_rows": 5, "fetch_all": False}
24
25 @metric_value(engine=PandasExecutionEngine)
26 def _pandas(
27 cls,
28 execution_engine: PandasExecutionEngine,
29 metric_domain_kwargs: Dict,
30 metric_value_kwargs: Dict,
31 metrics: Dict[str, Any],
32 runtime_configuration: Dict,
33 ):
34 df, _, _ = execution_engine.get_compute_domain(
35 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
36 )
37 if metric_value_kwargs.get("fetch_all", cls.default_kwarg_values["fetch_all"]):
38 return df
39 return df.head(metric_value_kwargs["n_rows"])
40
41 @metric_value(engine=SqlAlchemyExecutionEngine)
42 def _sqlalchemy(
43 cls,
44 execution_engine: SqlAlchemyExecutionEngine,
45 metric_domain_kwargs: Dict,
46 metric_value_kwargs: Dict,
47 metrics: Dict[str, Any],
48 runtime_configuration: Dict,
49 ):
50 selectable, _, _ = execution_engine.get_compute_domain(
51 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
52 )
53 df = None
54 table_name = getattr(selectable, "name", None)
55 if table_name is None:
56 # if a custom query was passed
57 try:
58 if metric_value_kwargs["fetch_all"]:
59 df = pd.read_sql_query(
60 sql=selectable,
61 con=execution_engine.engine,
62 )
63 else:
64 df = next(
65 pd.read_sql_query(
66 sql=selectable,
67 con=execution_engine.engine,
68 chunksize=metric_value_kwargs["n_rows"],
69 )
70 )
71 except (ValueError, NotImplementedError):
72 # it looks like MetaData that is used by pd.read_sql_query
73 # cannot work on a temp table.
74 # If it fails, we are trying to get the data using read_sql
75 df = None
76 except StopIteration:
77 validator = Validator(execution_engine=execution_engine)
78 columns = validator.get_metric(
79 MetricConfiguration("table.columns", metric_domain_kwargs)
80 )
81 df = pd.DataFrame(columns=columns)
82 else:
83 try:
84 if metric_value_kwargs["fetch_all"]:
85 df = pd.read_sql_table(
86 table_name=getattr(selectable, "name", None),
87 schema=getattr(selectable, "schema", None),
88 con=execution_engine.engine,
89 )
90 else:
91 df = next(
92 pd.read_sql_table(
93 table_name=getattr(selectable, "name", None),
94 schema=getattr(selectable, "schema", None),
95 con=execution_engine.engine,
96 chunksize=metric_value_kwargs["n_rows"],
97 )
98 )
99 except (ValueError, NotImplementedError):
100 # it looks like MetaData that is used by pd.read_sql_table
101 # cannot work on a temp table.
102 # If it fails, we are trying to get the data using read_sql
103 df = None
104 except StopIteration:
105 validator = Validator(execution_engine=execution_engine)
106 columns = validator.get_metric(
107 MetricConfiguration("table.columns", metric_domain_kwargs)
108 )
109 df = pd.DataFrame(columns=columns)
110
111 if df is None:
112 # we want to compile our selectable
113 stmt = sa.select(["*"]).select_from(selectable)
114 if metric_value_kwargs["fetch_all"]:
115 sql = stmt.compile(
116 dialect=execution_engine.engine.dialect,
117 compile_kwargs={"literal_binds": True},
118 )
119 elif execution_engine.engine.dialect.name.lower() == "mssql":
120 # limit doesn't compile properly for mssql
121 sql = str(
122 stmt.compile(
123 dialect=execution_engine.engine.dialect,
124 compile_kwargs={"literal_binds": True},
125 )
126 )
127 sql = f"SELECT TOP {metric_value_kwargs['n_rows']}{sql[6:]}"
128 else:
129 stmt = stmt.limit(metric_value_kwargs["n_rows"])
130 sql = stmt.compile(
131 dialect=execution_engine.engine.dialect,
132 compile_kwargs={"literal_binds": True},
133 )
134
135 df = pd.read_sql(sql, con=execution_engine.engine)
136
137 return df
138
139 @metric_value(engine=SparkDFExecutionEngine)
140 def _spark(
141 cls,
142 execution_engine: SparkDFExecutionEngine,
143 metric_domain_kwargs: Dict,
144 metric_value_kwargs: Dict,
145 metrics: Dict[str, Any],
146 runtime_configuration: Dict,
147 ):
148 df, _, _ = execution_engine.get_compute_domain(
149 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
150 )
151 if metric_value_kwargs["fetch_all"]:
152 return df.collect()
153 return df.head(metric_value_kwargs["n_rows"])
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/expectations/metrics/table_metrics/table_head.py b/great_expectations/expectations/metrics/table_metrics/table_head.py
--- a/great_expectations/expectations/metrics/table_metrics/table_head.py
+++ b/great_expectations/expectations/metrics/table_metrics/table_head.py
@@ -52,7 +52,10 @@
)
df = None
table_name = getattr(selectable, "name", None)
- if table_name is None:
+ if (
+ isinstance(table_name, sa.sql.elements._anonymous_label)
+ or table_name is None
+ ):
# if a custom query was passed
try:
if metric_value_kwargs["fetch_all"]:
|
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/table_metrics/table_head.py b/great_expectations/expectations/metrics/table_metrics/table_head.py\n--- a/great_expectations/expectations/metrics/table_metrics/table_head.py\n+++ b/great_expectations/expectations/metrics/table_metrics/table_head.py\n@@ -52,7 +52,10 @@\n )\n df = None\n table_name = getattr(selectable, \"name\", None)\n- if table_name is None:\n+ if (\n+ isinstance(table_name, sa.sql.elements._anonymous_label)\n+ or table_name is None\n+ ):\n # if a custom query was passed\n try:\n if metric_value_kwargs[\"fetch_all\"]:\n", "issue": "Validator throws KeyError 'table.head' while interactively creating Expectation Suite on a BigQuery datasource\n**Describe the bug**\r\nI am getting the same error as described in #3540 when interactively creating an Expectation Suite on a BigQuery datasource via CLI. As requested in the discussion, I am opening a new issue for this.\r\n\r\nIn the \"Edit Your Expectation Suite\" notebook provided by `great_expectations suite new`, the following function call throws an error:\r\n\r\n```python\r\nvalidator.head(n_rows=5, fetch_all=False)\r\n```\r\n\r\nThrown error:\r\n\r\n```text\r\nKeyError Traceback (most recent call last)\r\nInput In [11], in <cell line: 1>()\r\n----> 1 validator.head(n_rows=5, fetch_all=False)\r\n\r\nFile some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:2146, in Validator.head(self, n_rows, domain_kwargs, fetch_all)\r\n 2141 if domain_kwargs is None:\r\n 2142 domain_kwargs = {\r\n 2143 \"batch_id\": self.execution_engine.active_batch_data_id,\r\n 2144 }\r\n-> 2146 data: Any = self.get_metric(\r\n 2147 metric=MetricConfiguration(\r\n 2148 metric_name=\"table.head\",\r\n 2149 metric_domain_kwargs=domain_kwargs,\r\n 2150 metric_value_kwargs={\r\n 2151 \"n_rows\": n_rows,\r\n 2152 \"fetch_all\": fetch_all,\r\n 2153 },\r\n 2154 )\r\n 2155 )\r\n 2157 df: pd.DataFrame\r\n 2158 if isinstance(\r\n 2159 self.execution_engine, (PandasExecutionEngine, SqlAlchemyExecutionEngine)\r\n 2160 ):\r\n\r\nFile some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:891, in Validator.get_metric(self, metric)\r\n 889 def get_metric(self, metric: MetricConfiguration) -> Any:\r\n 890 \"\"\"return the value of the requested metric.\"\"\"\r\n--> 891 return self.get_metrics(metrics={metric.metric_name: metric})[\r\n 892 metric.metric_name\r\n 893 ]\r\n\r\nFile some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:856, in Validator.get_metrics(self, metrics)\r\n 848 \"\"\"\r\n 849 metrics: Dictionary of desired metrics to be resolved, with metric_name as key and MetricConfiguration as value.\r\n 850 Return Dictionary with requested metrics resolved, with metric_name as key and computed metric as value.\r\n 851 \"\"\"\r\n 852 resolved_metrics: Dict[Tuple[str, str, str], Any] = self.compute_metrics(\r\n 853 metric_configurations=list(metrics.values())\r\n 854 )\r\n--> 856 return {\r\n 857 metric_configuration.metric_name: resolved_metrics[metric_configuration.id]\r\n 858 for metric_configuration in metrics.values()\r\n 859 }\r\n\r\nFile some-path/.venv/lib/python3.9/site-packages/great_expectations/validator/validator.py:857, in <dictcomp>(.0)\r\n 848 \"\"\"\r\n 849 metrics: Dictionary of desired metrics to be resolved, with metric_name as key and MetricConfiguration as value.\r\n 850 Return Dictionary with requested metrics resolved, with metric_name as key and computed metric as value.\r\n 
851 \"\"\"\r\n 852 resolved_metrics: Dict[Tuple[str, str, str], Any] = self.compute_metrics(\r\n 853 metric_configurations=list(metrics.values())\r\n 854 )\r\n 856 return {\r\n--> 857 metric_configuration.metric_name: resolved_metrics[metric_configuration.id]\r\n 858 for metric_configuration in metrics.values()\r\n 859 }\r\n\r\nKeyError: ('table.head', 'batch_id=15a077d486452b3e1c894458758b7972', '04166707abe073177c1dd922d3584468')\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Initialize GE project\r\n2. Add a BigQuery datasource via `great_expectations datasource new`\r\n3. Create a new Expectation Suite via `great_expectations suite new`\r\n4. Choose Interactively and select your datasource and data asset\r\n5. Execute notebook code including the _validator.head()_ call\r\n6. See error above\r\n\r\n**Expected behavior**\r\nCalling validator.head() should not raise a KeyError.\r\n\r\n**Environment**\r\n - Operating System: MacOS 12.3.1\r\n - Great Expectations Version: 0.15.11\r\n\r\n**Additional context**\r\n\r\nI have examined the GCP logs in the period of the call of the validator.head() function. I exclude a permission error, because the used service account has maximum rights on used GCP project during debugging. However, errors occur here in the BigQuery service with the JobService.InsertJob method, which are not due to insufficient permissions:\r\n\r\n```json\r\n\"serviceName\": \"bigquery.googleapis.com\",\r\n\"methodName\": \"google.cloud.bigquery.v2.JobService.InsertJob\",\r\n\"authorizationInfo\": [\r\n {\r\n \"resource\": \"projects/my-project\",\r\n \"permission\": \"bigquery.jobs.create\",\r\n \"granted\": true,\r\n \"resourceAttributes\": {}\r\n }\r\n],\r\n```\r\n\r\nThe error itself is reported in the response object _jobStatus_:\r\n\r\n```json\r\n\"jobStatus\": {\r\n \"errors\": [\r\n {\r\n \"code\": 3,\r\n \"message\": \"Cannot access field id on a value with type ARRAY<STRUCT<id STRING>> at [1:4656]\"\r\n }\r\n ],\r\n \"errorResult\": {\r\n \"message\": \"Cannot access field id on a value with type ARRAY<STRUCT<id STRING>> at [1:4656]\",\r\n \"code\": 3\r\n },\r\n \"jobState\": \"DONE\"\r\n},\r\n```\r\n\r\nSome fields of the table I use are nested fields. 
Does the validator have problems with these?\n", "before_files": [{"content": "from typing import Any, Dict\n\nimport pandas as pd\n\nfrom great_expectations.core.metric_domain_types import MetricDomainTypes\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.expectations.metrics.table_metric_provider import (\n TableMetricProvider,\n)\nfrom great_expectations.validator.metric_configuration import MetricConfiguration\nfrom great_expectations.validator.validator import Validator\n\n\nclass TableHead(TableMetricProvider):\n metric_name = \"table.head\"\n value_keys = (\"n_rows\", \"fetch_all\")\n default_kwarg_values = {\"n_rows\": 5, \"fetch_all\": False}\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n if metric_value_kwargs.get(\"fetch_all\", cls.default_kwarg_values[\"fetch_all\"]):\n return df\n return df.head(metric_value_kwargs[\"n_rows\"])\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n selectable, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n df = None\n table_name = getattr(selectable, \"name\", None)\n if table_name is None:\n # if a custom query was passed\n try:\n if metric_value_kwargs[\"fetch_all\"]:\n df = pd.read_sql_query(\n sql=selectable,\n con=execution_engine.engine,\n )\n else:\n df = next(\n pd.read_sql_query(\n sql=selectable,\n con=execution_engine.engine,\n chunksize=metric_value_kwargs[\"n_rows\"],\n )\n )\n except (ValueError, NotImplementedError):\n # it looks like MetaData that is used by pd.read_sql_query\n # cannot work on a temp table.\n # If it fails, we are trying to get the data using read_sql\n df = None\n except StopIteration:\n validator = Validator(execution_engine=execution_engine)\n columns = validator.get_metric(\n MetricConfiguration(\"table.columns\", metric_domain_kwargs)\n )\n df = pd.DataFrame(columns=columns)\n else:\n try:\n if metric_value_kwargs[\"fetch_all\"]:\n df = pd.read_sql_table(\n table_name=getattr(selectable, \"name\", None),\n schema=getattr(selectable, \"schema\", None),\n con=execution_engine.engine,\n )\n else:\n df = next(\n pd.read_sql_table(\n table_name=getattr(selectable, \"name\", None),\n schema=getattr(selectable, \"schema\", None),\n con=execution_engine.engine,\n chunksize=metric_value_kwargs[\"n_rows\"],\n )\n )\n except (ValueError, NotImplementedError):\n # it looks like MetaData that is used by pd.read_sql_table\n # cannot work on a temp table.\n # If it fails, we are trying to get the data using read_sql\n df = None\n except StopIteration:\n validator = Validator(execution_engine=execution_engine)\n columns = validator.get_metric(\n MetricConfiguration(\"table.columns\", metric_domain_kwargs)\n )\n df = pd.DataFrame(columns=columns)\n\n if df is None:\n # we want to compile our selectable\n stmt = 
sa.select([\"*\"]).select_from(selectable)\n if metric_value_kwargs[\"fetch_all\"]:\n sql = stmt.compile(\n dialect=execution_engine.engine.dialect,\n compile_kwargs={\"literal_binds\": True},\n )\n elif execution_engine.engine.dialect.name.lower() == \"mssql\":\n # limit doesn't compile properly for mssql\n sql = str(\n stmt.compile(\n dialect=execution_engine.engine.dialect,\n compile_kwargs={\"literal_binds\": True},\n )\n )\n sql = f\"SELECT TOP {metric_value_kwargs['n_rows']}{sql[6:]}\"\n else:\n stmt = stmt.limit(metric_value_kwargs[\"n_rows\"])\n sql = stmt.compile(\n dialect=execution_engine.engine.dialect,\n compile_kwargs={\"literal_binds\": True},\n )\n\n df = pd.read_sql(sql, con=execution_engine.engine)\n\n return df\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n if metric_value_kwargs[\"fetch_all\"]:\n return df.collect()\n return df.head(metric_value_kwargs[\"n_rows\"])\n", "path": "great_expectations/expectations/metrics/table_metrics/table_head.py"}], "after_files": [{"content": "from typing import Any, Dict\n\nimport pandas as pd\n\nfrom great_expectations.core.metric_domain_types import MetricDomainTypes\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.expectations.metrics.table_metric_provider import (\n TableMetricProvider,\n)\nfrom great_expectations.validator.metric_configuration import MetricConfiguration\nfrom great_expectations.validator.validator import Validator\n\n\nclass TableHead(TableMetricProvider):\n metric_name = \"table.head\"\n value_keys = (\"n_rows\", \"fetch_all\")\n default_kwarg_values = {\"n_rows\": 5, \"fetch_all\": False}\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n if metric_value_kwargs.get(\"fetch_all\", cls.default_kwarg_values[\"fetch_all\"]):\n return df\n return df.head(metric_value_kwargs[\"n_rows\"])\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n selectable, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n df = None\n table_name = getattr(selectable, \"name\", None)\n if (\n isinstance(table_name, sa.sql.elements._anonymous_label)\n or table_name is None\n ):\n # if a custom query was passed\n try:\n if metric_value_kwargs[\"fetch_all\"]:\n df = pd.read_sql_query(\n sql=selectable,\n con=execution_engine.engine,\n )\n else:\n df = next(\n pd.read_sql_query(\n sql=selectable,\n con=execution_engine.engine,\n chunksize=metric_value_kwargs[\"n_rows\"],\n )\n )\n except (ValueError, NotImplementedError):\n # it looks like 
MetaData that is used by pd.read_sql_query\n # cannot work on a temp table.\n # If it fails, we are trying to get the data using read_sql\n df = None\n except StopIteration:\n validator = Validator(execution_engine=execution_engine)\n columns = validator.get_metric(\n MetricConfiguration(\"table.columns\", metric_domain_kwargs)\n )\n df = pd.DataFrame(columns=columns)\n else:\n try:\n if metric_value_kwargs[\"fetch_all\"]:\n df = pd.read_sql_table(\n table_name=getattr(selectable, \"name\", None),\n schema=getattr(selectable, \"schema\", None),\n con=execution_engine.engine,\n )\n else:\n df = next(\n pd.read_sql_table(\n table_name=getattr(selectable, \"name\", None),\n schema=getattr(selectable, \"schema\", None),\n con=execution_engine.engine,\n chunksize=metric_value_kwargs[\"n_rows\"],\n )\n )\n except (ValueError, NotImplementedError):\n # it looks like MetaData that is used by pd.read_sql_table\n # cannot work on a temp table.\n # If it fails, we are trying to get the data using read_sql\n df = None\n except StopIteration:\n validator = Validator(execution_engine=execution_engine)\n columns = validator.get_metric(\n MetricConfiguration(\"table.columns\", metric_domain_kwargs)\n )\n df = pd.DataFrame(columns=columns)\n\n if df is None:\n # we want to compile our selectable\n stmt = sa.select([\"*\"]).select_from(selectable)\n if metric_value_kwargs[\"fetch_all\"]:\n sql = stmt.compile(\n dialect=execution_engine.engine.dialect,\n compile_kwargs={\"literal_binds\": True},\n )\n elif execution_engine.engine.dialect.name.lower() == \"mssql\":\n # limit doesn't compile properly for mssql\n sql = str(\n stmt.compile(\n dialect=execution_engine.engine.dialect,\n compile_kwargs={\"literal_binds\": True},\n )\n )\n sql = f\"SELECT TOP {metric_value_kwargs['n_rows']}{sql[6:]}\"\n else:\n stmt = stmt.limit(metric_value_kwargs[\"n_rows\"])\n sql = stmt.compile(\n dialect=execution_engine.engine.dialect,\n compile_kwargs={\"literal_binds\": True},\n )\n\n df = pd.read_sql(sql, con=execution_engine.engine)\n\n return df\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n if metric_value_kwargs[\"fetch_all\"]:\n return df.collect()\n return df.head(metric_value_kwargs[\"n_rows\"])\n", "path": "great_expectations/expectations/metrics/table_metrics/table_head.py"}]}
| 3,125 | 158 |
gh_patches_debug_17179
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-1954
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request access to admin (add fixtures for Riley Orr)
### Issue description
Add fixtures so Riley can access the sandboxes
### Acceptance criteria
- [ ] Fixtures added for Riley Orr
### Additional context
_No response_
### Links to other issues
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/fixtures_users.py`
Content:
```
1 import logging
2 from faker import Faker
3 from django.db import transaction
4
5 from registrar.models import (
6 User,
7 UserGroup,
8 )
9
10 fake = Faker()
11 logger = logging.getLogger(__name__)
12
13
14 class UserFixture:
15 """
16 Load users into the database.
17
18 Make sure this class' `load` method is called from `handle`
19 in management/commands/load.py, then use `./manage.py load`
20 to run this code.
21 """
22
23 ADMINS = [
24 {
25 "username": "5f283494-31bd-49b5-b024-a7e7cae00848",
26 "first_name": "Rachid",
27 "last_name": "Mrad",
28 },
29 {
30 "username": "eb2214cd-fc0c-48c0-9dbd-bc4cd6820c74",
31 "first_name": "Alysia",
32 "last_name": "Broddrick",
33 },
34 {
35 "username": "8f8e7293-17f7-4716-889b-1990241cbd39",
36 "first_name": "Katherine",
37 "last_name": "Osos",
38 },
39 {
40 "username": "70488e0a-e937-4894-a28c-16f5949effd4",
41 "first_name": "Gaby",
42 "last_name": "DiSarli",
43 "email": "[email protected]",
44 },
45 {
46 "username": "83c2b6dd-20a2-4cac-bb40-e22a72d2955c",
47 "first_name": "Cameron",
48 "last_name": "Dixon",
49 },
50 {
51 "username": "0353607a-cbba-47d2-98d7-e83dcd5b90ea",
52 "first_name": "Ryan",
53 "last_name": "Brooks",
54 },
55 {
56 "username": "30001ee7-0467-4df2-8db2-786e79606060",
57 "first_name": "Zander",
58 "last_name": "Adkinson",
59 },
60 {
61 "username": "2bf518c2-485a-4c42-ab1a-f5a8b0a08484",
62 "first_name": "Paul",
63 "last_name": "Kuykendall",
64 },
65 {
66 "username": "2a88a97b-be96-4aad-b99e-0b605b492c78",
67 "first_name": "Rebecca",
68 "last_name": "Hsieh",
69 },
70 {
71 "username": "fa69c8e8-da83-4798-a4f2-263c9ce93f52",
72 "first_name": "David",
73 "last_name": "Kennedy",
74 },
75 {
76 "username": "f14433d8-f0e9-41bf-9c72-b99b110e665d",
77 "first_name": "Nicolle",
78 "last_name": "LeClair",
79 },
80 {
81 "username": "24840450-bf47-4d89-8aa9-c612fe68f9da",
82 "first_name": "Erin",
83 "last_name": "Song",
84 },
85 {
86 "username": "e0ea8b94-6e53-4430-814a-849a7ca45f21",
87 "first_name": "Kristina",
88 "last_name": "Yin",
89 },
90 {
91 "username": "ac49d7c1-368a-4e6b-8f1d-60250e20a16f",
92 "first_name": "Vicky",
93 "last_name": "Chin",
94 "email": "[email protected]",
95 },
96 ]
97
98 STAFF = [
99 {
100 "username": "319c490d-453b-43d9-bc4d-7d6cd8ff6844",
101 "first_name": "Rachid-Analyst",
102 "last_name": "Mrad-Analyst",
103 "email": "[email protected]",
104 },
105 {
106 "username": "b6a15987-5c88-4e26-8de2-ca71a0bdb2cd",
107 "first_name": "Alysia-Analyst",
108 "last_name": "Alysia-Analyst",
109 },
110 {
111 "username": "91a9b97c-bd0a-458d-9823-babfde7ebf44",
112 "first_name": "Katherine-Analyst",
113 "last_name": "Osos-Analyst",
114 "email": "[email protected]",
115 },
116 {
117 "username": "2cc0cde8-8313-4a50-99d8-5882e71443e8",
118 "first_name": "Zander-Analyst",
119 "last_name": "Adkinson-Analyst",
120 },
121 {
122 "username": "57ab5847-7789-49fe-a2f9-21d38076d699",
123 "first_name": "Paul-Analyst",
124 "last_name": "Kuykendall-Analyst",
125 },
126 {
127 "username": "e474e7a9-71ca-449d-833c-8a6e094dd117",
128 "first_name": "Rebecca-Analyst",
129 "last_name": "Hsieh-Analyst",
130 },
131 {
132 "username": "5dc6c9a6-61d9-42b4-ba54-4beff28bac3c",
133 "first_name": "David-Analyst",
134 "last_name": "Kennedy-Analyst",
135 },
136 {
137 "username": "0eb6f326-a3d4-410f-a521-aa4c1fad4e47",
138 "first_name": "Gaby-Analyst",
139 "last_name": "DiSarli-Analyst",
140 "email": "[email protected]",
141 },
142 {
143 "username": "cfe7c2fc-e24a-480e-8b78-28645a1459b3",
144 "first_name": "Nicolle-Analyst",
145 "last_name": "LeClair-Analyst",
146 "email": "[email protected]",
147 },
148 {
149 "username": "378d0bc4-d5a7-461b-bd84-3ae6f6864af9",
150 "first_name": "Erin-Analyst",
151 "last_name": "Song-Analyst",
152 "email": "[email protected]",
153 },
154 {
155 "username": "9a98e4c9-9409-479d-964e-4aec7799107f",
156 "first_name": "Kristina-Analyst",
157 "last_name": "Yin-Analyst",
158 "email": "[email protected]",
159 },
160 {
161 "username": "8f42302e-b83a-4c9e-8764-fc19e2cea576",
162 "first_name": "Vickster-Analyst",
163 "last_name": "Chin-Analyst",
164 "email": "[email protected]",
165 },
166 {
167 "username": "d9839768-0c17-4fa2-9c8e-36291eef5c11",
168 "first_name": "Alex-Analyst",
169 "last_name": "Mcelya-Analyst",
170 "email": "[email protected]",
171 },
172 ]
173
174 def load_users(cls, users, group_name):
175 logger.info(f"Going to load {len(users)} users in group {group_name}")
176 for user_data in users:
177 try:
178 user, _ = User.objects.get_or_create(username=user_data["username"])
179 user.is_superuser = False
180 user.first_name = user_data["first_name"]
181 user.last_name = user_data["last_name"]
182 if "email" in user_data:
183 user.email = user_data["email"]
184 user.is_staff = True
185 user.is_active = True
186 group = UserGroup.objects.get(name=group_name)
187 user.groups.add(group)
188 user.save()
189 logger.debug(f"User object created for {user_data['first_name']}")
190 except Exception as e:
191 logger.warning(e)
192 logger.info(f"All users in group {group_name} loaded.")
193
194 @classmethod
195 def load(cls):
196 # Lumped under .atomic to ensure we don't make redundant DB calls.
197 # This bundles them all together, and then saves it in a single call.
198 # This is slightly different then bulk_create or bulk_update, in that
199 # you still get the same behaviour of .save(), but those incremental
200 # steps now do not need to close/reopen a db connection,
201 # instead they share one.
202 with transaction.atomic():
203 cls.load_users(cls, cls.ADMINS, "full_access_group")
204 cls.load_users(cls, cls.STAFF, "cisa_analysts_group")
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/registrar/fixtures_users.py b/src/registrar/fixtures_users.py
--- a/src/registrar/fixtures_users.py
+++ b/src/registrar/fixtures_users.py
@@ -93,6 +93,12 @@
"last_name": "Chin",
"email": "[email protected]",
},
+ {
+ "username": "012f844d-8a0f-4225-9d82-cbf87bff1d3e",
+ "first_name": "Riley",
+ "last_name": "Orr",
+ "email": "[email protected]",
+ },
]
STAFF = [
@@ -169,6 +175,12 @@
"last_name": "Mcelya-Analyst",
"email": "[email protected]",
},
+ {
+ "username": "082a066f-e0a4-45f6-8672-4343a1208a36",
+ "first_name": "Riley-Analyst",
+ "last_name": "Orr-Analyst",
+ "email": "[email protected]",
+ },
]
def load_users(cls, users, group_name):
|
{"golden_diff": "diff --git a/src/registrar/fixtures_users.py b/src/registrar/fixtures_users.py\n--- a/src/registrar/fixtures_users.py\n+++ b/src/registrar/fixtures_users.py\n@@ -93,6 +93,12 @@\n \"last_name\": \"Chin\",\n \"email\": \"[email protected]\",\n },\n+ {\n+ \"username\": \"012f844d-8a0f-4225-9d82-cbf87bff1d3e\",\n+ \"first_name\": \"Riley\",\n+ \"last_name\": \"Orr\",\n+ \"email\": \"[email protected]\",\n+ },\n ]\n \n STAFF = [\n@@ -169,6 +175,12 @@\n \"last_name\": \"Mcelya-Analyst\",\n \"email\": \"[email protected]\",\n },\n+ {\n+ \"username\": \"082a066f-e0a4-45f6-8672-4343a1208a36\",\n+ \"first_name\": \"Riley-Analyst\",\n+ \"last_name\": \"Orr-Analyst\",\n+ \"email\": \"[email protected]\",\n+ },\n ]\n \n def load_users(cls, users, group_name):\n", "issue": "Request access to admin (add fixtures for Riley Orr)\n### Issue description\n\nAdd fixtures so Riley can access the sandboxes\n\n### Acceptance criteria\n\n- [ ] Fixtures added for Riley Orr\n\n### Additional context\n\n_No response_\n\n### Links to other issues\n\n_No response_\n", "before_files": [{"content": "import logging\nfrom faker import Faker\nfrom django.db import transaction\n\nfrom registrar.models import (\n User,\n UserGroup,\n)\n\nfake = Faker()\nlogger = logging.getLogger(__name__)\n\n\nclass UserFixture:\n \"\"\"\n Load users into the database.\n\n Make sure this class' `load` method is called from `handle`\n in management/commands/load.py, then use `./manage.py load`\n to run this code.\n \"\"\"\n\n ADMINS = [\n {\n \"username\": \"5f283494-31bd-49b5-b024-a7e7cae00848\",\n \"first_name\": \"Rachid\",\n \"last_name\": \"Mrad\",\n },\n {\n \"username\": \"eb2214cd-fc0c-48c0-9dbd-bc4cd6820c74\",\n \"first_name\": \"Alysia\",\n \"last_name\": \"Broddrick\",\n },\n {\n \"username\": \"8f8e7293-17f7-4716-889b-1990241cbd39\",\n \"first_name\": \"Katherine\",\n \"last_name\": \"Osos\",\n },\n {\n \"username\": \"70488e0a-e937-4894-a28c-16f5949effd4\",\n \"first_name\": \"Gaby\",\n \"last_name\": \"DiSarli\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"83c2b6dd-20a2-4cac-bb40-e22a72d2955c\",\n \"first_name\": \"Cameron\",\n \"last_name\": \"Dixon\",\n },\n {\n \"username\": \"0353607a-cbba-47d2-98d7-e83dcd5b90ea\",\n \"first_name\": \"Ryan\",\n \"last_name\": \"Brooks\",\n },\n {\n \"username\": \"30001ee7-0467-4df2-8db2-786e79606060\",\n \"first_name\": \"Zander\",\n \"last_name\": \"Adkinson\",\n },\n {\n \"username\": \"2bf518c2-485a-4c42-ab1a-f5a8b0a08484\",\n \"first_name\": \"Paul\",\n \"last_name\": \"Kuykendall\",\n },\n {\n \"username\": \"2a88a97b-be96-4aad-b99e-0b605b492c78\",\n \"first_name\": \"Rebecca\",\n \"last_name\": \"Hsieh\",\n },\n {\n \"username\": \"fa69c8e8-da83-4798-a4f2-263c9ce93f52\",\n \"first_name\": \"David\",\n \"last_name\": \"Kennedy\",\n },\n {\n \"username\": \"f14433d8-f0e9-41bf-9c72-b99b110e665d\",\n \"first_name\": \"Nicolle\",\n \"last_name\": \"LeClair\",\n },\n {\n \"username\": \"24840450-bf47-4d89-8aa9-c612fe68f9da\",\n \"first_name\": \"Erin\",\n \"last_name\": \"Song\",\n },\n {\n \"username\": \"e0ea8b94-6e53-4430-814a-849a7ca45f21\",\n \"first_name\": \"Kristina\",\n \"last_name\": \"Yin\",\n },\n {\n \"username\": \"ac49d7c1-368a-4e6b-8f1d-60250e20a16f\",\n \"first_name\": \"Vicky\",\n \"last_name\": \"Chin\",\n \"email\": \"[email protected]\",\n },\n ]\n\n STAFF = [\n {\n \"username\": \"319c490d-453b-43d9-bc4d-7d6cd8ff6844\",\n \"first_name\": \"Rachid-Analyst\",\n \"last_name\": \"Mrad-Analyst\",\n \"email\": \"[email protected]\",\n },\n 
{\n \"username\": \"b6a15987-5c88-4e26-8de2-ca71a0bdb2cd\",\n \"first_name\": \"Alysia-Analyst\",\n \"last_name\": \"Alysia-Analyst\",\n },\n {\n \"username\": \"91a9b97c-bd0a-458d-9823-babfde7ebf44\",\n \"first_name\": \"Katherine-Analyst\",\n \"last_name\": \"Osos-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"2cc0cde8-8313-4a50-99d8-5882e71443e8\",\n \"first_name\": \"Zander-Analyst\",\n \"last_name\": \"Adkinson-Analyst\",\n },\n {\n \"username\": \"57ab5847-7789-49fe-a2f9-21d38076d699\",\n \"first_name\": \"Paul-Analyst\",\n \"last_name\": \"Kuykendall-Analyst\",\n },\n {\n \"username\": \"e474e7a9-71ca-449d-833c-8a6e094dd117\",\n \"first_name\": \"Rebecca-Analyst\",\n \"last_name\": \"Hsieh-Analyst\",\n },\n {\n \"username\": \"5dc6c9a6-61d9-42b4-ba54-4beff28bac3c\",\n \"first_name\": \"David-Analyst\",\n \"last_name\": \"Kennedy-Analyst\",\n },\n {\n \"username\": \"0eb6f326-a3d4-410f-a521-aa4c1fad4e47\",\n \"first_name\": \"Gaby-Analyst\",\n \"last_name\": \"DiSarli-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"cfe7c2fc-e24a-480e-8b78-28645a1459b3\",\n \"first_name\": \"Nicolle-Analyst\",\n \"last_name\": \"LeClair-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"378d0bc4-d5a7-461b-bd84-3ae6f6864af9\",\n \"first_name\": \"Erin-Analyst\",\n \"last_name\": \"Song-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"9a98e4c9-9409-479d-964e-4aec7799107f\",\n \"first_name\": \"Kristina-Analyst\",\n \"last_name\": \"Yin-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"8f42302e-b83a-4c9e-8764-fc19e2cea576\",\n \"first_name\": \"Vickster-Analyst\",\n \"last_name\": \"Chin-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"d9839768-0c17-4fa2-9c8e-36291eef5c11\",\n \"first_name\": \"Alex-Analyst\",\n \"last_name\": \"Mcelya-Analyst\",\n \"email\": \"[email protected]\",\n },\n ]\n\n def load_users(cls, users, group_name):\n logger.info(f\"Going to load {len(users)} users in group {group_name}\")\n for user_data in users:\n try:\n user, _ = User.objects.get_or_create(username=user_data[\"username\"])\n user.is_superuser = False\n user.first_name = user_data[\"first_name\"]\n user.last_name = user_data[\"last_name\"]\n if \"email\" in user_data:\n user.email = user_data[\"email\"]\n user.is_staff = True\n user.is_active = True\n group = UserGroup.objects.get(name=group_name)\n user.groups.add(group)\n user.save()\n logger.debug(f\"User object created for {user_data['first_name']}\")\n except Exception as e:\n logger.warning(e)\n logger.info(f\"All users in group {group_name} loaded.\")\n\n @classmethod\n def load(cls):\n # Lumped under .atomic to ensure we don't make redundant DB calls.\n # This bundles them all together, and then saves it in a single call.\n # This is slightly different then bulk_create or bulk_update, in that\n # you still get the same behaviour of .save(), but those incremental\n # steps now do not need to close/reopen a db connection,\n # instead they share one.\n with transaction.atomic():\n cls.load_users(cls, cls.ADMINS, \"full_access_group\")\n cls.load_users(cls, cls.STAFF, \"cisa_analysts_group\")\n", "path": "src/registrar/fixtures_users.py"}], "after_files": [{"content": "import logging\nfrom faker import Faker\nfrom django.db import transaction\n\nfrom registrar.models import (\n User,\n UserGroup,\n)\n\nfake = Faker()\nlogger = logging.getLogger(__name__)\n\n\nclass UserFixture:\n \"\"\"\n Load users into the 
database.\n\n Make sure this class' `load` method is called from `handle`\n in management/commands/load.py, then use `./manage.py load`\n to run this code.\n \"\"\"\n\n ADMINS = [\n {\n \"username\": \"5f283494-31bd-49b5-b024-a7e7cae00848\",\n \"first_name\": \"Rachid\",\n \"last_name\": \"Mrad\",\n },\n {\n \"username\": \"eb2214cd-fc0c-48c0-9dbd-bc4cd6820c74\",\n \"first_name\": \"Alysia\",\n \"last_name\": \"Broddrick\",\n },\n {\n \"username\": \"8f8e7293-17f7-4716-889b-1990241cbd39\",\n \"first_name\": \"Katherine\",\n \"last_name\": \"Osos\",\n },\n {\n \"username\": \"70488e0a-e937-4894-a28c-16f5949effd4\",\n \"first_name\": \"Gaby\",\n \"last_name\": \"DiSarli\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"83c2b6dd-20a2-4cac-bb40-e22a72d2955c\",\n \"first_name\": \"Cameron\",\n \"last_name\": \"Dixon\",\n },\n {\n \"username\": \"0353607a-cbba-47d2-98d7-e83dcd5b90ea\",\n \"first_name\": \"Ryan\",\n \"last_name\": \"Brooks\",\n },\n {\n \"username\": \"30001ee7-0467-4df2-8db2-786e79606060\",\n \"first_name\": \"Zander\",\n \"last_name\": \"Adkinson\",\n },\n {\n \"username\": \"2bf518c2-485a-4c42-ab1a-f5a8b0a08484\",\n \"first_name\": \"Paul\",\n \"last_name\": \"Kuykendall\",\n },\n {\n \"username\": \"2a88a97b-be96-4aad-b99e-0b605b492c78\",\n \"first_name\": \"Rebecca\",\n \"last_name\": \"Hsieh\",\n },\n {\n \"username\": \"fa69c8e8-da83-4798-a4f2-263c9ce93f52\",\n \"first_name\": \"David\",\n \"last_name\": \"Kennedy\",\n },\n {\n \"username\": \"f14433d8-f0e9-41bf-9c72-b99b110e665d\",\n \"first_name\": \"Nicolle\",\n \"last_name\": \"LeClair\",\n },\n {\n \"username\": \"24840450-bf47-4d89-8aa9-c612fe68f9da\",\n \"first_name\": \"Erin\",\n \"last_name\": \"Song\",\n },\n {\n \"username\": \"e0ea8b94-6e53-4430-814a-849a7ca45f21\",\n \"first_name\": \"Kristina\",\n \"last_name\": \"Yin\",\n },\n {\n \"username\": \"ac49d7c1-368a-4e6b-8f1d-60250e20a16f\",\n \"first_name\": \"Vicky\",\n \"last_name\": \"Chin\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"012f844d-8a0f-4225-9d82-cbf87bff1d3e\",\n \"first_name\": \"Riley\",\n \"last_name\": \"Orr\",\n \"email\": \"[email protected]\",\n },\n ]\n\n STAFF = [\n {\n \"username\": \"319c490d-453b-43d9-bc4d-7d6cd8ff6844\",\n \"first_name\": \"Rachid-Analyst\",\n \"last_name\": \"Mrad-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"b6a15987-5c88-4e26-8de2-ca71a0bdb2cd\",\n \"first_name\": \"Alysia-Analyst\",\n \"last_name\": \"Alysia-Analyst\",\n },\n {\n \"username\": \"91a9b97c-bd0a-458d-9823-babfde7ebf44\",\n \"first_name\": \"Katherine-Analyst\",\n \"last_name\": \"Osos-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"2cc0cde8-8313-4a50-99d8-5882e71443e8\",\n \"first_name\": \"Zander-Analyst\",\n \"last_name\": \"Adkinson-Analyst\",\n },\n {\n \"username\": \"57ab5847-7789-49fe-a2f9-21d38076d699\",\n \"first_name\": \"Paul-Analyst\",\n \"last_name\": \"Kuykendall-Analyst\",\n },\n {\n \"username\": \"e474e7a9-71ca-449d-833c-8a6e094dd117\",\n \"first_name\": \"Rebecca-Analyst\",\n \"last_name\": \"Hsieh-Analyst\",\n },\n {\n \"username\": \"5dc6c9a6-61d9-42b4-ba54-4beff28bac3c\",\n \"first_name\": \"David-Analyst\",\n \"last_name\": \"Kennedy-Analyst\",\n },\n {\n \"username\": \"0eb6f326-a3d4-410f-a521-aa4c1fad4e47\",\n \"first_name\": \"Gaby-Analyst\",\n \"last_name\": \"DiSarli-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"cfe7c2fc-e24a-480e-8b78-28645a1459b3\",\n \"first_name\": \"Nicolle-Analyst\",\n \"last_name\": 
\"LeClair-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"378d0bc4-d5a7-461b-bd84-3ae6f6864af9\",\n \"first_name\": \"Erin-Analyst\",\n \"last_name\": \"Song-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"9a98e4c9-9409-479d-964e-4aec7799107f\",\n \"first_name\": \"Kristina-Analyst\",\n \"last_name\": \"Yin-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"8f42302e-b83a-4c9e-8764-fc19e2cea576\",\n \"first_name\": \"Vickster-Analyst\",\n \"last_name\": \"Chin-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"d9839768-0c17-4fa2-9c8e-36291eef5c11\",\n \"first_name\": \"Alex-Analyst\",\n \"last_name\": \"Mcelya-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"082a066f-e0a4-45f6-8672-4343a1208a36\",\n \"first_name\": \"Riley-Analyst\",\n \"last_name\": \"Orr-Analyst\",\n \"email\": \"[email protected]\",\n },\n ]\n\n def load_users(cls, users, group_name):\n logger.info(f\"Going to load {len(users)} users in group {group_name}\")\n for user_data in users:\n try:\n user, _ = User.objects.get_or_create(username=user_data[\"username\"])\n user.is_superuser = False\n user.first_name = user_data[\"first_name\"]\n user.last_name = user_data[\"last_name\"]\n if \"email\" in user_data:\n user.email = user_data[\"email\"]\n user.is_staff = True\n user.is_active = True\n group = UserGroup.objects.get(name=group_name)\n user.groups.add(group)\n user.save()\n logger.debug(f\"User object created for {user_data['first_name']}\")\n except Exception as e:\n logger.warning(e)\n logger.info(f\"All users in group {group_name} loaded.\")\n\n @classmethod\n def load(cls):\n # Lumped under .atomic to ensure we don't make redundant DB calls.\n # This bundles them all together, and then saves it in a single call.\n # This is slightly different then bulk_create or bulk_update, in that\n # you still get the same behaviour of .save(), but those incremental\n # steps now do not need to close/reopen a db connection,\n # instead they share one.\n with transaction.atomic():\n cls.load_users(cls, cls.ADMINS, \"full_access_group\")\n cls.load_users(cls, cls.STAFF, \"cisa_analysts_group\")\n", "path": "src/registrar/fixtures_users.py"}]}
| 3,125 | 324 |
gh_patches_debug_1008 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-4892 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.
**Bug description**
Please enter a clear and concise description of what the bug is.
When I execute:
```sh
$python -m parlai eval_model --task fromfile:parlaiformat\
--fromfile_datapath "${test_set_path}" \
-mf zoo:saferdialogues/model\
-bs 1\
--world-logs $test_set_path.SafeRDialog_parlai.jsonl\
--no-cuda
```
It report:
```sh
16:13:53 | Overriding opt["task"] to fromfile:parlaiformat (previously: internal:safety_failures_with_recovery,internal:bst_sf_modified)
16:13:53 | Overriding opt["no_cuda"] to True (previously: False)
>>>using / style agent path
>>>finally module name: parlai.agents.transformer.generator
16:13:53 | loading dictionary from /home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/data/models/saferdialogues/model.dict
16:13:53 | num words = 8008
Traceback (most recent call last):
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 18, in <module>
main()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 14, in main
superscript_main()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 325, in superscript_main
return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 108, in _run_from_parser_and_opt
return script.run()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 265, in run
return eval_model(self.opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 233, in eval_model
agent = create_agent(opt, requireModelExists=True)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 468, in create_agent
model = create_agent_from_opt_file(opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 421, in create_agent_from_opt_file
return model_class(opt_from_file)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_generator_agent.py", line 462, in __init__
super().__init__(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 783, in __init__
self.dict = self.build_dictionary()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 862, in build_dictionary
d = self.dictionary_class()(self.opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/dict.py", line 322, in __init__
self.bpe = bpe_factory(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 68, in bpe_factory
bpe_helper = HuggingFaceBpeHelper(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 841, in __init__
raise IOError(
OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.
```
and the parlai version is: `1.6.0`.
**Reproduction steps**
Enter steps to reproduce the behavior.
**Expected behavior**
Give a clear and concise description of what you expected to happen.
**Logs**
Please paste the command line output:
```
Output goes here
```
**Additional context**
Add any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/zoo/saferdialogues/build.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 """
8 Blender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.
9 """
10
11 from parlai.core.build_data import download_models
12
13
14 def download(datapath):
15 opt = {'datapath': datapath}
16 version = 'v0.1'
17 fnames = [f'models_{version}.tar.gz']
18 download_models(
19 opt,
20 fnames,
21 model_folder='saferdialogues',
22 version=version,
23 use_model_type=False,
24 )
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parlai/zoo/saferdialogues/build.py b/parlai/zoo/saferdialogues/build.py
--- a/parlai/zoo/saferdialogues/build.py
+++ b/parlai/zoo/saferdialogues/build.py
@@ -13,7 +13,7 @@
def download(datapath):
opt = {'datapath': datapath}
- version = 'v0.1'
+ version = 'v0.2'
fnames = [f'models_{version}.tar.gz']
download_models(
opt,
|
{"golden_diff": "diff --git a/parlai/zoo/saferdialogues/build.py b/parlai/zoo/saferdialogues/build.py\n--- a/parlai/zoo/saferdialogues/build.py\n+++ b/parlai/zoo/saferdialogues/build.py\n@@ -13,7 +13,7 @@\n \n def download(datapath):\n opt = {'datapath': datapath}\n- version = 'v0.1'\n+ version = 'v0.2'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n", "issue": "OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.\n**Bug description**\r\n\r\nPlease enter a clear and concise description of what the bug is.\r\n\r\nWhen I execute:\r\n```sh\r\n$python -m parlai eval_model --task fromfile:parlaiformat\\\r\n --fromfile_datapath \"${test_set_path}\" \\\r\n -mf zoo:saferdialogues/model\\\r\n -bs 1\\\r\n --world-logs $test_set_path.SafeRDialog_parlai.jsonl\\\r\n --no-cuda\r\n```\r\nIt report:\r\n\r\n```sh\r\n16:13:53 | Overriding opt[\"task\"] to fromfile:parlaiformat (previously: internal:safety_failures_with_recovery,internal:bst_sf_modified)\r\n16:13:53 | Overriding opt[\"no_cuda\"] to True (previously: False)\r\n>>>using / style agent path\r\n>>>finally module name: parlai.agents.transformer.generator\r\n16:13:53 | loading dictionary from /home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/data/models/saferdialogues/model.dict\r\n16:13:53 | num words = 8008\r\nTraceback (most recent call last):\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py\", line 18, in <module>\r\n main()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py\", line 14, in main\r\n superscript_main()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py\", line 325, in superscript_main\r\n return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py\", line 108, in _run_from_parser_and_opt\r\n return script.run()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py\", line 265, in run\r\n return eval_model(self.opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py\", line 233, in eval_model\r\n agent = create_agent(opt, requireModelExists=True)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py\", line 468, in create_agent\r\n model = create_agent_from_opt_file(opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py\", line 421, in create_agent_from_opt_file\r\n return model_class(opt_from_file)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_generator_agent.py\", line 462, in __init__\r\n super().__init__(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py\", line 783, in __init__\r\n self.dict = self.build_dictionary()\r\n 
File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py\", line 862, in build_dictionary\r\n d = self.dictionary_class()(self.opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/dict.py\", line 322, in __init__\r\n self.bpe = bpe_factory(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py\", line 68, in bpe_factory\r\n bpe_helper = HuggingFaceBpeHelper(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py\", line 841, in __init__\r\n raise IOError(\r\nOSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.\r\n```\r\n\r\nand the parlai version is: `1.6.0`.\r\n\r\n**Reproduction steps**\r\nEnter steps to reproduce the behavior.\r\n\r\n**Expected behavior**\r\nGive a clear and concise description of what you expected to happen.\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\nOutput goes here\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nBlender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.\n\"\"\"\n\nfrom parlai.core.build_data import download_models\n\n\ndef download(datapath):\n opt = {'datapath': datapath}\n version = 'v0.1'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n fnames,\n model_folder='saferdialogues',\n version=version,\n use_model_type=False,\n )\n", "path": "parlai/zoo/saferdialogues/build.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nBlender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.\n\"\"\"\n\nfrom parlai.core.build_data import download_models\n\n\ndef download(datapath):\n opt = {'datapath': datapath}\n version = 'v0.2'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n fnames,\n model_folder='saferdialogues',\n version=version,\n use_model_type=False,\n )\n", "path": "parlai/zoo/saferdialogues/build.py"}]}
| 1,751 | 135 |
gh_patches_debug_37035 | rasdani/github-patches | git_diff | scrapy__scrapy-4935 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
is_generator_with_return_value raises IndentationError with a flush left doc string
### Description
Code that is accepted by the python interpreter raises when fed through `textwrap.dedent`
### Steps to Reproduce
1. Create `is_generator_bug.py` with the content below (which I simplified from [the `is_generator_with_return_value` method body](https://github.com/scrapy/scrapy/blob/2.0.1/scrapy/utils/misc.py#L186-L187)
2. Run `python is_generator_bug.py`
3. Observe the kaboom
```python
import ast
import inspect
from textwrap import dedent
class Bob:
def doit(self):
"""
this line is flush left
"""
if True:
yield 1234
if __name__ == '__main__':
b = Bob()
c = b.doit
if inspect.isgeneratorfunction(c):
tree = ast.parse(dedent(inspect.getsource(c)))
```
**Expected behavior:** [What you expect to happen]
No Error
**Actual behavior:** [What actually happens]
```console
$ python3.7 is_generator_bug.py
Traceback (most recent call last):
File "is_generator_bug.py", line 16, in <module>
tree = ast.parse(dedent(inspect.getsource(c)))
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/ast.py", line 35, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "<unknown>", line 1
def doit(self):
^
IndentationError: unexpected indent
```
**Reproduces how often:** [What percentage of the time does it reproduce?]
100%
### Versions
```
Scrapy : 2.0.1
lxml : 4.5.0.0
libxml2 : 2.9.10
cssselect : 1.1.0
parsel : 1.5.2
w3lib : 1.21.0
Twisted : 20.3.0
Python : 3.7.7 (default, Mar 11 2020, 23:30:22) - [Clang 10.0.0 (clang-1000.11.45.5)]
pyOpenSSL : 19.1.0 (OpenSSL 1.1.1d 10 Sep 2019)
cryptography : 2.8
Platform : Darwin-17.7.0-x86_64-i386-64bit
```
### Additional context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/misc.py`
Content:
```
1 """Helper functions which don't fit anywhere else"""
2 import ast
3 import inspect
4 import os
5 import re
6 import hashlib
7 import warnings
8 from collections import deque
9 from contextlib import contextmanager
10 from importlib import import_module
11 from pkgutil import iter_modules
12 from textwrap import dedent
13
14 from w3lib.html import replace_entities
15
16 from scrapy.utils.datatypes import LocalWeakReferencedCache
17 from scrapy.utils.python import flatten, to_unicode
18 from scrapy.item import _BaseItem
19 from scrapy.utils.deprecate import ScrapyDeprecationWarning
20
21
22 _ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes
23
24
25 def arg_to_iter(arg):
26 """Convert an argument to an iterable. The argument can be a None, single
27 value, or an iterable.
28
29 Exception: if arg is a dict, [arg] will be returned
30 """
31 if arg is None:
32 return []
33 elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
34 return arg
35 else:
36 return [arg]
37
38
39 def load_object(path):
40 """Load an object given its absolute object path, and return it.
41
42 The object can be the import path of a class, function, variable or an
43 instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'.
44
45 If ``path`` is not a string, but is a callable object, such as a class or
46 a function, then return it as is.
47 """
48
49 if not isinstance(path, str):
50 if callable(path):
51 return path
52 else:
53 raise TypeError("Unexpected argument type, expected string "
54 "or object, got: %s" % type(path))
55
56 try:
57 dot = path.rindex('.')
58 except ValueError:
59 raise ValueError(f"Error loading object '{path}': not a full path")
60
61 module, name = path[:dot], path[dot + 1:]
62 mod = import_module(module)
63
64 try:
65 obj = getattr(mod, name)
66 except AttributeError:
67 raise NameError(f"Module '{module}' doesn't define any object named '{name}'")
68
69 return obj
70
71
72 def walk_modules(path):
73 """Loads a module and all its submodules from the given module path and
74 returns them. If *any* module throws an exception while importing, that
75 exception is thrown back.
76
77 For example: walk_modules('scrapy.utils')
78 """
79
80 mods = []
81 mod = import_module(path)
82 mods.append(mod)
83 if hasattr(mod, '__path__'):
84 for _, subpath, ispkg in iter_modules(mod.__path__):
85 fullpath = path + '.' + subpath
86 if ispkg:
87 mods += walk_modules(fullpath)
88 else:
89 submod = import_module(fullpath)
90 mods.append(submod)
91 return mods
92
93
94 def extract_regex(regex, text, encoding='utf-8'):
95 """Extract a list of unicode strings from the given text/encoding using the following policies:
96
97 * if the regex contains a named group called "extract" that will be returned
98 * if the regex contains multiple numbered groups, all those will be returned (flattened)
99 * if the regex doesn't contain any group the entire regex matching is returned
100 """
101 warnings.warn(
102 "scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.",
103 ScrapyDeprecationWarning,
104 stacklevel=2
105 )
106
107 if isinstance(regex, str):
108 regex = re.compile(regex, re.UNICODE)
109
110 try:
111 strings = [regex.search(text).group('extract')] # named group
112 except Exception:
113 strings = regex.findall(text) # full regex or numbered groups
114 strings = flatten(strings)
115
116 if isinstance(text, str):
117 return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
118 else:
119 return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
120 for s in strings]
121
122
123 def md5sum(file):
124 """Calculate the md5 checksum of a file-like object without reading its
125 whole content in memory.
126
127 >>> from io import BytesIO
128 >>> md5sum(BytesIO(b'file content to hash'))
129 '784406af91dd5a54fbb9c84c2236595a'
130 """
131 m = hashlib.md5()
132 while True:
133 d = file.read(8096)
134 if not d:
135 break
136 m.update(d)
137 return m.hexdigest()
138
139
140 def rel_has_nofollow(rel):
141 """Return True if link rel attribute has nofollow type"""
142 return rel is not None and 'nofollow' in rel.split()
143
144
145 def create_instance(objcls, settings, crawler, *args, **kwargs):
146 """Construct a class instance using its ``from_crawler`` or
147 ``from_settings`` constructors, if available.
148
149 At least one of ``settings`` and ``crawler`` needs to be different from
150 ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.
151 If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
152 tried.
153
154 ``*args`` and ``**kwargs`` are forwarded to the constructors.
155
156 Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
157
158 .. versionchanged:: 2.2
159 Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an
160 extension has not been implemented correctly).
161 """
162 if settings is None:
163 if crawler is None:
164 raise ValueError("Specify at least one of settings and crawler.")
165 settings = crawler.settings
166 if crawler and hasattr(objcls, 'from_crawler'):
167 instance = objcls.from_crawler(crawler, *args, **kwargs)
168 method_name = 'from_crawler'
169 elif hasattr(objcls, 'from_settings'):
170 instance = objcls.from_settings(settings, *args, **kwargs)
171 method_name = 'from_settings'
172 else:
173 instance = objcls(*args, **kwargs)
174 method_name = '__new__'
175 if instance is None:
176 raise TypeError(f"{objcls.__qualname__}.{method_name} returned None")
177 return instance
178
179
180 @contextmanager
181 def set_environ(**kwargs):
182 """Temporarily set environment variables inside the context manager and
183 fully restore previous environment afterwards
184 """
185
186 original_env = {k: os.environ.get(k) for k in kwargs}
187 os.environ.update(kwargs)
188 try:
189 yield
190 finally:
191 for k, v in original_env.items():
192 if v is None:
193 del os.environ[k]
194 else:
195 os.environ[k] = v
196
197
198 def walk_callable(node):
199 """Similar to ``ast.walk``, but walks only function body and skips nested
200 functions defined within the node.
201 """
202 todo = deque([node])
203 walked_func_def = False
204 while todo:
205 node = todo.popleft()
206 if isinstance(node, ast.FunctionDef):
207 if walked_func_def:
208 continue
209 walked_func_def = True
210 todo.extend(ast.iter_child_nodes(node))
211 yield node
212
213
214 _generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
215
216
217 def is_generator_with_return_value(callable):
218 """
219 Returns True if a callable is a generator function which includes a
220 'return' statement with a value different than None, False otherwise
221 """
222 if callable in _generator_callbacks_cache:
223 return _generator_callbacks_cache[callable]
224
225 def returns_none(return_node):
226 value = return_node.value
227 return value is None or isinstance(value, ast.NameConstant) and value.value is None
228
229 if inspect.isgeneratorfunction(callable):
230 tree = ast.parse(dedent(inspect.getsource(callable)))
231 for node in walk_callable(tree):
232 if isinstance(node, ast.Return) and not returns_none(node):
233 _generator_callbacks_cache[callable] = True
234 return _generator_callbacks_cache[callable]
235
236 _generator_callbacks_cache[callable] = False
237 return _generator_callbacks_cache[callable]
238
239
240 def warn_on_generator_with_return_value(spider, callable):
241 """
242 Logs a warning if a callable is a generator function and includes
243 a 'return' statement with a value different than None
244 """
245 if is_generator_with_return_value(callable):
246 warnings.warn(
247 f'The "{spider.__class__.__name__}.{callable.__name__}" method is '
248 'a generator and includes a "return" statement with a value '
249 'different than None. This could lead to unexpected behaviour. Please see '
250 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
251 'for details about the semantics of the "return" statement within generators',
252 stacklevel=2,
253 )
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -9,7 +9,6 @@
from contextlib import contextmanager
from importlib import import_module
from pkgutil import iter_modules
-from textwrap import dedent
from w3lib.html import replace_entities
@@ -227,7 +226,8 @@
return value is None or isinstance(value, ast.NameConstant) and value.value is None
if inspect.isgeneratorfunction(callable):
- tree = ast.parse(dedent(inspect.getsource(callable)))
+ code = re.sub(r"^[\t ]+", "", inspect.getsource(callable))
+ tree = ast.parse(code)
for node in walk_callable(tree):
if isinstance(node, ast.Return) and not returns_none(node):
_generator_callbacks_cache[callable] = True
@@ -242,12 +242,23 @@
Logs a warning if a callable is a generator function and includes
a 'return' statement with a value different than None
"""
- if is_generator_with_return_value(callable):
+ try:
+ if is_generator_with_return_value(callable):
+ warnings.warn(
+ f'The "{spider.__class__.__name__}.{callable.__name__}" method is '
+ 'a generator and includes a "return" statement with a value '
+ 'different than None. This could lead to unexpected behaviour. Please see '
+ 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
+ 'for details about the semantics of the "return" statement within generators',
+ stacklevel=2,
+ )
+ except IndentationError:
+ callable_name = spider.__class__.__name__ + "." + callable.__name__
warnings.warn(
- f'The "{spider.__class__.__name__}.{callable.__name__}" method is '
- 'a generator and includes a "return" statement with a value '
- 'different than None. This could lead to unexpected behaviour. Please see '
- 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
- 'for details about the semantics of the "return" statement within generators',
+ f'Unable to determine whether or not "{callable_name}" is a generator with a return value. '
+ 'This will not prevent your code from working, but it prevents Scrapy from detecting '
+ f'potential issues in your implementation of "{callable_name}". Please, report this in the '
+ 'Scrapy issue tracker (https://github.com/scrapy/scrapy/issues), '
+ f'including the code of "{callable_name}"',
stacklevel=2,
)
|
{"golden_diff": "diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py\n--- a/scrapy/utils/misc.py\n+++ b/scrapy/utils/misc.py\n@@ -9,7 +9,6 @@\n from contextlib import contextmanager\n from importlib import import_module\n from pkgutil import iter_modules\n-from textwrap import dedent\n \n from w3lib.html import replace_entities\n \n@@ -227,7 +226,8 @@\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n \n if inspect.isgeneratorfunction(callable):\n- tree = ast.parse(dedent(inspect.getsource(callable)))\n+ code = re.sub(r\"^[\\t ]+\", \"\", inspect.getsource(callable))\n+ tree = ast.parse(code)\n for node in walk_callable(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n@@ -242,12 +242,23 @@\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n- if is_generator_with_return_value(callable):\n+ try:\n+ if is_generator_with_return_value(callable):\n+ warnings.warn(\n+ f'The \"{spider.__class__.__name__}.{callable.__name__}\" method is '\n+ 'a generator and includes a \"return\" statement with a value '\n+ 'different than None. This could lead to unexpected behaviour. Please see '\n+ 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n+ 'for details about the semantics of the \"return\" statement within generators',\n+ stacklevel=2,\n+ )\n+ except IndentationError:\n+ callable_name = spider.__class__.__name__ + \".\" + callable.__name__\n warnings.warn(\n- f'The \"{spider.__class__.__name__}.{callable.__name__}\" method is '\n- 'a generator and includes a \"return\" statement with a value '\n- 'different than None. This could lead to unexpected behaviour. Please see '\n- 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n- 'for details about the semantics of the \"return\" statement within generators',\n+ f'Unable to determine whether or not \"{callable_name}\" is a generator with a return value. '\n+ 'This will not prevent your code from working, but it prevents Scrapy from detecting '\n+ f'potential issues in your implementation of \"{callable_name}\". Please, report this in the '\n+ 'Scrapy issue tracker (https://github.com/scrapy/scrapy/issues), '\n+ f'including the code of \"{callable_name}\"',\n stacklevel=2,\n )\n", "issue": "is_generator_with_return_value raises IndentationError with a flush left doc string\n### Description\r\n\r\nCode that is accepted by the python interpreter raises when fed through `textwrap.dedent`\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create `is_generator_bug.py` with the content below (which I simplified from [the `is_generator_with_return_value` method body](https://github.com/scrapy/scrapy/blob/2.0.1/scrapy/utils/misc.py#L186-L187)\r\n2. Run `python is_generator_bug.py`\r\n3. 
Observe the kaboom\r\n\r\n```python\r\nimport ast\r\nimport inspect\r\nfrom textwrap import dedent\r\nclass Bob:\r\n def doit(self):\r\n \"\"\"\r\nthis line is flush left\r\n \"\"\"\r\n if True:\r\n yield 1234\r\n\r\nif __name__ == '__main__':\r\n b = Bob()\r\n c = b.doit\r\n if inspect.isgeneratorfunction(c):\r\n tree = ast.parse(dedent(inspect.getsource(c)))\r\n```\r\n\r\n**Expected behavior:** [What you expect to happen]\r\n\r\nNo Error\r\n\r\n**Actual behavior:** [What actually happens]\r\n\r\n```console\r\n$ python3.7 is_generator_bug.py\r\nTraceback (most recent call last):\r\n File \"is_generator_bug.py\", line 16, in <module>\r\n tree = ast.parse(dedent(inspect.getsource(c)))\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/ast.py\", line 35, in parse\r\n return compile(source, filename, mode, PyCF_ONLY_AST)\r\n File \"<unknown>\", line 1\r\n def doit(self):\r\n ^\r\nIndentationError: unexpected indent\r\n```\r\n\r\n**Reproduces how often:** [What percentage of the time does it reproduce?]\r\n\r\n100%\r\n\r\n### Versions\r\n\r\n```\r\nScrapy : 2.0.1\r\nlxml : 4.5.0.0\r\nlibxml2 : 2.9.10\r\ncssselect : 1.1.0\r\nparsel : 1.5.2\r\nw3lib : 1.21.0\r\nTwisted : 20.3.0\r\nPython : 3.7.7 (default, Mar 11 2020, 23:30:22) - [Clang 10.0.0 (clang-1000.11.45.5)]\r\npyOpenSSL : 19.1.0 (OpenSSL 1.1.1d 10 Sep 2019)\r\ncryptography : 2.8\r\nPlatform : Darwin-17.7.0-x86_64-i386-64bit\r\n```\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom collections import deque\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\nfrom textwrap import dedent\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import _BaseItem\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\n\n\n_ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n The object can be the import path of a class, function, variable or an\n instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'.\n\n If ``path`` is not a string, but is a callable object, such as a class or\n a function, then return it as is.\n \"\"\"\n\n if not isinstance(path, str):\n if callable(path):\n return path\n else:\n raise TypeError(\"Unexpected argument type, expected string \"\n \"or object, got: %s\" % type(path))\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(f\"Error loading object '{path}': not a full path\")\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(f\"Module '{module}' doesn't define any object named '{name}'\")\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. 
If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n warnings.warn(\n \"scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.\",\n ScrapyDeprecationWarning,\n stacklevel=2\n )\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n\n .. versionchanged:: 2.2\n Raises ``TypeError`` if the resulting instance is ``None`` (e.g. 
if an\n extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n instance = objcls.from_settings(settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(f\"{objcls.__qualname__}.{method_name} returned None\")\n return instance\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\ndef walk_callable(node):\n \"\"\"Similar to ``ast.walk``, but walks only function body and skips nested\n functions defined within the node.\n \"\"\"\n todo = deque([node])\n walked_func_def = False\n while todo:\n node = todo.popleft()\n if isinstance(node, ast.FunctionDef):\n if walked_func_def:\n continue\n walked_func_def = True\n todo.extend(ast.iter_child_nodes(node))\n yield node\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n for node in walk_callable(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n if is_generator_with_return_value(callable):\n warnings.warn(\n f'The \"{spider.__class__.__name__}.{callable.__name__}\" method is '\n 'a generator and includes a \"return\" statement with a value '\n 'different than None. This could lead to unexpected behaviour. 
Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators',\n stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}], "after_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom collections import deque\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom scrapy.item import _BaseItem\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\n\n\n_ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n The object can be the import path of a class, function, variable or an\n instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'.\n\n If ``path`` is not a string, but is a callable object, such as a class or\n a function, then return it as is.\n \"\"\"\n\n if not isinstance(path, str):\n if callable(path):\n return path\n else:\n raise TypeError(\"Unexpected argument type, expected string \"\n \"or object, got: %s\" % type(path))\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(f\"Error loading object '{path}': not a full path\")\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(f\"Module '{module}' doesn't define any object named '{name}'\")\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' 
+ subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n warnings.warn(\n \"scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.\",\n ScrapyDeprecationWarning,\n stacklevel=2\n )\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n\n .. versionchanged:: 2.2\n Raises ``TypeError`` if the resulting instance is ``None`` (e.g. 
if an\n extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n instance = objcls.from_settings(settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(f\"{objcls.__qualname__}.{method_name} returned None\")\n return instance\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\ndef walk_callable(node):\n \"\"\"Similar to ``ast.walk``, but walks only function body and skips nested\n functions defined within the node.\n \"\"\"\n todo = deque([node])\n walked_func_def = False\n while todo:\n node = todo.popleft()\n if isinstance(node, ast.FunctionDef):\n if walked_func_def:\n continue\n walked_func_def = True\n todo.extend(ast.iter_child_nodes(node))\n yield node\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n code = re.sub(r\"^[\\t ]+\", \"\", inspect.getsource(callable))\n tree = ast.parse(code)\n for node in walk_callable(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n try:\n if is_generator_with_return_value(callable):\n warnings.warn(\n f'The \"{spider.__class__.__name__}.{callable.__name__}\" method is '\n 'a generator and includes a \"return\" statement with a value '\n 'different than None. This could lead to unexpected behaviour. Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators',\n stacklevel=2,\n )\n except IndentationError:\n callable_name = spider.__class__.__name__ + \".\" + callable.__name__\n warnings.warn(\n f'Unable to determine whether or not \"{callable_name}\" is a generator with a return value. '\n 'This will not prevent your code from working, but it prevents Scrapy from detecting '\n f'potential issues in your implementation of \"{callable_name}\". 
Please, report this in the '\n 'Scrapy issue tracker (https://github.com/scrapy/scrapy/issues), '\n f'including the code of \"{callable_name}\"',\n stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}]}
| 3,445 | 603 |
gh_patches_debug_43166 | rasdani/github-patches | git_diff | getsentry__sentry-65417 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support measurement metrics in new metrics/traces sample endpoint
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/sentry_metrics/querying/samples_list.py`
Content:
```
1 from abc import ABC, abstractmethod
2 from datetime import datetime
3 from typing import Any
4
5 from snuba_sdk import And, Condition, Op, Or
6
7 from sentry import options
8 from sentry.search.events.builder import QueryBuilder, SpansIndexedQueryBuilder
9 from sentry.search.events.types import QueryBuilderConfig, SnubaParams
10 from sentry.snuba.dataset import Dataset
11 from sentry.snuba.metrics.naming_layer.mri import SpanMRI, TransactionMRI
12 from sentry.snuba.referrer import Referrer
13
14
15 class SamplesListExecutor(ABC):
16 def __init__(
17 self,
18 mri: str,
19 params: dict[str, Any],
20 snuba_params: SnubaParams,
21 fields: list[str],
22 query: str | None,
23 rollup: int,
24 referrer: Referrer,
25 ):
26 self.mri = mri
27 self.params = params
28 self.snuba_params = snuba_params
29 self.fields = fields
30 self.query = query
31 self.rollup = rollup
32 self.referrer = referrer
33
34 @classmethod
35 @abstractmethod
36 def supports(cls, metric_mri: str) -> bool:
37 raise NotImplementedError
38
39 @abstractmethod
40 def execute(self, offset, limit):
41 raise NotImplementedError
42
43 def get_spans_by_key(self, span_ids: list[tuple[str, str, str]]):
44 if not span_ids:
45 return {"data": []}
46
47 builder = SpansIndexedQueryBuilder(
48 Dataset.SpansIndexed,
49 self.params,
50 snuba_params=self.snuba_params,
51 selected_columns=self.fields,
52 limit=len(span_ids),
53 offset=0,
54 )
55
56 # Using `IN` sometimes does not use the bloomfilter index
57 # on the table. So we're explicitly writing the condition
58 # using `OR`s.
59 #
60 # May not be necessary because it's also filtering on the
61 # `span.group` as well which allows Clickhouse to filter
62 # via the primary key but this is a precaution.
63 conditions = [
64 And(
65 [
66 Condition(builder.column("span.group"), Op.EQ, group),
67 Condition(
68 builder.column("timestamp"), Op.EQ, datetime.fromisoformat(timestamp)
69 ),
70 Condition(builder.column("id"), Op.EQ, span_id),
71 ]
72 )
73 for (group, timestamp, span_id) in span_ids
74 ]
75
76 if len(conditions) == 1:
77 span_condition = conditions[0]
78 else:
79 span_condition = Or(conditions)
80
81 builder.add_conditions([span_condition])
82
83 query_results = builder.run_query(self.referrer.value)
84 return builder.process_results(query_results)
85
86
87 class SegmentsSamplesListExecutor(SamplesListExecutor):
88 @classmethod
89 def mri_to_column(cls, mri) -> str | None:
90 if mri == TransactionMRI.DURATION.value:
91 return "duration"
92 return None
93
94 @classmethod
95 def supports(cls, mri: str) -> bool:
96 return cls.mri_to_column(mri) is not None
97
98 def execute(self, offset, limit):
99 span_keys = self.get_span_keys(offset, limit)
100 return self.get_spans_by_key(span_keys)
101
102 def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:
103 rounded_timestamp = f"rounded_timestamp({self.rollup})"
104
105 builder = QueryBuilder(
106 Dataset.Transactions,
107 self.params,
108 snuba_params=self.snuba_params,
109 query=self.query,
110 selected_columns=[rounded_timestamp, "example()"],
111 limit=limit,
112 offset=offset,
113 sample_rate=options.get("metrics.sample-list.sample-rate"),
114 config=QueryBuilderConfig(functions_acl=["rounded_timestamp", "example"]),
115 )
116
117 query_results = builder.run_query(self.referrer.value)
118 result = builder.process_results(query_results)
119
120 return [
121 (
122 "00", # all segments have a group of `00` currently
123 row["example"][0], # timestamp
124 row["example"][1], # span_id
125 )
126 for row in result["data"]
127 ]
128
129
130 class SpansSamplesListExecutor(SamplesListExecutor):
131 MRI_MAPPING = {
132 SpanMRI.DURATION.value: "span.duration",
133 SpanMRI.SELF_TIME.value: "span.self_time",
134 }
135
136 @classmethod
137 def mri_to_column(cls, mri) -> str | None:
138 return cls.MRI_MAPPING.get(mri)
139
140 @classmethod
141 def supports(cls, mri: str) -> bool:
142 return cls.mri_to_column(mri) is not None
143
144 def execute(self, offset, limit):
145 span_keys = self.get_span_keys(offset, limit)
146 return self.get_spans_by_key(span_keys)
147
148 def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:
149 rounded_timestamp = f"rounded_timestamp({self.rollup})"
150
151 builder = SpansIndexedQueryBuilder(
152 Dataset.SpansIndexed,
153 self.params,
154 snuba_params=self.snuba_params,
155 query=self.query,
156 selected_columns=[rounded_timestamp, "example()"],
157 limit=limit,
158 offset=offset,
159 sample_rate=options.get("metrics.sample-list.sample-rate"),
160 config=QueryBuilderConfig(functions_acl=["rounded_timestamp", "example"]),
161 )
162
163 builder.add_conditions(
164 [
165 # The `00` group is used for spans not used within the
166 # new starfish experience. It's effectively the group
167 # for other. It is a massive group, so we've chosen
168 # to exclude it here.
169 #
170 # In the future, we will want to look into exposing them
171 Condition(builder.column("span.group"), Op.NEQ, "00")
172 ]
173 )
174
175 query_results = builder.run_query(self.referrer.value)
176 result = builder.process_results(query_results)
177
178 return [
179 (
180 row["example"][0], # group
181 row["example"][1], # timestamp
182 row["example"][2], # span_id
183 )
184 for row in result["data"]
185 ]
186
187
188 SAMPLE_LIST_EXECUTORS = [
189 SpansSamplesListExecutor,
190 SegmentsSamplesListExecutor,
191 ]
192
193
194 def get_sample_list_executor_cls(mri) -> type[SamplesListExecutor] | None:
195 for executor_cls in SAMPLE_LIST_EXECUTORS:
196 if executor_cls.supports(mri):
197 return executor_cls
198 return None
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/sentry_metrics/querying/samples_list.py b/src/sentry/sentry_metrics/querying/samples_list.py
--- a/src/sentry/sentry_metrics/querying/samples_list.py
+++ b/src/sentry/sentry_metrics/querying/samples_list.py
@@ -2,13 +2,13 @@
from datetime import datetime
from typing import Any
-from snuba_sdk import And, Condition, Op, Or
+from snuba_sdk import And, Column, Condition, Function, Op, Or
from sentry import options
from sentry.search.events.builder import QueryBuilder, SpansIndexedQueryBuilder
from sentry.search.events.types import QueryBuilderConfig, SnubaParams
from sentry.snuba.dataset import Dataset
-from sentry.snuba.metrics.naming_layer.mri import SpanMRI, TransactionMRI
+from sentry.snuba.metrics.naming_layer.mri import SpanMRI, TransactionMRI, is_measurement, parse_mri
from sentry.snuba.referrer import Referrer
@@ -86,10 +86,9 @@
class SegmentsSamplesListExecutor(SamplesListExecutor):
@classmethod
+ @abstractmethod
def mri_to_column(cls, mri) -> str | None:
- if mri == TransactionMRI.DURATION.value:
- return "duration"
- return None
+ raise NotImplementedError
@classmethod
def supports(cls, mri: str) -> bool:
@@ -102,6 +101,16 @@
def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:
rounded_timestamp = f"rounded_timestamp({self.rollup})"
+ """
+ When getting examples for a segment, it's actually much faster to read it
+ from the transactions dataset compared to the spans dataset as it's a much
+ smaller dataset.
+
+ One consideration here is that there is an one to one mapping between a
+ transaction to a segment today. If this relationship changes, we'll have to
+ rethink how to fetch segment samples a little as the transactions dataset
+ may not contain all the necessary data.
+ """
builder = QueryBuilder(
Dataset.Transactions,
self.params,
@@ -114,6 +123,8 @@
config=QueryBuilderConfig(functions_acl=["rounded_timestamp", "example"]),
)
+ builder.add_conditions(self.get_additional_conditions())
+
query_results = builder.run_query(self.referrer.value)
result = builder.process_results(query_results)
@@ -126,6 +137,42 @@
for row in result["data"]
]
+ @abstractmethod
+ def get_additional_conditions(self) -> list[Condition]:
+ raise NotImplementedError
+
+
+class TransactionDurationSamplesListExecutor(SegmentsSamplesListExecutor):
+ @classmethod
+ def mri_to_column(cls, mri) -> str | None:
+ if mri == TransactionMRI.DURATION.value:
+ return "duration"
+ return None
+
+ def get_additional_conditions(self) -> list[Condition]:
+ return []
+
+
+class MeasurementsSamplesListExecutor(SegmentsSamplesListExecutor):
+ @classmethod
+ def mri_to_column(cls, mri) -> str | None:
+ name = cls.measurement_name(mri)
+ if name is not None:
+ return f"measurements[{name}]"
+
+ return None
+
+ @classmethod
+ def measurement_name(cls, mri) -> str | None:
+ parsed_mri = parse_mri(mri)
+ if parsed_mri is not None and is_measurement(parsed_mri):
+ return parsed_mri.name[len("measurements:") :]
+ return None
+
+ def get_additional_conditions(self) -> list[Condition]:
+ name = self.measurement_name(self.mri)
+ return [Condition(Function("has", [Column("measurements.key"), name]), Op.EQ, 1)]
+
class SpansSamplesListExecutor(SamplesListExecutor):
MRI_MAPPING = {
@@ -187,7 +234,8 @@
SAMPLE_LIST_EXECUTORS = [
SpansSamplesListExecutor,
- SegmentsSamplesListExecutor,
+ TransactionDurationSamplesListExecutor,
+ MeasurementsSamplesListExecutor,
]
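
A minimal, self-contained sketch of the dispatch pattern this patch introduces: `mri_to_column` stays abstract on the base class, each concrete executor answers `supports()` through it, and the first executor that supports the MRI wins (mirroring `get_sample_list_executor_cls`). Class names and the MRI strings below are placeholders for illustration, not Sentry's real values.

```python
from abc import ABC, abstractmethod

class SamplesExecutor(ABC):
    @classmethod
    @abstractmethod
    def mri_to_column(cls, mri):
        raise NotImplementedError

    @classmethod
    def supports(cls, mri):
        # an executor supports an MRI iff it can map it to a column
        return cls.mri_to_column(mri) is not None

class DurationExecutor(SamplesExecutor):
    @classmethod
    def mri_to_column(cls, mri):
        return "duration" if mri == "placeholder/duration" else None

class MeasurementsExecutor(SamplesExecutor):
    @classmethod
    def mri_to_column(cls, mri):
        prefix = "placeholder/measurements."
        if mri.startswith(prefix):
            return f"measurements[{mri[len(prefix):]}]"
        return None

EXECUTORS = [DurationExecutor, MeasurementsExecutor]

def get_executor(mri):
    # first matching executor wins
    return next((e for e in EXECUTORS if e.supports(mri)), None)

print(get_executor("placeholder/measurements.lcp"))  # MeasurementsExecutor
print(get_executor("placeholder/duration"))          # DurationExecutor
print(get_executor("placeholder/unknown"))           # None
```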
|
{"golden_diff": "diff --git a/src/sentry/sentry_metrics/querying/samples_list.py b/src/sentry/sentry_metrics/querying/samples_list.py\n--- a/src/sentry/sentry_metrics/querying/samples_list.py\n+++ b/src/sentry/sentry_metrics/querying/samples_list.py\n@@ -2,13 +2,13 @@\n from datetime import datetime\n from typing import Any\n \n-from snuba_sdk import And, Condition, Op, Or\n+from snuba_sdk import And, Column, Condition, Function, Op, Or\n \n from sentry import options\n from sentry.search.events.builder import QueryBuilder, SpansIndexedQueryBuilder\n from sentry.search.events.types import QueryBuilderConfig, SnubaParams\n from sentry.snuba.dataset import Dataset\n-from sentry.snuba.metrics.naming_layer.mri import SpanMRI, TransactionMRI\n+from sentry.snuba.metrics.naming_layer.mri import SpanMRI, TransactionMRI, is_measurement, parse_mri\n from sentry.snuba.referrer import Referrer\n \n \n@@ -86,10 +86,9 @@\n \n class SegmentsSamplesListExecutor(SamplesListExecutor):\n @classmethod\n+ @abstractmethod\n def mri_to_column(cls, mri) -> str | None:\n- if mri == TransactionMRI.DURATION.value:\n- return \"duration\"\n- return None\n+ raise NotImplementedError\n \n @classmethod\n def supports(cls, mri: str) -> bool:\n@@ -102,6 +101,16 @@\n def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:\n rounded_timestamp = f\"rounded_timestamp({self.rollup})\"\n \n+ \"\"\"\n+ When getting examples for a segment, it's actually much faster to read it\n+ from the transactions dataset compared to the spans dataset as it's a much\n+ smaller dataset.\n+\n+ One consideration here is that there is an one to one mapping between a\n+ transaction to a segment today. If this relationship changes, we'll have to\n+ rethink how to fetch segment samples a little as the transactions dataset\n+ may not contain all the necessary data.\n+ \"\"\"\n builder = QueryBuilder(\n Dataset.Transactions,\n self.params,\n@@ -114,6 +123,8 @@\n config=QueryBuilderConfig(functions_acl=[\"rounded_timestamp\", \"example\"]),\n )\n \n+ builder.add_conditions(self.get_additional_conditions())\n+\n query_results = builder.run_query(self.referrer.value)\n result = builder.process_results(query_results)\n \n@@ -126,6 +137,42 @@\n for row in result[\"data\"]\n ]\n \n+ @abstractmethod\n+ def get_additional_conditions(self) -> list[Condition]:\n+ raise NotImplementedError\n+\n+\n+class TransactionDurationSamplesListExecutor(SegmentsSamplesListExecutor):\n+ @classmethod\n+ def mri_to_column(cls, mri) -> str | None:\n+ if mri == TransactionMRI.DURATION.value:\n+ return \"duration\"\n+ return None\n+\n+ def get_additional_conditions(self) -> list[Condition]:\n+ return []\n+\n+\n+class MeasurementsSamplesListExecutor(SegmentsSamplesListExecutor):\n+ @classmethod\n+ def mri_to_column(cls, mri) -> str | None:\n+ name = cls.measurement_name(mri)\n+ if name is not None:\n+ return f\"measurements[{name}]\"\n+\n+ return None\n+\n+ @classmethod\n+ def measurement_name(cls, mri) -> str | None:\n+ parsed_mri = parse_mri(mri)\n+ if parsed_mri is not None and is_measurement(parsed_mri):\n+ return parsed_mri.name[len(\"measurements:\") :]\n+ return None\n+\n+ def get_additional_conditions(self) -> list[Condition]:\n+ name = self.measurement_name(self.mri)\n+ return [Condition(Function(\"has\", [Column(\"measurements.key\"), name]), Op.EQ, 1)]\n+\n \n class SpansSamplesListExecutor(SamplesListExecutor):\n MRI_MAPPING = {\n@@ -187,7 +234,8 @@\n \n SAMPLE_LIST_EXECUTORS = [\n SpansSamplesListExecutor,\n- SegmentsSamplesListExecutor,\n+ 
TransactionDurationSamplesListExecutor,\n+ MeasurementsSamplesListExecutor,\n ]\n", "issue": "Support measurement metrics in new metrics/traces sample endpoint\n\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import Any\n\nfrom snuba_sdk import And, Condition, Op, Or\n\nfrom sentry import options\nfrom sentry.search.events.builder import QueryBuilder, SpansIndexedQueryBuilder\nfrom sentry.search.events.types import QueryBuilderConfig, SnubaParams\nfrom sentry.snuba.dataset import Dataset\nfrom sentry.snuba.metrics.naming_layer.mri import SpanMRI, TransactionMRI\nfrom sentry.snuba.referrer import Referrer\n\n\nclass SamplesListExecutor(ABC):\n def __init__(\n self,\n mri: str,\n params: dict[str, Any],\n snuba_params: SnubaParams,\n fields: list[str],\n query: str | None,\n rollup: int,\n referrer: Referrer,\n ):\n self.mri = mri\n self.params = params\n self.snuba_params = snuba_params\n self.fields = fields\n self.query = query\n self.rollup = rollup\n self.referrer = referrer\n\n @classmethod\n @abstractmethod\n def supports(cls, metric_mri: str) -> bool:\n raise NotImplementedError\n\n @abstractmethod\n def execute(self, offset, limit):\n raise NotImplementedError\n\n def get_spans_by_key(self, span_ids: list[tuple[str, str, str]]):\n if not span_ids:\n return {\"data\": []}\n\n builder = SpansIndexedQueryBuilder(\n Dataset.SpansIndexed,\n self.params,\n snuba_params=self.snuba_params,\n selected_columns=self.fields,\n limit=len(span_ids),\n offset=0,\n )\n\n # Using `IN` sometimes does not use the bloomfilter index\n # on the table. So we're explicitly writing the condition\n # using `OR`s.\n #\n # May not be necessary because it's also filtering on the\n # `span.group` as well which allows Clickhouse to filter\n # via the primary key but this is a precaution.\n conditions = [\n And(\n [\n Condition(builder.column(\"span.group\"), Op.EQ, group),\n Condition(\n builder.column(\"timestamp\"), Op.EQ, datetime.fromisoformat(timestamp)\n ),\n Condition(builder.column(\"id\"), Op.EQ, span_id),\n ]\n )\n for (group, timestamp, span_id) in span_ids\n ]\n\n if len(conditions) == 1:\n span_condition = conditions[0]\n else:\n span_condition = Or(conditions)\n\n builder.add_conditions([span_condition])\n\n query_results = builder.run_query(self.referrer.value)\n return builder.process_results(query_results)\n\n\nclass SegmentsSamplesListExecutor(SamplesListExecutor):\n @classmethod\n def mri_to_column(cls, mri) -> str | None:\n if mri == TransactionMRI.DURATION.value:\n return \"duration\"\n return None\n\n @classmethod\n def supports(cls, mri: str) -> bool:\n return cls.mri_to_column(mri) is not None\n\n def execute(self, offset, limit):\n span_keys = self.get_span_keys(offset, limit)\n return self.get_spans_by_key(span_keys)\n\n def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:\n rounded_timestamp = f\"rounded_timestamp({self.rollup})\"\n\n builder = QueryBuilder(\n Dataset.Transactions,\n self.params,\n snuba_params=self.snuba_params,\n query=self.query,\n selected_columns=[rounded_timestamp, \"example()\"],\n limit=limit,\n offset=offset,\n sample_rate=options.get(\"metrics.sample-list.sample-rate\"),\n config=QueryBuilderConfig(functions_acl=[\"rounded_timestamp\", \"example\"]),\n )\n\n query_results = builder.run_query(self.referrer.value)\n result = builder.process_results(query_results)\n\n return [\n (\n \"00\", # all segments have a group of `00` currently\n row[\"example\"][0], # 
timestamp\n row[\"example\"][1], # span_id\n )\n for row in result[\"data\"]\n ]\n\n\nclass SpansSamplesListExecutor(SamplesListExecutor):\n MRI_MAPPING = {\n SpanMRI.DURATION.value: \"span.duration\",\n SpanMRI.SELF_TIME.value: \"span.self_time\",\n }\n\n @classmethod\n def mri_to_column(cls, mri) -> str | None:\n return cls.MRI_MAPPING.get(mri)\n\n @classmethod\n def supports(cls, mri: str) -> bool:\n return cls.mri_to_column(mri) is not None\n\n def execute(self, offset, limit):\n span_keys = self.get_span_keys(offset, limit)\n return self.get_spans_by_key(span_keys)\n\n def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:\n rounded_timestamp = f\"rounded_timestamp({self.rollup})\"\n\n builder = SpansIndexedQueryBuilder(\n Dataset.SpansIndexed,\n self.params,\n snuba_params=self.snuba_params,\n query=self.query,\n selected_columns=[rounded_timestamp, \"example()\"],\n limit=limit,\n offset=offset,\n sample_rate=options.get(\"metrics.sample-list.sample-rate\"),\n config=QueryBuilderConfig(functions_acl=[\"rounded_timestamp\", \"example\"]),\n )\n\n builder.add_conditions(\n [\n # The `00` group is used for spans not used within the\n # new starfish experience. It's effectively the group\n # for other. It is a massive group, so we've chosen\n # to exclude it here.\n #\n # In the future, we will want to look into exposing them\n Condition(builder.column(\"span.group\"), Op.NEQ, \"00\")\n ]\n )\n\n query_results = builder.run_query(self.referrer.value)\n result = builder.process_results(query_results)\n\n return [\n (\n row[\"example\"][0], # group\n row[\"example\"][1], # timestamp\n row[\"example\"][2], # span_id\n )\n for row in result[\"data\"]\n ]\n\n\nSAMPLE_LIST_EXECUTORS = [\n SpansSamplesListExecutor,\n SegmentsSamplesListExecutor,\n]\n\n\ndef get_sample_list_executor_cls(mri) -> type[SamplesListExecutor] | None:\n for executor_cls in SAMPLE_LIST_EXECUTORS:\n if executor_cls.supports(mri):\n return executor_cls\n return None\n", "path": "src/sentry/sentry_metrics/querying/samples_list.py"}], "after_files": [{"content": "from abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import Any\n\nfrom snuba_sdk import And, Column, Condition, Function, Op, Or\n\nfrom sentry import options\nfrom sentry.search.events.builder import QueryBuilder, SpansIndexedQueryBuilder\nfrom sentry.search.events.types import QueryBuilderConfig, SnubaParams\nfrom sentry.snuba.dataset import Dataset\nfrom sentry.snuba.metrics.naming_layer.mri import SpanMRI, TransactionMRI, is_measurement, parse_mri\nfrom sentry.snuba.referrer import Referrer\n\n\nclass SamplesListExecutor(ABC):\n def __init__(\n self,\n mri: str,\n params: dict[str, Any],\n snuba_params: SnubaParams,\n fields: list[str],\n query: str | None,\n rollup: int,\n referrer: Referrer,\n ):\n self.mri = mri\n self.params = params\n self.snuba_params = snuba_params\n self.fields = fields\n self.query = query\n self.rollup = rollup\n self.referrer = referrer\n\n @classmethod\n @abstractmethod\n def supports(cls, metric_mri: str) -> bool:\n raise NotImplementedError\n\n @abstractmethod\n def execute(self, offset, limit):\n raise NotImplementedError\n\n def get_spans_by_key(self, span_ids: list[tuple[str, str, str]]):\n if not span_ids:\n return {\"data\": []}\n\n builder = SpansIndexedQueryBuilder(\n Dataset.SpansIndexed,\n self.params,\n snuba_params=self.snuba_params,\n selected_columns=self.fields,\n limit=len(span_ids),\n offset=0,\n )\n\n # Using `IN` sometimes does not use the bloomfilter index\n 
# on the table. So we're explicitly writing the condition\n # using `OR`s.\n #\n # May not be necessary because it's also filtering on the\n # `span.group` as well which allows Clickhouse to filter\n # via the primary key but this is a precaution.\n conditions = [\n And(\n [\n Condition(builder.column(\"span.group\"), Op.EQ, group),\n Condition(\n builder.column(\"timestamp\"), Op.EQ, datetime.fromisoformat(timestamp)\n ),\n Condition(builder.column(\"id\"), Op.EQ, span_id),\n ]\n )\n for (group, timestamp, span_id) in span_ids\n ]\n\n if len(conditions) == 1:\n span_condition = conditions[0]\n else:\n span_condition = Or(conditions)\n\n builder.add_conditions([span_condition])\n\n query_results = builder.run_query(self.referrer.value)\n return builder.process_results(query_results)\n\n\nclass SegmentsSamplesListExecutor(SamplesListExecutor):\n @classmethod\n @abstractmethod\n def mri_to_column(cls, mri) -> str | None:\n raise NotImplementedError\n\n @classmethod\n def supports(cls, mri: str) -> bool:\n return cls.mri_to_column(mri) is not None\n\n def execute(self, offset, limit):\n span_keys = self.get_span_keys(offset, limit)\n return self.get_spans_by_key(span_keys)\n\n def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:\n rounded_timestamp = f\"rounded_timestamp({self.rollup})\"\n\n \"\"\"\n When getting examples for a segment, it's actually much faster to read it\n from the transactions dataset compared to the spans dataset as it's a much\n smaller dataset.\n\n One consideration here is that there is an one to one mapping between a\n transaction to a segment today. If this relationship changes, we'll have to\n rethink how to fetch segment samples a little as the transactions dataset\n may not contain all the necessary data.\n \"\"\"\n builder = QueryBuilder(\n Dataset.Transactions,\n self.params,\n snuba_params=self.snuba_params,\n query=self.query,\n selected_columns=[rounded_timestamp, \"example()\"],\n limit=limit,\n offset=offset,\n sample_rate=options.get(\"metrics.sample-list.sample-rate\"),\n config=QueryBuilderConfig(functions_acl=[\"rounded_timestamp\", \"example\"]),\n )\n\n builder.add_conditions(self.get_additional_conditions())\n\n query_results = builder.run_query(self.referrer.value)\n result = builder.process_results(query_results)\n\n return [\n (\n \"00\", # all segments have a group of `00` currently\n row[\"example\"][0], # timestamp\n row[\"example\"][1], # span_id\n )\n for row in result[\"data\"]\n ]\n\n @abstractmethod\n def get_additional_conditions(self) -> list[Condition]:\n raise NotImplementedError\n\n\nclass TransactionDurationSamplesListExecutor(SegmentsSamplesListExecutor):\n @classmethod\n def mri_to_column(cls, mri) -> str | None:\n if mri == TransactionMRI.DURATION.value:\n return \"duration\"\n return None\n\n def get_additional_conditions(self) -> list[Condition]:\n return []\n\n\nclass MeasurementsSamplesListExecutor(SegmentsSamplesListExecutor):\n @classmethod\n def mri_to_column(cls, mri) -> str | None:\n name = cls.measurement_name(mri)\n if name is not None:\n return f\"measurements[{name}]\"\n\n return None\n\n @classmethod\n def measurement_name(cls, mri) -> str | None:\n parsed_mri = parse_mri(mri)\n if parsed_mri is not None and is_measurement(parsed_mri):\n return parsed_mri.name[len(\"measurements:\") :]\n return None\n\n def get_additional_conditions(self) -> list[Condition]:\n name = self.measurement_name(self.mri)\n return [Condition(Function(\"has\", [Column(\"measurements.key\"), name]), Op.EQ, 
1)]\n\n\nclass SpansSamplesListExecutor(SamplesListExecutor):\n MRI_MAPPING = {\n SpanMRI.DURATION.value: \"span.duration\",\n SpanMRI.SELF_TIME.value: \"span.self_time\",\n }\n\n @classmethod\n def mri_to_column(cls, mri) -> str | None:\n return cls.MRI_MAPPING.get(mri)\n\n @classmethod\n def supports(cls, mri: str) -> bool:\n return cls.mri_to_column(mri) is not None\n\n def execute(self, offset, limit):\n span_keys = self.get_span_keys(offset, limit)\n return self.get_spans_by_key(span_keys)\n\n def get_span_keys(self, offset: int, limit: int) -> list[tuple[str, str, str]]:\n rounded_timestamp = f\"rounded_timestamp({self.rollup})\"\n\n builder = SpansIndexedQueryBuilder(\n Dataset.SpansIndexed,\n self.params,\n snuba_params=self.snuba_params,\n query=self.query,\n selected_columns=[rounded_timestamp, \"example()\"],\n limit=limit,\n offset=offset,\n sample_rate=options.get(\"metrics.sample-list.sample-rate\"),\n config=QueryBuilderConfig(functions_acl=[\"rounded_timestamp\", \"example\"]),\n )\n\n builder.add_conditions(\n [\n # The `00` group is used for spans not used within the\n # new starfish experience. It's effectively the group\n # for other. It is a massive group, so we've chosen\n # to exclude it here.\n #\n # In the future, we will want to look into exposing them\n Condition(builder.column(\"span.group\"), Op.NEQ, \"00\")\n ]\n )\n\n query_results = builder.run_query(self.referrer.value)\n result = builder.process_results(query_results)\n\n return [\n (\n row[\"example\"][0], # group\n row[\"example\"][1], # timestamp\n row[\"example\"][2], # span_id\n )\n for row in result[\"data\"]\n ]\n\n\nSAMPLE_LIST_EXECUTORS = [\n SpansSamplesListExecutor,\n TransactionDurationSamplesListExecutor,\n MeasurementsSamplesListExecutor,\n]\n\n\ndef get_sample_list_executor_cls(mri) -> type[SamplesListExecutor] | None:\n for executor_cls in SAMPLE_LIST_EXECUTORS:\n if executor_cls.supports(mri):\n return executor_cls\n return None\n", "path": "src/sentry/sentry_metrics/querying/samples_list.py"}]}
| 2,167 | 940 |
gh_patches_debug_17950
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-easyblocks-3346
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
intel-compiler fails sanity check on Debian/Ubuntu
With the recent changes to gcc easyblock #3254 and the older change to intel_compiler easyblock #1237 and its followups, intel-compiler now fails sanity check due to an incorrect module file being generated.
```
multiarch_inc_dir, ec = run_cmd("gcc -E -Wp,-v -xc /dev/null 2>&1 | grep %s$" % multiarch_out)
```
returns
```
easybuild@b-cn1615:~$ gcc -E -Wp,-v -xc /dev/null 2>&1 | grep x86_64-linux-gnu'$'
/cvmfs/ebsw.hpc2n.umu.se/amd64_ubuntu2204_icelake/software/GCCcore/13.2.0/bin/../lib/gcc/x86_64-pc-linux-gnu/13.2.0/include-fixed/x86_64-linux-gnu
/usr/include/x86_64-linux-gnu
```
resulting in an malformed CPATH being added to the module file:
```
append_path("CPATH", "/cvmfs/ebsw.hpc2n.umu.se/amd64_ubuntu2204_icelake/software/GCCcore/13.2.0/bin/../lib/gcc/x86_64-pc-linux-gnu/13.2.0/include-fixed/x86_64-linux-gnu
/usr/include/x86_64-linux-gnu")
```
Simple fix is to do ```grep -v GCCcore``` at the end of the command.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/i/intel_compilers.py`
Content:
```
1 # #
2 # Copyright 2021-2024 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 # #
25 """
26 EasyBuild support for installing Intel compilers, implemented as an easyblock
27
28 @author: Kenneth Hoste (Ghent University)
29 """
30 import os
31 from easybuild.tools import LooseVersion
32
33 from easybuild.easyblocks.generic.intelbase import IntelBase
34 from easybuild.easyblocks.t.tbb import get_tbb_gccprefix
35 from easybuild.tools.build_log import EasyBuildError, print_msg
36 from easybuild.tools.run import run_cmd
37
38
39 class EB_intel_minus_compilers(IntelBase):
40 """
41 Support for installing Intel compilers, starting with verion 2021.x (oneAPI)
42 """
43
44 def __init__(self, *args, **kwargs):
45 """
46 Easyblock constructor: check version
47 """
48 super(EB_intel_minus_compilers, self).__init__(*args, **kwargs)
49
50 # this easyblock is only valid for recent versions of the Intel compilers (2021.x, oneAPI)
51 if LooseVersion(self.version) < LooseVersion('2021'):
52 raise EasyBuildError("Invalid version %s, should be >= 2021.x" % self.version)
53
54 @property
55 def compilers_subdir(self):
56 compilers_subdir = self.get_versioned_subdir('compiler')
57 if LooseVersion(self.version) < LooseVersion('2024'):
58 compilers_subdir = os.path.join(compilers_subdir, 'linux')
59 return compilers_subdir
60
61 @property
62 def tbb_subdir(self):
63 return self.get_versioned_subdir('tbb')
64
65 def prepare_step(self, *args, **kwargs):
66 """
67 Prepare environment for installing.
68
69 Specify that oneAPI versions of Intel compilers don't require a runtime license.
70 """
71 # avoid that IntelBase trips over not having license info specified
72 kwargs['requires_runtime_license'] = False
73
74 super(EB_intel_minus_compilers, self).prepare_step(*args, **kwargs)
75
76 def configure_step(self):
77 """Configure installation."""
78
79 # redefine $HOME for install step, to avoid that anything is stored in $HOME/intel
80 # (like the 'installercache' database)
81 self.cfg['preinstallopts'] += " HOME=%s " % self.builddir
82
83 def install_step(self):
84 """
85 Install step: install each 'source file' one by one.
86 Installing the Intel compilers could be done via a single installation file (HPC Toolkit),
87 or with separate installation files (patch releases of the C++ and Fortran compilers).
88 """
89 srcs = self.src[:]
90 cnt = len(srcs)
91 for idx, src in enumerate(srcs):
92 print_msg("installing part %d/%s (%s)..." % (idx + 1, cnt, src['name']))
93 self.src = [src]
94 super(EB_intel_minus_compilers, self).install_step()
95
96 def sanity_check_step(self):
97 """
98 Custom sanity check for Intel compilers.
99 """
100
101 oneapi_compiler_cmds = [
102 'dpcpp', # Intel oneAPI Data Parallel C++ compiler
103 'icx', # oneAPI Intel C compiler
104 'icpx', # oneAPI Intel C++ compiler
105 'ifx', # oneAPI Intel Fortran compiler
106 ]
107 bindir = os.path.join(self.compilers_subdir, 'bin')
108 oneapi_compiler_paths = [os.path.join(bindir, x) for x in oneapi_compiler_cmds]
109 if LooseVersion(self.version) >= LooseVersion('2024'):
110 classic_compiler_cmds = ['ifort']
111 classic_bindir = bindir
112 else:
113 classic_compiler_cmds = ['icc', 'icpc', 'ifort']
114 classic_bindir = os.path.join(bindir, 'intel64')
115 classic_compiler_paths = [os.path.join(classic_bindir, x) for x in classic_compiler_cmds]
116
117 custom_paths = {
118 'files': classic_compiler_paths + oneapi_compiler_paths,
119 'dirs': [self.compilers_subdir],
120 }
121
122 all_compiler_cmds = classic_compiler_cmds + oneapi_compiler_cmds
123 custom_commands = ["which %s" % c for c in all_compiler_cmds]
124
125 # only for 2021.x versions do all compiler commands have the expected version;
126 # for example: for 2022.0.1, icc has version 2021.5.0, icpx has 2022.0.0
127 if LooseVersion(self.version) >= LooseVersion('2022.0'):
128 custom_commands.extend("%s --version" % c for c in all_compiler_cmds)
129 else:
130 custom_commands.extend("%s --version | grep %s" % (c, self.version) for c in all_compiler_cmds)
131
132 super(EB_intel_minus_compilers, self).sanity_check_step(custom_paths=custom_paths,
133 custom_commands=custom_commands)
134
135 def make_module_req_guess(self):
136 """
137 Paths to consider for prepend-paths statements in module file
138 """
139 libdirs = [
140 'lib',
141 os.path.join('lib', 'x64'),
142 os.path.join('compiler', 'lib', 'intel64_lin'),
143 ]
144 libdirs = [os.path.join(self.compilers_subdir, x) for x in libdirs]
145 tbb_subdir = self.tbb_subdir
146 tbb_libsubdir = os.path.join(tbb_subdir, 'lib', 'intel64')
147 libdirs.append(os.path.join(tbb_libsubdir,
148 get_tbb_gccprefix(os.path.join(self.installdir, tbb_libsubdir))))
149 guesses = {
150 'PATH': [
151 os.path.join(self.compilers_subdir, 'bin'),
152 os.path.join(self.compilers_subdir, 'bin', 'intel64'),
153 ],
154 'LD_LIBRARY_PATH': libdirs,
155 'LIBRARY_PATH': libdirs,
156 'MANPATH': [
157 os.path.join(os.path.dirname(self.compilers_subdir), 'documentation', 'en', 'man', 'common'),
158 os.path.join(self.compilers_subdir, 'share', 'man'),
159 ],
160 'OCL_ICD_FILENAMES': [
161 os.path.join(self.compilers_subdir, 'lib', 'x64', 'libintelocl.so'),
162 os.path.join(self.compilers_subdir, 'lib', 'libintelocl.so'),
163 ],
164 'CPATH': [
165 os.path.join(tbb_subdir, 'include'),
166 ],
167 'TBBROOT': [tbb_subdir],
168 }
169 return guesses
170
171 def make_module_extra(self):
172 """Additional custom variables for intel-compiler"""
173 txt = super(EB_intel_minus_compilers, self).make_module_extra()
174
175 # On Debian/Ubuntu, /usr/include/x86_64-linux-gnu, or whatever dir gcc uses, needs to be included
176 # in $CPATH for Intel C compiler
177 multiarch_out, ec = run_cmd("gcc -print-multiarch", simple=False)
178 multiarch_out = multiarch_out.strip()
179 if ec == 0 and multiarch_out:
180 multiarch_inc_dir, ec = run_cmd("gcc -E -Wp,-v -xc /dev/null 2>&1 | grep %s$" % multiarch_out)
181 if ec == 0 and multiarch_inc_dir:
182 multiarch_inc_dir = multiarch_inc_dir.strip()
183 self.log.info("Adding multiarch include path %s to $CPATH in generated module file", multiarch_inc_dir)
184 # system location must be appended at the end, so use append_paths
185 txt += self.module_generator.append_paths('CPATH', [multiarch_inc_dir], allow_abs=True)
186
187 return txt
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/easybuild/easyblocks/i/intel_compilers.py b/easybuild/easyblocks/i/intel_compilers.py
--- a/easybuild/easyblocks/i/intel_compilers.py
+++ b/easybuild/easyblocks/i/intel_compilers.py
@@ -177,7 +177,12 @@
multiarch_out, ec = run_cmd("gcc -print-multiarch", simple=False)
multiarch_out = multiarch_out.strip()
if ec == 0 and multiarch_out:
- multiarch_inc_dir, ec = run_cmd("gcc -E -Wp,-v -xc /dev/null 2>&1 | grep %s$" % multiarch_out)
+ multi_arch_inc_dir_cmd = '|'.join([
+ "gcc -E -Wp,-v -xc /dev/null 2>&1",
+ "grep %s$" % multiarch_out,
+ "grep -v /include-fixed/",
+ ])
+ multiarch_inc_dir, ec = run_cmd(multi_arch_inc_dir_cmd)
if ec == 0 and multiarch_inc_dir:
multiarch_inc_dir = multiarch_inc_dir.strip()
self.log.info("Adding multiarch include path %s to $CPATH in generated module file", multiarch_inc_dir)
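
A minimal sketch of the same filtered pipeline run outside EasyBuild, assuming a Debian/Ubuntu host where `gcc -print-multiarch` reports a triplet such as `x86_64-linux-gnu`; dropping the `/include-fixed/` line should leave a single system include path.

```python
# Reproduce the patched pipeline standalone (assumption: Debian/Ubuntu host).
import subprocess

multiarch = subprocess.run(["gcc", "-print-multiarch"],
                           capture_output=True, text=True).stdout.strip()
cmd = "|".join([
    "gcc -E -Wp,-v -xc /dev/null 2>&1",
    "grep %s$" % multiarch,
    "grep -v /include-fixed/",   # drop GCC's own include-fixed copy
])
out = subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout.strip()
print(out)  # expected: a single line like /usr/include/x86_64-linux-gnu
```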
|
{"golden_diff": "diff --git a/easybuild/easyblocks/i/intel_compilers.py b/easybuild/easyblocks/i/intel_compilers.py\n--- a/easybuild/easyblocks/i/intel_compilers.py\n+++ b/easybuild/easyblocks/i/intel_compilers.py\n@@ -177,7 +177,12 @@\n multiarch_out, ec = run_cmd(\"gcc -print-multiarch\", simple=False)\n multiarch_out = multiarch_out.strip()\n if ec == 0 and multiarch_out:\n- multiarch_inc_dir, ec = run_cmd(\"gcc -E -Wp,-v -xc /dev/null 2>&1 | grep %s$\" % multiarch_out)\n+ multi_arch_inc_dir_cmd = '|'.join([\n+ \"gcc -E -Wp,-v -xc /dev/null 2>&1\",\n+ \"grep %s$\" % multiarch_out,\n+ \"grep -v /include-fixed/\",\n+ ])\n+ multiarch_inc_dir, ec = run_cmd(multi_arch_inc_dir_cmd)\n if ec == 0 and multiarch_inc_dir:\n multiarch_inc_dir = multiarch_inc_dir.strip()\n self.log.info(\"Adding multiarch include path %s to $CPATH in generated module file\", multiarch_inc_dir)\n", "issue": "intel-compiler fails sanity check on Debian/Ubuntu\nWith the recent changes to gcc easyblock #3254 and the older change to intel_compiler easyblock #1237 and its followups, intel-compiler now fails sanity check due to an incorrect module file being generated.\r\n\r\n```\r\nmultiarch_inc_dir, ec = run_cmd(\"gcc -E -Wp,-v -xc /dev/null 2>&1 | grep %s$\" % multiarch_out)\r\n```\r\nreturns\r\n```\r\neasybuild@b-cn1615:~$ gcc -E -Wp,-v -xc /dev/null 2>&1 | grep x86_64-linux-gnu'$'\r\n /cvmfs/ebsw.hpc2n.umu.se/amd64_ubuntu2204_icelake/software/GCCcore/13.2.0/bin/../lib/gcc/x86_64-pc-linux-gnu/13.2.0/include-fixed/x86_64-linux-gnu\r\n /usr/include/x86_64-linux-gnu\r\n```\r\n\r\nresulting in an malformed CPATH being added to the module file:\r\n```\r\nappend_path(\"CPATH\", \"/cvmfs/ebsw.hpc2n.umu.se/amd64_ubuntu2204_icelake/software/GCCcore/13.2.0/bin/../lib/gcc/x86_64-pc-linux-gnu/13.2.0/include-fixed/x86_64-linux-gnu\r\n /usr/include/x86_64-linux-gnu\")\r\n```\r\n\r\nSimple fix is to do ```grep -v GCCcore``` at the and of the command.\n", "before_files": [{"content": "# #\n# Copyright 2021-2024 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n# #\n\"\"\"\nEasyBuild support for installing Intel compilers, implemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\nfrom easybuild.tools import LooseVersion\n\nfrom easybuild.easyblocks.generic.intelbase import IntelBase\nfrom easybuild.easyblocks.t.tbb import get_tbb_gccprefix\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.run import run_cmd\n\n\nclass EB_intel_minus_compilers(IntelBase):\n \"\"\"\n Support for installing Intel compilers, starting with verion 2021.x (oneAPI)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Easyblock constructor: check version\n \"\"\"\n super(EB_intel_minus_compilers, self).__init__(*args, **kwargs)\n\n # this easyblock is only valid for recent versions of the Intel compilers (2021.x, oneAPI)\n if LooseVersion(self.version) < LooseVersion('2021'):\n raise EasyBuildError(\"Invalid version %s, should be >= 2021.x\" % self.version)\n\n @property\n def compilers_subdir(self):\n compilers_subdir = self.get_versioned_subdir('compiler')\n if LooseVersion(self.version) < LooseVersion('2024'):\n compilers_subdir = os.path.join(compilers_subdir, 'linux')\n return compilers_subdir\n\n @property\n def tbb_subdir(self):\n return self.get_versioned_subdir('tbb')\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"\n Prepare environment for installing.\n\n Specify that oneAPI versions of Intel compilers don't require a runtime license.\n \"\"\"\n # avoid that IntelBase trips over not having license info specified\n kwargs['requires_runtime_license'] = False\n\n super(EB_intel_minus_compilers, self).prepare_step(*args, **kwargs)\n\n def configure_step(self):\n \"\"\"Configure installation.\"\"\"\n\n # redefine $HOME for install step, to avoid that anything is stored in $HOME/intel\n # (like the 'installercache' database)\n self.cfg['preinstallopts'] += \" HOME=%s \" % self.builddir\n\n def install_step(self):\n \"\"\"\n Install step: install each 'source file' one by one.\n Installing the Intel compilers could be done via a single installation file (HPC Toolkit),\n or with separate installation files (patch releases of the C++ and Fortran compilers).\n \"\"\"\n srcs = self.src[:]\n cnt = len(srcs)\n for idx, src in enumerate(srcs):\n print_msg(\"installing part %d/%s (%s)...\" % (idx + 1, cnt, src['name']))\n self.src = [src]\n super(EB_intel_minus_compilers, self).install_step()\n\n def sanity_check_step(self):\n \"\"\"\n Custom sanity check for Intel compilers.\n \"\"\"\n\n oneapi_compiler_cmds = [\n 'dpcpp', # Intel oneAPI Data Parallel C++ compiler\n 'icx', # oneAPI Intel C compiler\n 'icpx', # oneAPI Intel C++ compiler\n 'ifx', # oneAPI Intel Fortran compiler\n ]\n bindir = os.path.join(self.compilers_subdir, 'bin')\n oneapi_compiler_paths = [os.path.join(bindir, x) for x in oneapi_compiler_cmds]\n if LooseVersion(self.version) >= LooseVersion('2024'):\n classic_compiler_cmds = ['ifort']\n classic_bindir = bindir\n else:\n classic_compiler_cmds = ['icc', 'icpc', 'ifort']\n classic_bindir = os.path.join(bindir, 'intel64')\n classic_compiler_paths = [os.path.join(classic_bindir, x) for x in classic_compiler_cmds]\n\n custom_paths = {\n 'files': classic_compiler_paths + oneapi_compiler_paths,\n 'dirs': [self.compilers_subdir],\n }\n\n all_compiler_cmds = classic_compiler_cmds + oneapi_compiler_cmds\n custom_commands = [\"which %s\" % c for c in all_compiler_cmds]\n\n # only for 2021.x versions do all compiler commands have the 
expected version;\n # for example: for 2022.0.1, icc has version 2021.5.0, icpx has 2022.0.0\n if LooseVersion(self.version) >= LooseVersion('2022.0'):\n custom_commands.extend(\"%s --version\" % c for c in all_compiler_cmds)\n else:\n custom_commands.extend(\"%s --version | grep %s\" % (c, self.version) for c in all_compiler_cmds)\n\n super(EB_intel_minus_compilers, self).sanity_check_step(custom_paths=custom_paths,\n custom_commands=custom_commands)\n\n def make_module_req_guess(self):\n \"\"\"\n Paths to consider for prepend-paths statements in module file\n \"\"\"\n libdirs = [\n 'lib',\n os.path.join('lib', 'x64'),\n os.path.join('compiler', 'lib', 'intel64_lin'),\n ]\n libdirs = [os.path.join(self.compilers_subdir, x) for x in libdirs]\n tbb_subdir = self.tbb_subdir\n tbb_libsubdir = os.path.join(tbb_subdir, 'lib', 'intel64')\n libdirs.append(os.path.join(tbb_libsubdir,\n get_tbb_gccprefix(os.path.join(self.installdir, tbb_libsubdir))))\n guesses = {\n 'PATH': [\n os.path.join(self.compilers_subdir, 'bin'),\n os.path.join(self.compilers_subdir, 'bin', 'intel64'),\n ],\n 'LD_LIBRARY_PATH': libdirs,\n 'LIBRARY_PATH': libdirs,\n 'MANPATH': [\n os.path.join(os.path.dirname(self.compilers_subdir), 'documentation', 'en', 'man', 'common'),\n os.path.join(self.compilers_subdir, 'share', 'man'),\n ],\n 'OCL_ICD_FILENAMES': [\n os.path.join(self.compilers_subdir, 'lib', 'x64', 'libintelocl.so'),\n os.path.join(self.compilers_subdir, 'lib', 'libintelocl.so'),\n ],\n 'CPATH': [\n os.path.join(tbb_subdir, 'include'),\n ],\n 'TBBROOT': [tbb_subdir],\n }\n return guesses\n\n def make_module_extra(self):\n \"\"\"Additional custom variables for intel-compiler\"\"\"\n txt = super(EB_intel_minus_compilers, self).make_module_extra()\n\n # On Debian/Ubuntu, /usr/include/x86_64-linux-gnu, or whatever dir gcc uses, needs to be included\n # in $CPATH for Intel C compiler\n multiarch_out, ec = run_cmd(\"gcc -print-multiarch\", simple=False)\n multiarch_out = multiarch_out.strip()\n if ec == 0 and multiarch_out:\n multiarch_inc_dir, ec = run_cmd(\"gcc -E -Wp,-v -xc /dev/null 2>&1 | grep %s$\" % multiarch_out)\n if ec == 0 and multiarch_inc_dir:\n multiarch_inc_dir = multiarch_inc_dir.strip()\n self.log.info(\"Adding multiarch include path %s to $CPATH in generated module file\", multiarch_inc_dir)\n # system location must be appended at the end, so use append_paths\n txt += self.module_generator.append_paths('CPATH', [multiarch_inc_dir], allow_abs=True)\n\n return txt\n", "path": "easybuild/easyblocks/i/intel_compilers.py"}], "after_files": [{"content": "# #\n# Copyright 2021-2024 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n# #\n\"\"\"\nEasyBuild support for installing Intel compilers, implemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\nfrom easybuild.tools import LooseVersion\n\nfrom easybuild.easyblocks.generic.intelbase import IntelBase\nfrom easybuild.easyblocks.t.tbb import get_tbb_gccprefix\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.run import run_cmd\n\n\nclass EB_intel_minus_compilers(IntelBase):\n \"\"\"\n Support for installing Intel compilers, starting with verion 2021.x (oneAPI)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Easyblock constructor: check version\n \"\"\"\n super(EB_intel_minus_compilers, self).__init__(*args, **kwargs)\n\n # this easyblock is only valid for recent versions of the Intel compilers (2021.x, oneAPI)\n if LooseVersion(self.version) < LooseVersion('2021'):\n raise EasyBuildError(\"Invalid version %s, should be >= 2021.x\" % self.version)\n\n @property\n def compilers_subdir(self):\n compilers_subdir = self.get_versioned_subdir('compiler')\n if LooseVersion(self.version) < LooseVersion('2024'):\n compilers_subdir = os.path.join(compilers_subdir, 'linux')\n return compilers_subdir\n\n @property\n def tbb_subdir(self):\n return self.get_versioned_subdir('tbb')\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"\n Prepare environment for installing.\n\n Specify that oneAPI versions of Intel compilers don't require a runtime license.\n \"\"\"\n # avoid that IntelBase trips over not having license info specified\n kwargs['requires_runtime_license'] = False\n\n super(EB_intel_minus_compilers, self).prepare_step(*args, **kwargs)\n\n def configure_step(self):\n \"\"\"Configure installation.\"\"\"\n\n # redefine $HOME for install step, to avoid that anything is stored in $HOME/intel\n # (like the 'installercache' database)\n self.cfg['preinstallopts'] += \" HOME=%s \" % self.builddir\n\n def install_step(self):\n \"\"\"\n Install step: install each 'source file' one by one.\n Installing the Intel compilers could be done via a single installation file (HPC Toolkit),\n or with separate installation files (patch releases of the C++ and Fortran compilers).\n \"\"\"\n srcs = self.src[:]\n cnt = len(srcs)\n for idx, src in enumerate(srcs):\n print_msg(\"installing part %d/%s (%s)...\" % (idx + 1, cnt, src['name']))\n self.src = [src]\n super(EB_intel_minus_compilers, self).install_step()\n\n def sanity_check_step(self):\n \"\"\"\n Custom sanity check for Intel compilers.\n \"\"\"\n\n oneapi_compiler_cmds = [\n 'dpcpp', # Intel oneAPI Data Parallel C++ compiler\n 'icx', # oneAPI Intel C compiler\n 'icpx', # oneAPI Intel C++ compiler\n 'ifx', # oneAPI Intel Fortran compiler\n ]\n bindir = os.path.join(self.compilers_subdir, 'bin')\n oneapi_compiler_paths = [os.path.join(bindir, x) for x in oneapi_compiler_cmds]\n if LooseVersion(self.version) >= LooseVersion('2024'):\n classic_compiler_cmds = ['ifort']\n classic_bindir = bindir\n else:\n classic_compiler_cmds = ['icc', 'icpc', 'ifort']\n classic_bindir = os.path.join(bindir, 'intel64')\n classic_compiler_paths = [os.path.join(classic_bindir, x) for x in classic_compiler_cmds]\n\n custom_paths = {\n 'files': classic_compiler_paths + oneapi_compiler_paths,\n 'dirs': [self.compilers_subdir],\n }\n\n all_compiler_cmds = classic_compiler_cmds + 
oneapi_compiler_cmds\n custom_commands = [\"which %s\" % c for c in all_compiler_cmds]\n\n # only for 2021.x versions do all compiler commands have the expected version;\n # for example: for 2022.0.1, icc has version 2021.5.0, icpx has 2022.0.0\n if LooseVersion(self.version) >= LooseVersion('2022.0'):\n custom_commands.extend(\"%s --version\" % c for c in all_compiler_cmds)\n else:\n custom_commands.extend(\"%s --version | grep %s\" % (c, self.version) for c in all_compiler_cmds)\n\n super(EB_intel_minus_compilers, self).sanity_check_step(custom_paths=custom_paths,\n custom_commands=custom_commands)\n\n def make_module_req_guess(self):\n \"\"\"\n Paths to consider for prepend-paths statements in module file\n \"\"\"\n libdirs = [\n 'lib',\n os.path.join('lib', 'x64'),\n os.path.join('compiler', 'lib', 'intel64_lin'),\n ]\n libdirs = [os.path.join(self.compilers_subdir, x) for x in libdirs]\n tbb_subdir = self.tbb_subdir\n tbb_libsubdir = os.path.join(tbb_subdir, 'lib', 'intel64')\n libdirs.append(os.path.join(tbb_libsubdir,\n get_tbb_gccprefix(os.path.join(self.installdir, tbb_libsubdir))))\n guesses = {\n 'PATH': [\n os.path.join(self.compilers_subdir, 'bin'),\n os.path.join(self.compilers_subdir, 'bin', 'intel64'),\n ],\n 'LD_LIBRARY_PATH': libdirs,\n 'LIBRARY_PATH': libdirs,\n 'MANPATH': [\n os.path.join(os.path.dirname(self.compilers_subdir), 'documentation', 'en', 'man', 'common'),\n os.path.join(self.compilers_subdir, 'share', 'man'),\n ],\n 'OCL_ICD_FILENAMES': [\n os.path.join(self.compilers_subdir, 'lib', 'x64', 'libintelocl.so'),\n os.path.join(self.compilers_subdir, 'lib', 'libintelocl.so'),\n ],\n 'CPATH': [\n os.path.join(tbb_subdir, 'include'),\n ],\n 'TBBROOT': [tbb_subdir],\n }\n return guesses\n\n def make_module_extra(self):\n \"\"\"Additional custom variables for intel-compiler\"\"\"\n txt = super(EB_intel_minus_compilers, self).make_module_extra()\n\n # On Debian/Ubuntu, /usr/include/x86_64-linux-gnu, or whatever dir gcc uses, needs to be included\n # in $CPATH for Intel C compiler\n multiarch_out, ec = run_cmd(\"gcc -print-multiarch\", simple=False)\n multiarch_out = multiarch_out.strip()\n if ec == 0 and multiarch_out:\n multi_arch_inc_dir_cmd = '|'.join([\n \"gcc -E -Wp,-v -xc /dev/null 2>&1\",\n \"grep %s$\" % multiarch_out,\n \"grep -v /include-fixed/\",\n ])\n multiarch_inc_dir, ec = run_cmd(multi_arch_inc_dir_cmd)\n if ec == 0 and multiarch_inc_dir:\n multiarch_inc_dir = multiarch_inc_dir.strip()\n self.log.info(\"Adding multiarch include path %s to $CPATH in generated module file\", multiarch_inc_dir)\n # system location must be appended at the end, so use append_paths\n txt += self.module_generator.append_paths('CPATH', [multiarch_inc_dir], allow_abs=True)\n\n return txt\n", "path": "easybuild/easyblocks/i/intel_compilers.py"}]}
| 3,018 | 280 |
gh_patches_debug_17633
|
rasdani/github-patches
|
git_diff
|
nextcloud__appstore-523
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
App Generator creates faulty info.xml when using umlauts (special characters äöü)
Problem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**
## Details
* Browser and browser version: Firefox 55.0.3
## Steps to reproduce
Steps to reproduce:
- visit https://apps.nextcloud.com/developer/apps/generate
- enter the required information; in summary and description enter "This is a test app äöü"
- click generate and download.
- look at the generated info.xml
App Generator creates faulty info.xml when using umlauts (special characters äöü)
Problem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**
## Details
* Browser and browser version: Firefox 55.0.3
## Steps to reproduce
Steps to reproduce:
- visit https://apps.nextcloud.com/developer/apps/generate
- enter the required information; in summary and description enter "This is a test app äöü"
- click generate and download.
- look at the generated info.xml
--- END ISSUE ---
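
The mismatch behind the truncation is easy to see in isolation: for non-ASCII text the character count is smaller than the UTF-8 byte count, so a tar entry sized in characters cuts the file short. A minimal illustration, using the sample string from the reproduction steps above:

```python
s = "This is a test app äöü"
print(len(s))           # 22 characters
print(len(s.encode()))  # 25 bytes: each umlaut takes 2 bytes in UTF-8
# Declaring the tar entry size as 22 while the encoded payload is 25 bytes
# means tarfile copies only the first 22 bytes, cutting off the end of info.xml.
```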
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/scaffolding/archive.py`
Content:
```
1 import re
2 import tarfile
3 from io import BytesIO, StringIO
4 from typing import Dict
5 from os.path import join, isdir, relpath
6 from os import walk
7
8 from django.template import Context
9 from django.template import Template
10
11 from nextcloudappstore.core.facades import resolve_file_relative_path
12 from nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES
13
14
15 def build_files(args: Dict[str, str]) -> Dict[str, str]:
16 platform = int(args['platform']) # prevent path traversal
17 vars = {
18 'id': args['name'].lower(),
19 'summary': args['summary'],
20 'description': args['description'],
21 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),
22 'namespace': args['name'],
23 'author_name': args['author_name'],
24 'author_mail': args['author_email'],
25 'author_homepage': args['author_homepage'],
26 'issue_tracker': args['issue_tracker'],
27 'categories': args['categories'],
28 'nextcloud_version': platform
29 }
30 vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))
31 relative_base = 'app-templates/%i/app/' % platform
32 base = resolve_file_relative_path(__file__, relative_base)
33
34 context = Context({'app': vars})
35 result = {}
36 if isdir(base):
37 for root, dirs, files in walk(base):
38 for file in files:
39 file_path = join(root, file)
40 rel_file_path = '%s/%s' % (
41 vars['id'], relpath(file_path, base)
42 )
43 with open(file_path) as f:
44 t = Template(f.read())
45 result[rel_file_path] = t.render(context)
46
47 return result
48
49
50 def build_archive(parameters: Dict[str, str]) -> BytesIO:
51 buffer = BytesIO()
52 with tarfile.open(fileobj=buffer, mode='w:gz') as f:
53 files = build_files(parameters)
54 for path, contents in files.items():
55 info = tarfile.TarInfo(path)
56 info.size = len(contents)
57 f.addfile(info, BytesIO(contents.encode()))
58 buffer.seek(0)
59 return buffer
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nextcloudappstore/scaffolding/archive.py b/nextcloudappstore/scaffolding/archive.py
--- a/nextcloudappstore/scaffolding/archive.py
+++ b/nextcloudappstore/scaffolding/archive.py
@@ -1,9 +1,9 @@
import re
import tarfile
-from io import BytesIO, StringIO
-from typing import Dict
-from os.path import join, isdir, relpath
+from io import BytesIO
from os import walk
+from os.path import join, isdir, relpath
+from typing import Dict
from django.template import Context
from django.template import Template
@@ -53,7 +53,8 @@
files = build_files(parameters)
for path, contents in files.items():
info = tarfile.TarInfo(path)
- info.size = len(contents)
- f.addfile(info, BytesIO(contents.encode()))
+ encoded_content = contents.encode()
+ info.size = len(encoded_content)
+ f.addfile(info, BytesIO(encoded_content))
buffer.seek(0)
return buffer
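
A minimal standalone version of the corrected pattern, sizing the entry from the encoded bytes and writing those same bytes; the path and XML snippet are made up for illustration.

```python
import tarfile
from io import BytesIO

contents = "<summary>This is a test app äöü</summary>\n"
buffer = BytesIO()
with tarfile.open(fileobj=buffer, mode="w:gz") as f:
    info = tarfile.TarInfo("myapp/appinfo/info.xml")
    encoded = contents.encode()
    info.size = len(encoded)          # size in bytes, not characters
    f.addfile(info, BytesIO(encoded))
buffer.seek(0)

# round-trip to confirm nothing was cut off
with tarfile.open(fileobj=buffer, mode="r:gz") as f:
    extracted = f.extractfile("myapp/appinfo/info.xml").read().decode()
assert extracted == contents
```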
|
{"golden_diff": "diff --git a/nextcloudappstore/scaffolding/archive.py b/nextcloudappstore/scaffolding/archive.py\n--- a/nextcloudappstore/scaffolding/archive.py\n+++ b/nextcloudappstore/scaffolding/archive.py\n@@ -1,9 +1,9 @@\n import re\n import tarfile\n-from io import BytesIO, StringIO\n-from typing import Dict\n-from os.path import join, isdir, relpath\n+from io import BytesIO\n from os import walk\n+from os.path import join, isdir, relpath\n+from typing import Dict\n \n from django.template import Context\n from django.template import Template\n@@ -53,7 +53,8 @@\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n- info.size = len(contents)\n- f.addfile(info, BytesIO(contents.encode()))\n+ encoded_content = contents.encode()\n+ info.size = len(encoded_content)\n+ f.addfile(info, BytesIO(encoded_content))\n buffer.seek(0)\n return buffer\n", "issue": "App Generator creates faulty info.xml when using umlauts (special characters \u00e4\u00f6\u00fc)\nProblem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**\r\n\r\n## Details\r\n\r\n* Browser and browser version: Firefox 55.0.3\r\n\r\n## Steps to reproduce\r\n\r\nSteps to reproduce:\r\n\r\n- visit https://apps.nextcloud.com/developer/apps/generate\r\n- enter the required information; in summary and description enter \"This is a test app \u00e4\u00f6\u00fc\"\r\n- click generate and download.\r\n- look at the generated info.xml\r\n\r\n\nApp Generator creates faulty info.xml when using umlauts (special characters \u00e4\u00f6\u00fc)\nProblem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**\r\n\r\n## Details\r\n\r\n* Browser and browser version: Firefox 55.0.3\r\n\r\n## Steps to reproduce\r\n\r\nSteps to reproduce:\r\n\r\n- visit https://apps.nextcloud.com/developer/apps/generate\r\n- enter the required information; in summary and description enter \"This is a test app \u00e4\u00f6\u00fc\"\r\n- click generate and download.\r\n- look at the generated info.xml\r\n\r\n\n", "before_files": [{"content": "import re\nimport tarfile\nfrom io import BytesIO, StringIO\nfrom typing import Dict\nfrom os.path import join, isdir, relpath\nfrom os import walk\n\nfrom django.template import Context\nfrom django.template import Template\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES\n\n\ndef build_files(args: Dict[str, str]) -> Dict[str, str]:\n platform = int(args['platform']) # prevent path traversal\n vars = {\n 'id': args['name'].lower(),\n 'summary': args['summary'],\n 'description': args['description'],\n 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),\n 'namespace': args['name'],\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n 'issue_tracker': args['issue_tracker'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\n vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))\n relative_base = 'app-templates/%i/app/' % platform\n base = resolve_file_relative_path(__file__, relative_base)\n\n context = Context({'app': vars})\n result = {}\n if isdir(base):\n for root, dirs, files in walk(base):\n for file in files:\n file_path = join(root, file)\n rel_file_path = '%s/%s' % (\n vars['id'], relpath(file_path, base)\n )\n with open(file_path) as f:\n t = Template(f.read())\n result[rel_file_path] = 
t.render(context)\n\n return result\n\n\ndef build_archive(parameters: Dict[str, str]) -> BytesIO:\n buffer = BytesIO()\n with tarfile.open(fileobj=buffer, mode='w:gz') as f:\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n info.size = len(contents)\n f.addfile(info, BytesIO(contents.encode()))\n buffer.seek(0)\n return buffer\n", "path": "nextcloudappstore/scaffolding/archive.py"}], "after_files": [{"content": "import re\nimport tarfile\nfrom io import BytesIO\nfrom os import walk\nfrom os.path import join, isdir, relpath\nfrom typing import Dict\n\nfrom django.template import Context\nfrom django.template import Template\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES\n\n\ndef build_files(args: Dict[str, str]) -> Dict[str, str]:\n platform = int(args['platform']) # prevent path traversal\n vars = {\n 'id': args['name'].lower(),\n 'summary': args['summary'],\n 'description': args['description'],\n 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),\n 'namespace': args['name'],\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n 'issue_tracker': args['issue_tracker'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\n vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))\n relative_base = 'app-templates/%i/app/' % platform\n base = resolve_file_relative_path(__file__, relative_base)\n\n context = Context({'app': vars})\n result = {}\n if isdir(base):\n for root, dirs, files in walk(base):\n for file in files:\n file_path = join(root, file)\n rel_file_path = '%s/%s' % (\n vars['id'], relpath(file_path, base)\n )\n with open(file_path) as f:\n t = Template(f.read())\n result[rel_file_path] = t.render(context)\n\n return result\n\n\ndef build_archive(parameters: Dict[str, str]) -> BytesIO:\n buffer = BytesIO()\n with tarfile.open(fileobj=buffer, mode='w:gz') as f:\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n encoded_content = contents.encode()\n info.size = len(encoded_content)\n f.addfile(info, BytesIO(encoded_content))\n buffer.seek(0)\n return buffer\n", "path": "nextcloudappstore/scaffolding/archive.py"}]}
| 1,099 | 234 |
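A short aside on the `archive.py` fix captured in the record above: `tarfile.TarInfo.size` must be the byte length of the member, so the patch encodes the rendered template once and measures the encoded bytes instead of the character count. The sketch below illustrates the same point in isolation; the helper name, path, and sample string are assumptions for the example, not part of the original record.

```python
import tarfile
from io import BytesIO


def add_text_member(tar: tarfile.TarFile, path: str, text: str) -> None:
    # Encode once and reuse the same bytes for both the size and the payload.
    data = text.encode("utf-8")
    info = tarfile.TarInfo(path)
    info.size = len(data)  # byte length, not character count
    tar.addfile(info, BytesIO(data))


# "äöü" is 3 characters but 6 UTF-8 bytes; using len(text) for info.size
# would under-report the member size and truncate the archived file.
buf = BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tar:
    add_text_member(tar, "app/appinfo/info.xml", "<info>äöü</info>")
```

That byte/character mismatch is exactly why the generated `info.xml` lost its closing tag once the summary contained umlauts.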
gh_patches_debug_7203 | rasdani/github-patches | git_diff | saulpw__visidata-2338 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python error after pressing SPACE to pull up command-name input
**Small description**
The first time I press SPACE on a sheet, I often get the following Python error. It usually succeeds on the second press.
**Expected result**
Open command prompt.
**Actual result with screenshot**

[errors_recent.txt](https://github.com/saulpw/visidata/files/14471969/errors_recent.txt)
**Steps to reproduce with sample data and a .vd**
Open any sheet and press SPACE.
It may take a few attempts. It seems to only happen the first time it is entered on a sheet, and only some of the time.
**Additional context**
Python 3.8.0
VisiData 3.0.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/features/cmdpalette.py`
Content:
```
1 import collections
2 from functools import partial
3 from visidata import DrawablePane, BaseSheet, vd, VisiData, CompleteKey, clipdraw, HelpSheet, colors, AcceptInput, AttrDict, drawcache_property
4
5
6 vd.theme_option('color_cmdpalette', 'black on 72', 'base color of command palette')
7 vd.theme_option('disp_cmdpal_max', 10, 'max number of suggestions for command palette')
8
9 vd.help_longname = '''# Choose Command
10 Start typing a command longname or keyword in its helpstring.
11
12 - `Enter` to execute top command.
13 - `Tab` to highlight top command.
14
15 ## When Command Highlighted
16
17 - `Tab`/`Shift+Tab` to cycle highlighted command.
18 - `Enter` to execute highlighted command.
19 - `0-9` to execute numbered command.
20 '''
21
22 def add_to_input(v, i, value=''):
23 items = list(v.split())
24 if not v or v.endswith(' '):
25 items.append(value)
26 else:
27 items[-1] = value
28 v = ' '.join(items) + ' '
29 return v, len(v)
30
31
32 def accept_input(v, i, value=None):
33 raise AcceptInput(v if value is None else value)
34
35 def accept_input_if_subset(v, i, value=''):
36 # if no input, accept value under cmd palette cursor
37 if not v:
38 raise AcceptInput(value)
39
40 # if the last item is a partial match, replace it with the full value
41 parts = v.split()
42 if value and value.startswith(parts[-1]):
43 v = ' '.join(parts[:-1] + [value])
44
45 raise AcceptInput(v)
46
47 @VisiData.lazy_property
48 def usedInputs(vd):
49 return collections.defaultdict(int)
50
51 @DrawablePane.after
52 def execCommand2(sheet, cmd, *args, **kwargs):
53 vd.usedInputs[cmd.longname] += 1
54
55 @BaseSheet.api
56 def inputPalette(sheet, prompt, items,
57 value_key='key',
58 formatter=lambda m, item, trigger_key: f'{trigger_key} {item}',
59 multiple=False,
60 **kwargs):
61 bindings = dict()
62
63 tabitem = -1
64
65 def tab(n, nitems):
66 nonlocal tabitem
67 if not nitems: return None
68 tabitem = (tabitem + n) % nitems
69
70 def _draw_palette(value):
71 words = value.lower().split()
72
73 if multiple and words:
74 if value.endswith(' '):
75 finished_words = words
76 unfinished_words = []
77 else:
78 finished_words = words[:-1]
79 unfinished_words = [words[-1]]
80 else:
81 unfinished_words = words
82 finished_words = []
83
84 unuseditems = [item for item in items if item[value_key] not in finished_words]
85
86 matches = vd.fuzzymatch(unuseditems, unfinished_words)
87
88 h = sheet.windowHeight
89 w = min(100, sheet.windowWidth)
90 nitems = min(h-1, sheet.options.disp_cmdpal_max)
91
92 useditems = []
93 palrows = []
94
95 for m in matches[:nitems]:
96 useditems.append(m.match)
97 palrows.append((m, m.match))
98
99 favitems = sorted([item for item in unuseditems if item not in useditems],
100 key=lambda item: -vd.usedInputs.get(item[value_key], 0))
101
102 for item in favitems[:nitems-len(palrows)]:
103 palrows.append((None, item))
104
105 navailitems = min(len(palrows), nitems)
106
107 bindings['^I'] = lambda *args: tab(1, navailitems) or args
108 bindings['KEY_BTAB'] = lambda *args: tab(-1, navailitems) or args
109
110 for i in range(nitems-len(palrows)):
111 palrows.append((None, None))
112
113 used_triggers = set()
114 for i, (m, item) in enumerate(palrows):
115 trigger_key = ''
116 if tabitem >= 0 and item:
117 tkey = f'{i+1}'[-1]
118 if tkey not in used_triggers:
119 trigger_key = tkey
120 bindings[trigger_key] = partial(add_to_input if multiple else accept_input, value=item[value_key])
121 used_triggers.add(trigger_key)
122
123 attr = colors.color_cmdpalette
124
125 if tabitem < 0 and palrows:
126 _ , topitem = palrows[0]
127 if not topitem: return
128 if multiple:
129 bindings[' '] = partial(add_to_input, value=topitem[value_key])
130 bindings['^J'] = partial(accept_input_if_subset, value=topitem[value_key])
131 else:
132 bindings['^J'] = partial(accept_input, value=topitem[value_key])
133 elif item and i == tabitem:
134 if not item: return
135 if multiple:
136 bindings['^J'] = partial(accept_input_if_subset, value=item[value_key])
137 bindings[' '] = partial(add_to_input, value=item[value_key])
138 else:
139 bindings['^J'] = partial(accept_input, value=item[value_key])
140 attr = colors.color_menu_spec
141
142 match_summary = formatter(m, item, trigger_key) if item else ' '
143
144 clipdraw(sheet._scr, h-nitems-1+i, 0, match_summary, attr, w=w)
145
146 return None
147
148 completer = CompleteKey(sorted(item[value_key] for item in items))
149 return vd.input(prompt,
150 completer=completer,
151 updater=_draw_palette,
152 bindings=bindings,
153 **kwargs)
154
155
156 def cmdlist(sheet):
157 return [
158 AttrDict(longname=row.longname,
159 description=sheet.cmddict[(row.sheet, row.longname)].helpstr)
160 for row in sheet.rows
161 ]
162 HelpSheet.cmdlist = drawcache_property(cmdlist)
163
164
165 @BaseSheet.api
166 def inputLongname(sheet):
167 prompt = 'command name: '
168 # get set of commands possible in the sheet
169 this_sheets_help = HelpSheet('', source=sheet)
170 this_sheets_help.ensureLoaded()
171
172 def _fmt_cmdpal_summary(match, row, trigger_key):
173 keystrokes = this_sheets_help.revbinds.get(row.longname, [None])[0] or ' '
174 formatted_longname = match.formatted.get('longname', row.longname) if match else row.longname
175 formatted_name = f'[:bold][:onclick {row.longname}]{formatted_longname}[/][/]'
176 if vd.options.debug and match:
177 keystrokes = f'[{match.score}]'
178 r = f' [:keystrokes]{keystrokes.rjust(len(prompt)-5)}[/] '
179 if trigger_key:
180 r += f'[:keystrokes]{trigger_key}[/]'
181 else:
182 r += ' '
183
184 r += f' {formatted_name}'
185 if row.description:
186 formatted_desc = match.formatted.get('description', row.description) if match else row.description
187 r += f' - {formatted_desc}'
188 return r
189
190 return sheet.inputPalette(prompt, this_sheets_help.cmdlist,
191 value_key='longname',
192 formatter=_fmt_cmdpal_summary,
193 help=vd.help_longname,
194 type='longname')
195
196
197 @BaseSheet.api
198 def exec_longname(sheet, longname):
199 if not sheet.getCommand(longname):
200 vd.fail(f'no command {longname}')
201 sheet.execCommand(longname)
202
203
204 vd.addCommand('Space', 'exec-longname', 'exec_longname(inputLongname())', 'execute command by its longname')
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/visidata/features/cmdpalette.py b/visidata/features/cmdpalette.py
--- a/visidata/features/cmdpalette.py
+++ b/visidata/features/cmdpalette.py
@@ -167,7 +167,7 @@
prompt = 'command name: '
# get set of commands possible in the sheet
this_sheets_help = HelpSheet('', source=sheet)
- this_sheets_help.ensureLoaded()
+ vd.sync(this_sheets_help.ensureLoaded())
def _fmt_cmdpal_summary(match, row, trigger_key):
keystrokes = this_sheets_help.revbinds.get(row.longname, [None])[0] or ' '
|
{"golden_diff": "diff --git a/visidata/features/cmdpalette.py b/visidata/features/cmdpalette.py\n--- a/visidata/features/cmdpalette.py\n+++ b/visidata/features/cmdpalette.py\n@@ -167,7 +167,7 @@\n prompt = 'command name: '\n # get set of commands possible in the sheet\n this_sheets_help = HelpSheet('', source=sheet)\n- this_sheets_help.ensureLoaded()\n+ vd.sync(this_sheets_help.ensureLoaded())\n \n def _fmt_cmdpal_summary(match, row, trigger_key):\n keystrokes = this_sheets_help.revbinds.get(row.longname, [None])[0] or ' '\n", "issue": "Python error after pressing SPACE to pull up command-name input\n**Small description**\r\nThe first time I press SPACE on a sheet, I often get the following Python error. It usually succeeds on the second press.\r\n\r\n**Expected result**\r\nOpen command prompt.\r\n\r\n**Actual result with screenshot**\r\n\r\n[errors_recent.txt](https://github.com/saulpw/visidata/files/14471969/errors_recent.txt)\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\nOpen any sheet and press SPACE.\r\nIt may take a few attempts. It seems to only happen the first time it is entered on a sheet, and only some of the time.\r\n\r\n**Additional context**\r\nPython 3.8.0\r\nVisiData 3.0.2\r\n\n", "before_files": [{"content": "import collections\nfrom functools import partial\nfrom visidata import DrawablePane, BaseSheet, vd, VisiData, CompleteKey, clipdraw, HelpSheet, colors, AcceptInput, AttrDict, drawcache_property\n\n\nvd.theme_option('color_cmdpalette', 'black on 72', 'base color of command palette')\nvd.theme_option('disp_cmdpal_max', 10, 'max number of suggestions for command palette')\n\nvd.help_longname = '''# Choose Command\nStart typing a command longname or keyword in its helpstring.\n\n- `Enter` to execute top command.\n- `Tab` to highlight top command.\n\n## When Command Highlighted\n\n- `Tab`/`Shift+Tab` to cycle highlighted command.\n- `Enter` to execute highlighted command.\n- `0-9` to execute numbered command.\n'''\n\ndef add_to_input(v, i, value=''):\n items = list(v.split())\n if not v or v.endswith(' '):\n items.append(value)\n else:\n items[-1] = value\n v = ' '.join(items) + ' '\n return v, len(v)\n\n\ndef accept_input(v, i, value=None):\n raise AcceptInput(v if value is None else value)\n\ndef accept_input_if_subset(v, i, value=''):\n # if no input, accept value under cmd palette cursor\n if not v:\n raise AcceptInput(value)\n\n # if the last item is a partial match, replace it with the full value\n parts = v.split()\n if value and value.startswith(parts[-1]):\n v = ' '.join(parts[:-1] + [value])\n\n raise AcceptInput(v)\n\[email protected]_property\ndef usedInputs(vd):\n return collections.defaultdict(int)\n\[email protected]\ndef execCommand2(sheet, cmd, *args, **kwargs):\n vd.usedInputs[cmd.longname] += 1\n\[email protected]\ndef inputPalette(sheet, prompt, items,\n value_key='key',\n formatter=lambda m, item, trigger_key: f'{trigger_key} {item}',\n multiple=False,\n **kwargs):\n bindings = dict()\n\n tabitem = -1\n\n def tab(n, nitems):\n nonlocal tabitem\n if not nitems: return None\n tabitem = (tabitem + n) % nitems\n\n def _draw_palette(value):\n words = value.lower().split()\n\n if multiple and words:\n if value.endswith(' '):\n finished_words = words\n unfinished_words = []\n else:\n finished_words = words[:-1]\n unfinished_words = [words[-1]]\n else:\n unfinished_words = words\n finished_words = []\n\n unuseditems = [item for item in items if item[value_key] not in finished_words]\n\n matches = vd.fuzzymatch(unuseditems, unfinished_words)\n\n 
h = sheet.windowHeight\n w = min(100, sheet.windowWidth)\n nitems = min(h-1, sheet.options.disp_cmdpal_max)\n\n useditems = []\n palrows = []\n\n for m in matches[:nitems]:\n useditems.append(m.match)\n palrows.append((m, m.match))\n\n favitems = sorted([item for item in unuseditems if item not in useditems],\n key=lambda item: -vd.usedInputs.get(item[value_key], 0))\n\n for item in favitems[:nitems-len(palrows)]:\n palrows.append((None, item))\n\n navailitems = min(len(palrows), nitems)\n\n bindings['^I'] = lambda *args: tab(1, navailitems) or args\n bindings['KEY_BTAB'] = lambda *args: tab(-1, navailitems) or args\n\n for i in range(nitems-len(palrows)):\n palrows.append((None, None))\n\n used_triggers = set()\n for i, (m, item) in enumerate(palrows):\n trigger_key = ''\n if tabitem >= 0 and item:\n tkey = f'{i+1}'[-1]\n if tkey not in used_triggers:\n trigger_key = tkey\n bindings[trigger_key] = partial(add_to_input if multiple else accept_input, value=item[value_key])\n used_triggers.add(trigger_key)\n\n attr = colors.color_cmdpalette\n\n if tabitem < 0 and palrows:\n _ , topitem = palrows[0]\n if not topitem: return\n if multiple:\n bindings[' '] = partial(add_to_input, value=topitem[value_key])\n bindings['^J'] = partial(accept_input_if_subset, value=topitem[value_key])\n else:\n bindings['^J'] = partial(accept_input, value=topitem[value_key])\n elif item and i == tabitem:\n if not item: return\n if multiple:\n bindings['^J'] = partial(accept_input_if_subset, value=item[value_key])\n bindings[' '] = partial(add_to_input, value=item[value_key])\n else:\n bindings['^J'] = partial(accept_input, value=item[value_key])\n attr = colors.color_menu_spec\n\n match_summary = formatter(m, item, trigger_key) if item else ' '\n\n clipdraw(sheet._scr, h-nitems-1+i, 0, match_summary, attr, w=w)\n\n return None\n\n completer = CompleteKey(sorted(item[value_key] for item in items))\n return vd.input(prompt,\n completer=completer,\n updater=_draw_palette,\n bindings=bindings,\n **kwargs)\n\n\ndef cmdlist(sheet):\n return [\n AttrDict(longname=row.longname,\n description=sheet.cmddict[(row.sheet, row.longname)].helpstr)\n for row in sheet.rows\n ]\nHelpSheet.cmdlist = drawcache_property(cmdlist)\n\n\[email protected]\ndef inputLongname(sheet):\n prompt = 'command name: '\n # get set of commands possible in the sheet\n this_sheets_help = HelpSheet('', source=sheet)\n this_sheets_help.ensureLoaded()\n\n def _fmt_cmdpal_summary(match, row, trigger_key):\n keystrokes = this_sheets_help.revbinds.get(row.longname, [None])[0] or ' '\n formatted_longname = match.formatted.get('longname', row.longname) if match else row.longname\n formatted_name = f'[:bold][:onclick {row.longname}]{formatted_longname}[/][/]'\n if vd.options.debug and match:\n keystrokes = f'[{match.score}]'\n r = f' [:keystrokes]{keystrokes.rjust(len(prompt)-5)}[/] '\n if trigger_key:\n r += f'[:keystrokes]{trigger_key}[/]'\n else:\n r += ' '\n\n r += f' {formatted_name}'\n if row.description:\n formatted_desc = match.formatted.get('description', row.description) if match else row.description\n r += f' - {formatted_desc}'\n return r\n\n return sheet.inputPalette(prompt, this_sheets_help.cmdlist,\n value_key='longname',\n formatter=_fmt_cmdpal_summary,\n help=vd.help_longname,\n type='longname')\n\n\[email protected]\ndef exec_longname(sheet, longname):\n if not sheet.getCommand(longname):\n vd.fail(f'no command {longname}')\n sheet.execCommand(longname)\n\n\nvd.addCommand('Space', 'exec-longname', 'exec_longname(inputLongname())', 'execute 
command by its longname')\n", "path": "visidata/features/cmdpalette.py"}], "after_files": [{"content": "import collections\nfrom functools import partial\nfrom visidata import DrawablePane, BaseSheet, vd, VisiData, CompleteKey, clipdraw, HelpSheet, colors, AcceptInput, AttrDict, drawcache_property\n\n\nvd.theme_option('color_cmdpalette', 'black on 72', 'base color of command palette')\nvd.theme_option('disp_cmdpal_max', 10, 'max number of suggestions for command palette')\n\nvd.help_longname = '''# Choose Command\nStart typing a command longname or keyword in its helpstring.\n\n- `Enter` to execute top command.\n- `Tab` to highlight top command.\n\n## When Command Highlighted\n\n- `Tab`/`Shift+Tab` to cycle highlighted command.\n- `Enter` to execute highlighted command.\n- `0-9` to execute numbered command.\n'''\n\ndef add_to_input(v, i, value=''):\n items = list(v.split())\n if not v or v.endswith(' '):\n items.append(value)\n else:\n items[-1] = value\n v = ' '.join(items) + ' '\n return v, len(v)\n\n\ndef accept_input(v, i, value=None):\n raise AcceptInput(v if value is None else value)\n\ndef accept_input_if_subset(v, i, value=''):\n # if no input, accept value under cmd palette cursor\n if not v:\n raise AcceptInput(value)\n\n # if the last item is a partial match, replace it with the full value\n parts = v.split()\n if value and value.startswith(parts[-1]):\n v = ' '.join(parts[:-1] + [value])\n\n raise AcceptInput(v)\n\[email protected]_property\ndef usedInputs(vd):\n return collections.defaultdict(int)\n\[email protected]\ndef execCommand2(sheet, cmd, *args, **kwargs):\n vd.usedInputs[cmd.longname] += 1\n\[email protected]\ndef inputPalette(sheet, prompt, items,\n value_key='key',\n formatter=lambda m, item, trigger_key: f'{trigger_key} {item}',\n multiple=False,\n **kwargs):\n bindings = dict()\n\n tabitem = -1\n\n def tab(n, nitems):\n nonlocal tabitem\n if not nitems: return None\n tabitem = (tabitem + n) % nitems\n\n def _draw_palette(value):\n words = value.lower().split()\n\n if multiple and words:\n if value.endswith(' '):\n finished_words = words\n unfinished_words = []\n else:\n finished_words = words[:-1]\n unfinished_words = [words[-1]]\n else:\n unfinished_words = words\n finished_words = []\n\n unuseditems = [item for item in items if item[value_key] not in finished_words]\n\n matches = vd.fuzzymatch(unuseditems, unfinished_words)\n\n h = sheet.windowHeight\n w = min(100, sheet.windowWidth)\n nitems = min(h-1, sheet.options.disp_cmdpal_max)\n\n useditems = []\n palrows = []\n\n for m in matches[:nitems]:\n useditems.append(m.match)\n palrows.append((m, m.match))\n\n favitems = sorted([item for item in unuseditems if item not in useditems],\n key=lambda item: -vd.usedInputs.get(item[value_key], 0))\n\n for item in favitems[:nitems-len(palrows)]:\n palrows.append((None, item))\n\n navailitems = min(len(palrows), nitems)\n\n bindings['^I'] = lambda *args: tab(1, navailitems) or args\n bindings['KEY_BTAB'] = lambda *args: tab(-1, navailitems) or args\n\n for i in range(nitems-len(palrows)):\n palrows.append((None, None))\n\n used_triggers = set()\n for i, (m, item) in enumerate(palrows):\n trigger_key = ''\n if tabitem >= 0 and item:\n tkey = f'{i+1}'[-1]\n if tkey not in used_triggers:\n trigger_key = tkey\n bindings[trigger_key] = partial(add_to_input if multiple else accept_input, value=item[value_key])\n used_triggers.add(trigger_key)\n\n attr = colors.color_cmdpalette\n\n if tabitem < 0 and palrows:\n _ , topitem = palrows[0]\n if not topitem: return\n if multiple:\n 
bindings[' '] = partial(add_to_input, value=topitem[value_key])\n bindings['^J'] = partial(accept_input_if_subset, value=topitem[value_key])\n else:\n bindings['^J'] = partial(accept_input, value=topitem[value_key])\n elif item and i == tabitem:\n if not item: return\n if multiple:\n bindings['^J'] = partial(accept_input_if_subset, value=item[value_key])\n bindings[' '] = partial(add_to_input, value=item[value_key])\n else:\n bindings['^J'] = partial(accept_input, value=item[value_key])\n attr = colors.color_menu_spec\n\n match_summary = formatter(m, item, trigger_key) if item else ' '\n\n clipdraw(sheet._scr, h-nitems-1+i, 0, match_summary, attr, w=w)\n\n return None\n\n completer = CompleteKey(sorted(item[value_key] for item in items))\n return vd.input(prompt,\n completer=completer,\n updater=_draw_palette,\n bindings=bindings,\n **kwargs)\n\n\ndef cmdlist(sheet):\n return [\n AttrDict(longname=row.longname,\n description=sheet.cmddict[(row.sheet, row.longname)].helpstr)\n for row in sheet.rows\n ]\nHelpSheet.cmdlist = drawcache_property(cmdlist)\n\n\[email protected]\ndef inputLongname(sheet):\n prompt = 'command name: '\n # get set of commands possible in the sheet\n this_sheets_help = HelpSheet('', source=sheet)\n vd.sync(this_sheets_help.ensureLoaded())\n\n def _fmt_cmdpal_summary(match, row, trigger_key):\n keystrokes = this_sheets_help.revbinds.get(row.longname, [None])[0] or ' '\n formatted_longname = match.formatted.get('longname', row.longname) if match else row.longname\n formatted_name = f'[:bold][:onclick {row.longname}]{formatted_longname}[/][/]'\n if vd.options.debug and match:\n keystrokes = f'[{match.score}]'\n r = f' [:keystrokes]{keystrokes.rjust(len(prompt)-5)}[/] '\n if trigger_key:\n r += f'[:keystrokes]{trigger_key}[/]'\n else:\n r += ' '\n\n r += f' {formatted_name}'\n if row.description:\n formatted_desc = match.formatted.get('description', row.description) if match else row.description\n r += f' - {formatted_desc}'\n return r\n\n return sheet.inputPalette(prompt, this_sheets_help.cmdlist,\n value_key='longname',\n formatter=_fmt_cmdpal_summary,\n help=vd.help_longname,\n type='longname')\n\n\[email protected]\ndef exec_longname(sheet, longname):\n if not sheet.getCommand(longname):\n vd.fail(f'no command {longname}')\n sheet.execCommand(longname)\n\n\nvd.addCommand('Space', 'exec-longname', 'exec_longname(inputLongname())', 'execute command by its longname')\n", "path": "visidata/features/cmdpalette.py"}]}
| 2,647 | 145 |
gh_patches_debug_3122
|
rasdani/github-patches
|
git_diff
|
saulpw__visidata-1721
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bugs with save-all
I've a question about saving.
1. I load a two sheet notebook "responses-grades-cda.xlsx".
2. `g ctrl+s`
3. it gives me a blank line -- why not default to name of file without xlsx extension?
4. I type in file name without extension + `vds`: "responses-grades-cda.vds".
5. I get an error: "AttributeError: 'XlsxIndexSheet' object has no attribute 'join'"
_Originally posted by @reagle in https://github.com/saulpw/visidata/discussions/1266#discussioncomment-4870711_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/save.py`
Content:
```
1 import collections
2
3 from visidata import *
4
5
6 vd.option('confirm_overwrite', True, 'whether to prompt for overwrite confirmation on save')
7 vd.option('safe_error', '#ERR', 'error string to use while saving', replay=True)
8
9 @Sheet.api
10 def safe_trdict(vs):
11 'returns string.translate dictionary for replacing tabs and newlines'
12 if options.safety_first:
13 delim = vs.options.delimiter
14 return {
15 0: '', # strip NUL completely
16 ord(delim): vs.options.tsv_safe_tab, # \t
17 10: vs.options.tsv_safe_newline, # \n
18 13: vs.options.tsv_safe_newline, # \r
19 }
20 return {}
21
22
23 @Sheet.api
24 def iterdispvals(sheet, *cols, format=False):
25 'For each row in sheet, yield OrderedDict of values for given cols. Values are typed if format=False, or a formatted display string if format=True.'
26 if not cols:
27 cols = sheet.visibleCols
28
29 transformers = collections.OrderedDict() # list of transformers for each column in order
30 for col in cols:
31 transformers[col] = [ col.type ]
32 if format:
33 formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter))
34 transformers[col].append(formatMaker(col._formatdict))
35 trdict = sheet.safe_trdict()
36 if trdict:
37 transformers[col].append(lambda v,trdict=trdict: v.translate(trdict))
38
39 options_safe_error = options.safe_error
40 for r in Progress(sheet.rows):
41 dispvals = collections.OrderedDict() # [col] -> value
42 for col, transforms in transformers.items():
43 try:
44 dispval = col.getValue(r)
45
46 except Exception as e:
47 vd.exceptionCaught(e)
48 dispval = options_safe_error or str(e)
49
50 try:
51 for t in transforms:
52 if dispval is None:
53 break
54 elif isinstance(dispval, TypedExceptionWrapper):
55 dispval = options_safe_error or str(dispval)
56 break
57 else:
58 dispval = t(dispval)
59
60 if dispval is None and format:
61 dispval = ''
62 except Exception as e:
63 dispval = str(dispval)
64
65 dispvals[col] = dispval
66
67 yield dispvals
68
69
70 @Sheet.api
71 def itervals(sheet, *cols, format=False):
72 for row in sheet.iterdispvals(*cols, format=format):
73 yield [row[c] for c in cols]
74
75 @BaseSheet.api
76 def getDefaultSaveName(sheet):
77 src = getattr(sheet, 'source', None)
78 if hasattr(src, 'scheme') and src.scheme:
79 return src.name + src.suffix
80 if isinstance(src, Path):
81 if sheet.options.is_set('save_filetype', sheet):
82 # if save_filetype is over-ridden from default, use it as the extension
83 return str(src.with_suffix('')) + '.' + sheet.options.save_filetype
84 return str(src)
85 else:
86 return sheet.name+'.'+getattr(sheet, 'filetype', options.save_filetype)
87
88
89 @VisiData.api
90 def save_cols(vd, cols):
91 sheet = cols[0].sheet
92 vs = copy(sheet)
93 vs.columns = list(cols)
94 vs.rows = sheet.rows
95 if len(cols) == 1:
96 savedcoltxt = cols[0].name + ' column'
97 else:
98 savedcoltxt = '%s columns' % len(cols)
99 path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName())
100 vd.saveSheets(path, vs, confirm_overwrite=options.confirm_overwrite)
101
102
103 @VisiData.api
104 def saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):
105 'Save all *vsheets* to *givenpath*.'
106
107 filetype = givenpath.ext or options.save_filetype
108
109 vd.clearCaches()
110
111 savefunc = getattr(vsheets[0], 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)
112
113 if savefunc is None:
114 vd.fail(f'no function to save as {filetype}')
115
116 if givenpath.exists() and confirm_overwrite:
117 vd.confirm("%s already exists. overwrite? " % givenpath.given)
118
119 vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype))
120
121 if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with /
122 for vs in vsheets:
123 vs.hasBeenModified = False
124 # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg)
125 # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path)
126 if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1:
127 vd.fail(f'cannot save multiple {filetype} sheets to non-dir')
128 return vd.execAsync(savefunc, givenpath, *vsheets)
129
130 # path is a dir
131
132 # save as individual files in the givenpath directory
133 try:
134 os.makedirs(givenpath, exist_ok=True)
135 except FileExistsError:
136 pass
137
138 if not givenpath.is_dir():
139 vd.fail(f'cannot save multiple {filetype} sheets to non-dir')
140
141 def _savefiles(vsheets, givenpath, savefunc, filetype):
142 for vs in vsheets:
143 p = Path((givenpath / vs.name).with_suffix('.'+filetype))
144 savefunc(p, vs)
145 vs.hasBeenModified = False
146 return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype)
147
148
149 @VisiData.api
150 def save_zip(vd, p, *vsheets):
151 vd.clearCaches()
152
153 import tempfile
154 import zipfile
155 with tempfile.TemporaryDirectory() as tmpdir:
156 with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp:
157 for vs in Progress(vsheets):
158 filetype = vs.options.save_filetype
159 tmpp = Path(f'{tmpdir}{vs.name}.{filetype}')
160 savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)
161 savefunc(tmpp, vs)
162 zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}')
163 vd.status('%s save finished' % p)
164
165
166 @VisiData.api
167 def save_txt(vd, p, *vsheets):
168 with p.open_text(mode='w', encoding=vsheets[0].options.encoding) as fp:
169 for vs in vsheets:
170 unitsep = vs.options.delimiter
171 rowsep = vs.options.row_delimiter
172 for dispvals in vs.iterdispvals(*vs.visibleCols, format=True):
173 fp.write(unitsep.join(dispvals.values()))
174 fp.write(rowsep)
175 vd.status('%s save finished' % p)
176
177
178 @BaseSheet.api
179 def rootSheet(sheet):
180 r = sheet
181 while isinstance(r.source, BaseSheet):
182 r = r.source
183
184 return r
185
186 BaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath("save to: ", value=getDefaultSaveName()), sheet, confirm_overwrite=options.confirm_overwrite)', 'save current sheet to filename in format determined by extension (default .tsv)')
187 BaseSheet.addCommand('', 'save-sheet-really', 'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation')
188 BaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet(), confirm_overwrite=options.confirm_overwrite)', 'save root sheet to its source')
189 BaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath("save all sheets to: "), *vd.stackedSheets, confirm_overwrite=options.confirm_overwrite)', 'save all sheets to given file or directory)')
190 IndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath("save %d sheets to: " % nSelectedRows, value="_".join(getattr(vs, "name", None) or "blank" for vs in selectedRows)), *selectedRows, confirm_overwrite=options.confirm_overwrite)', 'save all selected sheets to given file or directory')
191 Sheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)')
192 Sheet.addCommand('', 'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)')
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/visidata/save.py b/visidata/save.py
--- a/visidata/save.py
+++ b/visidata/save.py
@@ -104,6 +104,10 @@
def saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):
'Save all *vsheets* to *givenpath*.'
+ if not vsheets: # blank tuple
+ vd.warning('no sheets to save')
+ return
+
filetype = givenpath.ext or options.save_filetype
vd.clearCaches()
|
{"golden_diff": "diff --git a/visidata/save.py b/visidata/save.py\n--- a/visidata/save.py\n+++ b/visidata/save.py\n@@ -104,6 +104,10 @@\n def saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):\n 'Save all *vsheets* to *givenpath*.'\n \n+ if not vsheets: # blank tuple\n+ vd.warning('no sheets to save')\n+ return\n+\n filetype = givenpath.ext or options.save_filetype\n \n vd.clearCaches()\n", "issue": "Bugs with save-all\n I've a question about saving.\r\n\r\n1. I load a two sheet notebook \"responses-grades-cda.xlsx\".\r\n2. `g ctrl+s`\r\n3. it gives me a blank line -- why not default to name of file without xlsx extension?\r\n4. I type in file name without extension + `vds`: \"responses-grades-cda.vds\".\r\n5. I get an error: \"AttributeError: 'X1sxIndexSheet' object has no attribute 'join'\"\r\n\r\n_Originally posted by @reagle in https://github.com/saulpw/visidata/discussions/1266#discussioncomment-4870711_\r\n \n", "before_files": [{"content": "import collections\n\nfrom visidata import *\n\n\nvd.option('confirm_overwrite', True, 'whether to prompt for overwrite confirmation on save')\nvd.option('safe_error', '#ERR', 'error string to use while saving', replay=True)\n\[email protected]\ndef safe_trdict(vs):\n 'returns string.translate dictionary for replacing tabs and newlines'\n if options.safety_first:\n delim = vs.options.delimiter\n return {\n 0: '', # strip NUL completely\n ord(delim): vs.options.tsv_safe_tab, # \\t\n 10: vs.options.tsv_safe_newline, # \\n\n 13: vs.options.tsv_safe_newline, # \\r\n }\n return {}\n\n\[email protected]\ndef iterdispvals(sheet, *cols, format=False):\n 'For each row in sheet, yield OrderedDict of values for given cols. Values are typed if format=False, or a formatted display string if format=True.'\n if not cols:\n cols = sheet.visibleCols\n\n transformers = collections.OrderedDict() # list of transformers for each column in order\n for col in cols:\n transformers[col] = [ col.type ]\n if format:\n formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter))\n transformers[col].append(formatMaker(col._formatdict))\n trdict = sheet.safe_trdict()\n if trdict:\n transformers[col].append(lambda v,trdict=trdict: v.translate(trdict))\n\n options_safe_error = options.safe_error\n for r in Progress(sheet.rows):\n dispvals = collections.OrderedDict() # [col] -> value\n for col, transforms in transformers.items():\n try:\n dispval = col.getValue(r)\n\n except Exception as e:\n vd.exceptionCaught(e)\n dispval = options_safe_error or str(e)\n\n try:\n for t in transforms:\n if dispval is None:\n break\n elif isinstance(dispval, TypedExceptionWrapper):\n dispval = options_safe_error or str(dispval)\n break\n else:\n dispval = t(dispval)\n\n if dispval is None and format:\n dispval = ''\n except Exception as e:\n dispval = str(dispval)\n\n dispvals[col] = dispval\n\n yield dispvals\n\n\[email protected]\ndef itervals(sheet, *cols, format=False):\n for row in sheet.iterdispvals(*cols, format=format):\n yield [row[c] for c in cols]\n\[email protected]\ndef getDefaultSaveName(sheet):\n src = getattr(sheet, 'source', None)\n if hasattr(src, 'scheme') and src.scheme:\n return src.name + src.suffix\n if isinstance(src, Path):\n if sheet.options.is_set('save_filetype', sheet):\n # if save_filetype is over-ridden from default, use it as the extension\n return str(src.with_suffix('')) + '.' 
+ sheet.options.save_filetype\n return str(src)\n else:\n return sheet.name+'.'+getattr(sheet, 'filetype', options.save_filetype)\n\n\[email protected]\ndef save_cols(vd, cols):\n sheet = cols[0].sheet\n vs = copy(sheet)\n vs.columns = list(cols)\n vs.rows = sheet.rows\n if len(cols) == 1:\n savedcoltxt = cols[0].name + ' column'\n else:\n savedcoltxt = '%s columns' % len(cols)\n path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName())\n vd.saveSheets(path, vs, confirm_overwrite=options.confirm_overwrite)\n\n\[email protected]\ndef saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):\n 'Save all *vsheets* to *givenpath*.'\n\n filetype = givenpath.ext or options.save_filetype\n\n vd.clearCaches()\n\n savefunc = getattr(vsheets[0], 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n\n if savefunc is None:\n vd.fail(f'no function to save as {filetype}')\n\n if givenpath.exists() and confirm_overwrite:\n vd.confirm(\"%s already exists. overwrite? \" % givenpath.given)\n\n vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype))\n\n if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with /\n for vs in vsheets:\n vs.hasBeenModified = False\n # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg)\n # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path)\n if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1:\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n return vd.execAsync(savefunc, givenpath, *vsheets)\n\n # path is a dir\n\n # save as individual files in the givenpath directory\n try:\n os.makedirs(givenpath, exist_ok=True)\n except FileExistsError:\n pass\n\n if not givenpath.is_dir():\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n\n def _savefiles(vsheets, givenpath, savefunc, filetype):\n for vs in vsheets:\n p = Path((givenpath / vs.name).with_suffix('.'+filetype))\n savefunc(p, vs)\n vs.hasBeenModified = False\n return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype)\n\n\[email protected]\ndef save_zip(vd, p, *vsheets):\n vd.clearCaches()\n\n import tempfile\n import zipfile\n with tempfile.TemporaryDirectory() as tmpdir:\n with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp:\n for vs in Progress(vsheets):\n filetype = vs.options.save_filetype\n tmpp = Path(f'{tmpdir}{vs.name}.{filetype}')\n savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n savefunc(tmpp, vs)\n zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}')\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef save_txt(vd, p, *vsheets):\n with p.open_text(mode='w', encoding=vsheets[0].options.encoding) as fp:\n for vs in vsheets:\n unitsep = vs.options.delimiter\n rowsep = vs.options.row_delimiter\n for dispvals in vs.iterdispvals(*vs.visibleCols, format=True):\n fp.write(unitsep.join(dispvals.values()))\n fp.write(rowsep)\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef rootSheet(sheet):\n r = sheet\n while isinstance(r.source, BaseSheet):\n r = r.source\n\n return r\n\nBaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath(\"save to: \", value=getDefaultSaveName()), sheet, confirm_overwrite=options.confirm_overwrite)', 'save current sheet to filename in format determined by extension (default .tsv)')\nBaseSheet.addCommand('', 'save-sheet-really', 
'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation')\nBaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet(), confirm_overwrite=options.confirm_overwrite)', 'save root sheet to its source')\nBaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath(\"save all sheets to: \"), *vd.stackedSheets, confirm_overwrite=options.confirm_overwrite)', 'save all sheets to given file or directory)')\nIndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath(\"save %d sheets to: \" % nSelectedRows, value=\"_\".join(getattr(vs, \"name\", None) or \"blank\" for vs in selectedRows)), *selectedRows, confirm_overwrite=options.confirm_overwrite)', 'save all selected sheets to given file or directory')\nSheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)')\nSheet.addCommand('', 'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)')\n", "path": "visidata/save.py"}], "after_files": [{"content": "import collections\n\nfrom visidata import *\n\n\nvd.option('confirm_overwrite', True, 'whether to prompt for overwrite confirmation on save')\nvd.option('safe_error', '#ERR', 'error string to use while saving', replay=True)\n\[email protected]\ndef safe_trdict(vs):\n 'returns string.translate dictionary for replacing tabs and newlines'\n if options.safety_first:\n delim = vs.options.delimiter\n return {\n 0: '', # strip NUL completely\n ord(delim): vs.options.tsv_safe_tab, # \\t\n 10: vs.options.tsv_safe_newline, # \\n\n 13: vs.options.tsv_safe_newline, # \\r\n }\n return {}\n\n\[email protected]\ndef iterdispvals(sheet, *cols, format=False):\n 'For each row in sheet, yield OrderedDict of values for given cols. 
Values are typed if format=False, or a formatted display string if format=True.'\n if not cols:\n cols = sheet.visibleCols\n\n transformers = collections.OrderedDict() # list of transformers for each column in order\n for col in cols:\n transformers[col] = [ col.type ]\n if format:\n formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter))\n transformers[col].append(formatMaker(col._formatdict))\n trdict = sheet.safe_trdict()\n if trdict:\n transformers[col].append(lambda v,trdict=trdict: v.translate(trdict))\n\n options_safe_error = options.safe_error\n for r in Progress(sheet.rows):\n dispvals = collections.OrderedDict() # [col] -> value\n for col, transforms in transformers.items():\n try:\n dispval = col.getValue(r)\n\n except Exception as e:\n vd.exceptionCaught(e)\n dispval = options_safe_error or str(e)\n\n try:\n for t in transforms:\n if dispval is None:\n break\n elif isinstance(dispval, TypedExceptionWrapper):\n dispval = options_safe_error or str(dispval)\n break\n else:\n dispval = t(dispval)\n\n if dispval is None and format:\n dispval = ''\n except Exception as e:\n dispval = str(dispval)\n\n dispvals[col] = dispval\n\n yield dispvals\n\n\[email protected]\ndef itervals(sheet, *cols, format=False):\n for row in sheet.iterdispvals(*cols, format=format):\n yield [row[c] for c in cols]\n\[email protected]\ndef getDefaultSaveName(sheet):\n src = getattr(sheet, 'source', None)\n if hasattr(src, 'scheme') and src.scheme:\n return src.name + src.suffix\n if isinstance(src, Path):\n if sheet.options.is_set('save_filetype', sheet):\n # if save_filetype is over-ridden from default, use it as the extension\n return str(src.with_suffix('')) + '.' + sheet.options.save_filetype\n return str(src)\n else:\n return sheet.name+'.'+getattr(sheet, 'filetype', options.save_filetype)\n\n\[email protected]\ndef save_cols(vd, cols):\n sheet = cols[0].sheet\n vs = copy(sheet)\n vs.columns = list(cols)\n vs.rows = sheet.rows\n if len(cols) == 1:\n savedcoltxt = cols[0].name + ' column'\n else:\n savedcoltxt = '%s columns' % len(cols)\n path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName())\n vd.saveSheets(path, vs, confirm_overwrite=options.confirm_overwrite)\n\n\[email protected]\ndef saveSheets(vd, givenpath, *vsheets, confirm_overwrite=False):\n 'Save all *vsheets* to *givenpath*.'\n\n if not vsheets: # blank tuple\n vd.warning('no sheets to save')\n return\n\n filetype = givenpath.ext or options.save_filetype\n\n vd.clearCaches()\n\n savefunc = getattr(vsheets[0], 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n\n if savefunc is None:\n vd.fail(f'no function to save as {filetype}')\n\n if givenpath.exists() and confirm_overwrite:\n vd.confirm(\"%s already exists. overwrite? 
\" % givenpath.given)\n\n vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype))\n\n if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with /\n for vs in vsheets:\n vs.hasBeenModified = False\n # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg)\n # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path)\n if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1:\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n return vd.execAsync(savefunc, givenpath, *vsheets)\n\n # path is a dir\n\n # save as individual files in the givenpath directory\n try:\n os.makedirs(givenpath, exist_ok=True)\n except FileExistsError:\n pass\n\n if not givenpath.is_dir():\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n\n def _savefiles(vsheets, givenpath, savefunc, filetype):\n for vs in vsheets:\n p = Path((givenpath / vs.name).with_suffix('.'+filetype))\n savefunc(p, vs)\n vs.hasBeenModified = False\n return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype)\n\n\[email protected]\ndef save_zip(vd, p, *vsheets):\n vd.clearCaches()\n\n import tempfile\n import zipfile\n with tempfile.TemporaryDirectory() as tmpdir:\n with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp:\n for vs in Progress(vsheets):\n filetype = vs.options.save_filetype\n tmpp = Path(f'{tmpdir}{vs.name}.{filetype}')\n savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n savefunc(tmpp, vs)\n zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}')\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef save_txt(vd, p, *vsheets):\n with p.open_text(mode='w', encoding=vsheets[0].options.encoding) as fp:\n for vs in vsheets:\n unitsep = vs.options.delimiter\n rowsep = vs.options.row_delimiter\n for dispvals in vs.iterdispvals(*vs.visibleCols, format=True):\n fp.write(unitsep.join(dispvals.values()))\n fp.write(rowsep)\n vd.status('%s save finished' % p)\n\n\[email protected]\ndef rootSheet(sheet):\n r = sheet\n while isinstance(r.source, BaseSheet):\n r = r.source\n\n return r\n\nBaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath(\"save to: \", value=getDefaultSaveName()), sheet, confirm_overwrite=options.confirm_overwrite)', 'save current sheet to filename in format determined by extension (default .tsv)')\nBaseSheet.addCommand('', 'save-sheet-really', 'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation')\nBaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet(), confirm_overwrite=options.confirm_overwrite)', 'save root sheet to its source')\nBaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath(\"save all sheets to: \"), *vd.stackedSheets, confirm_overwrite=options.confirm_overwrite)', 'save all sheets to given file or directory)')\nIndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath(\"save %d sheets to: \" % nSelectedRows, value=\"_\".join(getattr(vs, \"name\", None) or \"blank\" for vs in selectedRows)), *selectedRows, confirm_overwrite=options.confirm_overwrite)', 'save all selected sheets to given file or directory')\nSheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)')\nSheet.addCommand('', 
'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)')\n", "path": "visidata/save.py"}]}
| 2,821 | 125 |
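For the `save.py` record above, the patch simply refuses to proceed when `save-all` is invoked with an empty sheet list, turning a confusing traceback into a warning. A minimal, library-agnostic sketch of the same guard follows; the function and message are assumptions for illustration only.

```python
def save_sheets(path, *sheets, confirm_overwrite=False):
    # Saving nothing should be a visible no-op, not a crash inside the writer.
    if not sheets:
        print("warning: no sheets to save")
        return
    filetype = path.rsplit(".", 1)[-1] if "." in path else "tsv"
    for sheet in sheets:
        print(f"would save {sheet!r} to {path} as {filetype}")


save_sheets("responses-grades-cda.vds")                         # warns, returns early
save_sheets("responses-grades-cda.vds", "grades", "responses")  # proceeds
```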
gh_patches_debug_41254 | rasdani/github-patches | git_diff | Pylons__pyramid-3029 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove request.exception if the excview tween fails to handle the exception
Pyramid 1.9 makes `request.exception` and `request.exc_info` a little more important as I've moved the pyramid_tm tween over the excview and in general would advocate to move most tweens over the excview. With that in mind it's currently not possible to test `request.exception` to see if the response was rendered in relation to that exception - the excview tween sets the exception even if it failed to squash it (attempted to render an excview and couldn't find one). Ideally the exception would be related to the response that was generated when it was squashed. This would be more explicit if we used `response.exception` to indicate the response is from a squashed exception but I think that's a larger change.
I'm proposing to remove `request.exception` and `request.exc_info` in the excview tween if it reraises the original exception. This makes introspection `request.exception` more reliable by upstream tweens that want to know what the squashed exception was... Of course any raised exception should be more interesting than the original `request.exception` but if the tween receives a response then they can see if it is a response generated by a squashed exception or if it is a "normal" response.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/tweens.py`
Content:
```
1 import sys
2
3 from pyramid.compat import reraise
4 from pyramid.exceptions import PredicateMismatch
5 from pyramid.interfaces import (
6 IExceptionViewClassifier,
7 IRequest,
8 )
9
10 from zope.interface import providedBy
11 from pyramid.view import _call_view
12
13 def excview_tween_factory(handler, registry):
14 """ A :term:`tween` factory which produces a tween that catches an
15 exception raised by downstream tweens (or the main Pyramid request
16 handler) and, if possible, converts it into a Response using an
17 :term:`exception view`."""
18
19 def excview_tween(request):
20 attrs = request.__dict__
21 try:
22 response = handler(request)
23 except Exception as exc:
24 # WARNING: do not assign the result of sys.exc_info() to a local
25 # var here, doing so will cause a leak. We used to actually
26 # explicitly delete both "exception" and "exc_info" from ``attrs``
27 # in a ``finally:`` clause below, but now we do not because these
28 # attributes are useful to upstream tweens. This actually still
29 # apparently causes a reference cycle, but it is broken
30 # successfully by the garbage collector (see
31 # https://github.com/Pylons/pyramid/issues/1223).
32 attrs['exc_info'] = sys.exc_info()
33 attrs['exception'] = exc
34 # clear old generated request.response, if any; it may
35 # have been mutated by the view, and its state is not
36 # sane (e.g. caching headers)
37 if 'response' in attrs:
38 del attrs['response']
39 # we use .get instead of .__getitem__ below due to
40 # https://github.com/Pylons/pyramid/issues/700
41 request_iface = attrs.get('request_iface', IRequest)
42 provides = providedBy(exc)
43 try:
44 response = _call_view(
45 registry,
46 request,
47 exc,
48 provides,
49 '',
50 view_classifier=IExceptionViewClassifier,
51 request_iface=request_iface.combined
52 )
53
54 # if views matched but did not pass predicates, squash the error
55 # and re-raise the original exception
56 except PredicateMismatch:
57 response = None
58
59 # re-raise the original exception as no exception views were
60 # able to handle the error
61 if response is None:
62 reraise(*attrs['exc_info'])
63
64 return response
65
66 return excview_tween
67
68 MAIN = 'MAIN'
69 INGRESS = 'INGRESS'
70 EXCVIEW = 'pyramid.tweens.excview_tween_factory'
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyramid/tweens.py b/pyramid/tweens.py
--- a/pyramid/tweens.py
+++ b/pyramid/tweens.py
@@ -10,6 +10,50 @@
from zope.interface import providedBy
from pyramid.view import _call_view
+def _error_handler(request, exc):
+ # NOTE: we do not need to delete exc_info because this function
+ # should never be in the call stack of the exception
+ exc_info = sys.exc_info()
+
+ attrs = request.__dict__
+ attrs['exc_info'] = exc_info
+ attrs['exception'] = exc
+ # clear old generated request.response, if any; it may
+ # have been mutated by the view, and its state is not
+ # sane (e.g. caching headers)
+ if 'response' in attrs:
+ del attrs['response']
+ # we use .get instead of .__getitem__ below due to
+ # https://github.com/Pylons/pyramid/issues/700
+ request_iface = attrs.get('request_iface', IRequest)
+ provides = providedBy(exc)
+ try:
+ response = _call_view(
+ request.registry,
+ request,
+ exc,
+ provides,
+ '',
+ view_classifier=IExceptionViewClassifier,
+ request_iface=request_iface.combined
+ )
+
+ # if views matched but did not pass predicates then treat the
+ # same as not finding any matching views
+ except PredicateMismatch:
+ response = None
+
+ # re-raise the original exception as no exception views were
+ # able to handle the error
+ if response is None:
+ if 'exception' in attrs:
+ del attrs['exception']
+ if 'exc_info' in attrs:
+ del attrs['exc_info']
+ reraise(*exc_info)
+
+ return response
+
def excview_tween_factory(handler, registry):
""" A :term:`tween` factory which produces a tween that catches an
exception raised by downstream tweens (or the main Pyramid request
@@ -17,50 +61,10 @@
:term:`exception view`."""
def excview_tween(request):
- attrs = request.__dict__
try:
response = handler(request)
except Exception as exc:
- # WARNING: do not assign the result of sys.exc_info() to a local
- # var here, doing so will cause a leak. We used to actually
- # explicitly delete both "exception" and "exc_info" from ``attrs``
- # in a ``finally:`` clause below, but now we do not because these
- # attributes are useful to upstream tweens. This actually still
- # apparently causes a reference cycle, but it is broken
- # successfully by the garbage collector (see
- # https://github.com/Pylons/pyramid/issues/1223).
- attrs['exc_info'] = sys.exc_info()
- attrs['exception'] = exc
- # clear old generated request.response, if any; it may
- # have been mutated by the view, and its state is not
- # sane (e.g. caching headers)
- if 'response' in attrs:
- del attrs['response']
- # we use .get instead of .__getitem__ below due to
- # https://github.com/Pylons/pyramid/issues/700
- request_iface = attrs.get('request_iface', IRequest)
- provides = providedBy(exc)
- try:
- response = _call_view(
- registry,
- request,
- exc,
- provides,
- '',
- view_classifier=IExceptionViewClassifier,
- request_iface=request_iface.combined
- )
-
- # if views matched but did not pass predicates, squash the error
- # and re-raise the original exception
- except PredicateMismatch:
- response = None
-
- # re-raise the original exception as no exception views were
- # able to handle the error
- if response is None:
- reraise(*attrs['exc_info'])
-
+ response = _error_handler(request, exc)
return response
return excview_tween
|
{"golden_diff": "diff --git a/pyramid/tweens.py b/pyramid/tweens.py\n--- a/pyramid/tweens.py\n+++ b/pyramid/tweens.py\n@@ -10,6 +10,50 @@\n from zope.interface import providedBy\n from pyramid.view import _call_view\n \n+def _error_handler(request, exc):\n+ # NOTE: we do not need to delete exc_info because this function\n+ # should never be in the call stack of the exception\n+ exc_info = sys.exc_info()\n+\n+ attrs = request.__dict__\n+ attrs['exc_info'] = exc_info\n+ attrs['exception'] = exc\n+ # clear old generated request.response, if any; it may\n+ # have been mutated by the view, and its state is not\n+ # sane (e.g. caching headers)\n+ if 'response' in attrs:\n+ del attrs['response']\n+ # we use .get instead of .__getitem__ below due to\n+ # https://github.com/Pylons/pyramid/issues/700\n+ request_iface = attrs.get('request_iface', IRequest)\n+ provides = providedBy(exc)\n+ try:\n+ response = _call_view(\n+ request.registry,\n+ request,\n+ exc,\n+ provides,\n+ '',\n+ view_classifier=IExceptionViewClassifier,\n+ request_iface=request_iface.combined\n+ )\n+\n+ # if views matched but did not pass predicates then treat the\n+ # same as not finding any matching views\n+ except PredicateMismatch:\n+ response = None\n+\n+ # re-raise the original exception as no exception views were\n+ # able to handle the error\n+ if response is None:\n+ if 'exception' in attrs:\n+ del attrs['exception']\n+ if 'exc_info' in attrs:\n+ del attrs['exc_info']\n+ reraise(*exc_info)\n+\n+ return response\n+\n def excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n@@ -17,50 +61,10 @@\n :term:`exception view`.\"\"\"\n \n def excview_tween(request):\n- attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n- # WARNING: do not assign the result of sys.exc_info() to a local\n- # var here, doing so will cause a leak. We used to actually\n- # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n- # in a ``finally:`` clause below, but now we do not because these\n- # attributes are useful to upstream tweens. This actually still\n- # apparently causes a reference cycle, but it is broken\n- # successfully by the garbage collector (see\n- # https://github.com/Pylons/pyramid/issues/1223).\n- attrs['exc_info'] = sys.exc_info()\n- attrs['exception'] = exc\n- # clear old generated request.response, if any; it may\n- # have been mutated by the view, and its state is not\n- # sane (e.g. 
caching headers)\n- if 'response' in attrs:\n- del attrs['response']\n- # we use .get instead of .__getitem__ below due to\n- # https://github.com/Pylons/pyramid/issues/700\n- request_iface = attrs.get('request_iface', IRequest)\n- provides = providedBy(exc)\n- try:\n- response = _call_view(\n- registry,\n- request,\n- exc,\n- provides,\n- '',\n- view_classifier=IExceptionViewClassifier,\n- request_iface=request_iface.combined\n- )\n-\n- # if views matched but did not pass predicates, squash the error\n- # and re-raise the original exception\n- except PredicateMismatch:\n- response = None\n-\n- # re-raise the original exception as no exception views were\n- # able to handle the error\n- if response is None:\n- reraise(*attrs['exc_info'])\n-\n+ response = _error_handler(request, exc)\n return response\n \n return excview_tween\n", "issue": "remove request.exception if the excview tween fails to handle the exception\nPyramid 1.9 makes `request.exception` and `request.exc_info` a little more important as I've moved the pyramid_tm tween over the excview and in general would advocate to move most tweens over the excview. With that in mind it's currently not possible to test `request.exception` to see if the response was rendered in relation to that exception - the excview tween sets the exception even if it failed to squash it (attempted to render an excview and couldn't find one). Ideally the exception would be related to the response that was generated when it was squashed. This would be more explicit if we used `response.exception` to indicate the response is from a squashed exception but I think that's a larger change.\r\n\r\nI'm proposing to remove `request.exception` and `request.exc_info` in the excview tween if it reraises the original exception. This makes introspection `request.exception` more reliable by upstream tweens that want to know what the squashed exception was... Of course any raised exception should be more interesting than the original `request.exception` but if the tween receives a response then they can see if it is a response generated by a squashed exception or if it is a \"normal\" response.\n", "before_files": [{"content": "import sys\n\nfrom pyramid.compat import reraise\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n # WARNING: do not assign the result of sys.exc_info() to a local\n # var here, doing so will cause a leak. We used to actually\n # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n # in a ``finally:`` clause below, but now we do not because these\n # attributes are useful to upstream tweens. This actually still\n # apparently causes a reference cycle, but it is broken\n # successfully by the garbage collector (see\n # https://github.com/Pylons/pyramid/issues/1223).\n attrs['exc_info'] = sys.exc_info()\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. 
caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n try:\n response = _call_view(\n registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n\n # if views matched but did not pass predicates, squash the error\n # and re-raise the original exception\n except PredicateMismatch:\n response = None\n\n # re-raise the original exception as no exception views were\n # able to handle the error\n if response is None:\n reraise(*attrs['exc_info'])\n\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}], "after_files": [{"content": "import sys\n\nfrom pyramid.compat import reraise\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef _error_handler(request, exc):\n # NOTE: we do not need to delete exc_info because this function\n # should never be in the call stack of the exception\n exc_info = sys.exc_info()\n\n attrs = request.__dict__\n attrs['exc_info'] = exc_info\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n try:\n response = _call_view(\n request.registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n\n # if views matched but did not pass predicates then treat the\n # same as not finding any matching views\n except PredicateMismatch:\n response = None\n\n # re-raise the original exception as no exception views were\n # able to handle the error\n if response is None:\n if 'exception' in attrs:\n del attrs['exception']\n if 'exc_info' in attrs:\n del attrs['exc_info']\n reraise(*exc_info)\n\n return response\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n try:\n response = handler(request)\n except Exception as exc:\n response = _error_handler(request, exc)\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}]}
| 1,225 | 960 |
gh_patches_debug_14765
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-1158
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix for sunpy paper listing 3.7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/net/helio/parser.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Author: Michael Malocha <[email protected]>
3 # Last Edit: September 22nd, 2013
4 #
5 # This module was developed with funding from the GSOC 2013 summer of code
6 #
7
8 """
9 This module is meant to parse the HELIO registry and return WSDL endpoints to
10 facilitate the interfacing between further modules and HELIO.
11 """
12 from __future__ import absolute_import
13 from urllib2 import urlopen, URLError
14 #import sunpy.util.etree as EL
15 import xml.etree.ElementTree as EL
16 from sunpy.net.helio import registry_links as RL
17 from bs4 import BeautifulSoup
18 from contextlib import closing
19
20 __author__ = 'Michael Malocha'
21 __version__ = 'September 22nd, 2013'
22
23 # Lifespan in seconds before a link times-out
24 LINK_TIMEOUT = 3
25
26
27 def webservice_parser(service='HEC'):
28 """
29 Quickly parses important contents from HELIO registry.
30
31 Uses the link contained in registry_links in with 'service' appended
32 and scrapes the web-service links contained on that webpage.
33
34 Parameters
35 ----------
36 service: str
37 Indicates which particular HELIO service is used. Defaults to HEC.
38
39 Returns
40 -------
41 links: list or NoneType
42 List of urls to registries containing WSDL endpoints.
43
44 Examples
45 --------
46 >>> from sunpy.net.helio import parser
47 >>> parser.webservice_parser()
48 ['http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService',
49 'http://festung3.oats.inaf.it:8080/helio-hec/HelioService',
50 'http://festung1.oats.inaf.it:8080/helio-hec/HelioService',
51 'http://hec.helio-vo.eu/helio_hec/HelioService',
52 'http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioLongQueryService',
53 'http://festung3.oats.inaf.it:8080/helio-hec/HelioLongQueryService',
54 'http://festung1.oats.inaf.it:8080/helio-hec/HelioLongQueryService',
55 'http://hec.helio-vo.eu/helio_hec/HelioLongQueryService']
56 """
57 link = RL.LINK + service.lower()
58 xml = link_test(link)
59 if xml is None:
60 return xml
61 root = EL.fromstring(xml)
62 links = []
63
64 #WARNING: getiterator is deprecated in Python 2.7+
65 #Fix for 3.x support
66 for interface in root.getiterator('interface'):
67 service_type = interface.attrib
68 key = service_type.keys()
69 if len(key) > 0:
70 value = service_type[key[0]]
71 if value == 'vr:WebService':
72 for url in interface.getiterator('accessURL'):
73 if url.text not in links:
74 links.append(url.text)
75 return links
76
77
78 def endpoint_parser(link):
79 """
80 Takes a link to a list of endpoints and parses the WSDL links.
81
82 Feeding 1 result from webservice_parser() into endpoint_parser() at a time
83 will return a list of WSDL endpoints that are contained on the page from
84 that link that was passed in.
85
86 Parameters
87 ----------
88 link: str
89 A url to a page containing links to WSDL files.
90
91 Returns
92 -------
93 endpoints: list or NoneType
94 A list containing all of the available WSDL endpoints from the passed
95 in url.
96
97 Examples
98 --------
99 >>> from sunpy.net.helio import parser
100 >>> parser.endpoint_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')
101 ['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService?wsdl',
102 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0?wsdl',
103 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0b?wsdl',
104 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService?wsdl',
105 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0?wsdl',
106 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_1?wsdl',
107 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0b?wsdl',
108 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']
109 """
110 endpoint_page = link_test(link)
111 if endpoint_page is None:
112 return None
113 soup = BeautifulSoup(endpoint_page)
114 endpoints = []
115 for web_link in soup.find_all('a'):
116 endpoints.append(web_link.get('href'))
117 return endpoints
118
119
120 def taverna_parser(link):
121 """
122 Takes a link to a list of endpoints and parses the taverna WSDL links.
123
124 Takes a url to a page containing a list of endpoints, then passes that url
125 to endpoint_parser(). Upon receiving the resulting list from the parser
126 taverna_parser() goes through the list and finds all the WSDL links for
127 the taverna web-service. It then returns a list containing the filtered
128 links.
129
130 Parameters
131 ----------
132 link: str
133 A url to a page containing links to WSDL files.
134
135 Returns
136 -------
137 taverna_links: list or NoneType
138 A list containing WSDL links for a taverna web-service
139
140 Examples
141 --------
142 >>> from sunpy.net.helio import parser
143 >>> parser.taverna_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')
144 ['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']
145 """
146 endpoints = endpoint_parser(link)
147 taverna_links = []
148 if endpoints is None:
149 return None
150 for web_link in endpoints:
151 if 'Taverna' in web_link:
152 taverna_links.append(web_link)
153 if len(taverna_links) == 0:
154 return None
155 return taverna_links
156
157
158 def link_test(link):
159 """
160 Just a quick function to test a link.
161
162 Quickly checks to see if the URL is a valid link; if it is it returns the
163 downloaded contents of that page.
164
165 Parameters
166 ----------
167 link: str
168 A string containing a URL
169
170 Returns
171 -------
172 webpage: str or NoneType
173 String containing the webresults
174
175 Examples
176 --------
177 >>> from sunpy.net.helio import parser
178 >>> parser.link_test('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')
179 u'<html>\n<head>...</body>\n</html>\n'
180
181 >>> print parser.link_test('http://rrnx.invalid_url5523.com')
182 None
183 """
184 try:
185 with closing(urlopen(link)) as fd:
186 return fd.read()
187 except (ValueError, URLError):
188 return None
189
190
191 def wsdl_retriever(service='HEC'):
192 """
193 Retrieves a link to a taverna WSDL file
194
195 This is essentially the master method, from it all the other functions get
196 called and it essentially knits everything together. It gets a list of
197 service links via webservice_parser(), then filters the results via
198 taverna_parser(). Finally it tests all the returned taverna WSDL links
199 and returns the first live taverna endpoint.
200
201 Parameters
202 ----------
203 service: str
204 Indicates which particular HELIO service is used. Defaults to HEC.
205
206 Returns
207 -------
208 wsdl: str
209 URL to a single live taverna endpoint
210
211 Examples
212 --------
213 >>> from sunpy.net.helio import parser
214 >>> parser.wsdl_retriever()
215 'http://msslkz.mssl.ucl.ac.uk:80/helio_hec/HelioTavernaService?wsdl'
216
217 Notes
218 -----
219 * Currently only support for HEC exists, but it was designed so that it
220 could be expanded at a later date
221 * There is a 3 second timeout lifespan on links, so there is potential for
222 this function to take a while to return. Timeout duration can be
223 controlled through the LINK_TIMEOUT value
224 """
225 service_links = webservice_parser(service=service)
226 wsdl = None
227 wsdl_links = None
228 if service_links is None:
229 return None
230 for link in service_links:
231 wsdl_links = taverna_parser(link)
232 if wsdl_links is None:
233 return None
234 for end_point in wsdl_links:
235 if end_point is not None and link_test(end_point) is not None:
236 wsdl = end_point
237 break
238 return wsdl
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/net/helio/parser.py b/sunpy/net/helio/parser.py
--- a/sunpy/net/helio/parser.py
+++ b/sunpy/net/helio/parser.py
@@ -54,7 +54,7 @@
'http://festung1.oats.inaf.it:8080/helio-hec/HelioLongQueryService',
'http://hec.helio-vo.eu/helio_hec/HelioLongQueryService']
"""
- link = RL.LINK + service.lower()
+ link = RL.LINK + '/' + service.lower()
xml = link_test(link)
if xml is None:
return xml
@@ -182,7 +182,7 @@
None
"""
try:
- with closing(urlopen(link)) as fd:
+ with closing(urlopen(link, timeout=LINK_TIMEOUT)) as fd:
return fd.read()
except (ValueError, URLError):
return None
|
{"golden_diff": "diff --git a/sunpy/net/helio/parser.py b/sunpy/net/helio/parser.py\n--- a/sunpy/net/helio/parser.py\n+++ b/sunpy/net/helio/parser.py\n@@ -54,7 +54,7 @@\n 'http://festung1.oats.inaf.it:8080/helio-hec/HelioLongQueryService',\n 'http://hec.helio-vo.eu/helio_hec/HelioLongQueryService']\n \"\"\"\n- link = RL.LINK + service.lower()\n+ link = RL.LINK + '/' + service.lower()\n xml = link_test(link)\n if xml is None:\n return xml\n@@ -182,7 +182,7 @@\n None\n \"\"\"\n try:\n- with closing(urlopen(link)) as fd:\n+ with closing(urlopen(link, timeout=LINK_TIMEOUT)) as fd:\n return fd.read()\n except (ValueError, URLError):\n return None\n", "issue": "Fix for sunpy paper listing 3.7\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Author: Michael Malocha <[email protected]>\n# Last Edit: September 22nd, 2013\n#\n# This module was developed with funding from the GSOC 2013 summer of code\n#\n\n\"\"\"\nThis module is meant to parse the HELIO registry and return WSDL endpoints to\nfacilitate the interfacing between further modules and HELIO.\n\"\"\"\nfrom __future__ import absolute_import\nfrom urllib2 import urlopen, URLError\n#import sunpy.util.etree as EL\nimport xml.etree.ElementTree as EL\nfrom sunpy.net.helio import registry_links as RL\nfrom bs4 import BeautifulSoup\nfrom contextlib import closing\n\n__author__ = 'Michael Malocha'\n__version__ = 'September 22nd, 2013'\n\n# Lifespan in seconds before a link times-out\nLINK_TIMEOUT = 3\n\n\ndef webservice_parser(service='HEC'):\n \"\"\"\n Quickly parses important contents from HELIO registry.\n\n Uses the link contained in registry_links in with 'service' appended\n and scrapes the web-service links contained on that webpage.\n\n Parameters\n ----------\n service: str\n Indicates which particular HELIO service is used. 
Defaults to HEC.\n\n Returns\n -------\n links: list or NoneType\n List of urls to registries containing WSDL endpoints.\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.webservice_parser()\n ['http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService',\n 'http://festung3.oats.inaf.it:8080/helio-hec/HelioService',\n 'http://festung1.oats.inaf.it:8080/helio-hec/HelioService',\n 'http://hec.helio-vo.eu/helio_hec/HelioService',\n 'http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioLongQueryService',\n 'http://festung3.oats.inaf.it:8080/helio-hec/HelioLongQueryService',\n 'http://festung1.oats.inaf.it:8080/helio-hec/HelioLongQueryService',\n 'http://hec.helio-vo.eu/helio_hec/HelioLongQueryService']\n \"\"\"\n link = RL.LINK + service.lower()\n xml = link_test(link)\n if xml is None:\n return xml\n root = EL.fromstring(xml)\n links = []\n\n #WARNING: getiterator is deprecated in Python 2.7+\n #Fix for 3.x support\n for interface in root.getiterator('interface'):\n service_type = interface.attrib\n key = service_type.keys()\n if len(key) > 0:\n value = service_type[key[0]]\n if value == 'vr:WebService':\n for url in interface.getiterator('accessURL'):\n if url.text not in links:\n links.append(url.text)\n return links\n\n\ndef endpoint_parser(link):\n \"\"\"\n Takes a link to a list of endpoints and parses the WSDL links.\n\n Feeding 1 result from webservice_parser() into endpoint_parser() at a time\n will return a list of WSDL endpoints that are contained on the page from\n that link that was passed in.\n\n Parameters\n ----------\n link: str\n A url to a page containing links to WSDL files.\n\n Returns\n -------\n endpoints: list or NoneType\n A list containing all of the available WSDL endpoints from the passed\n in url.\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.endpoint_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')\n ['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0b?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_1?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0b?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']\n \"\"\"\n endpoint_page = link_test(link)\n if endpoint_page is None:\n return None\n soup = BeautifulSoup(endpoint_page)\n endpoints = []\n for web_link in soup.find_all('a'):\n endpoints.append(web_link.get('href'))\n return endpoints\n\n\ndef taverna_parser(link):\n \"\"\"\n Takes a link to a list of endpoints and parses the taverna WSDL links.\n\n Takes a url to a page containing a list of endpoints, then passes that url\n to endpoint_parser(). Upon receiving the resulting list from the parser\n taverna_parser() goes through the list and finds all the WSDL links for\n the taverna web-service. 
It then returns a list containing the filtered\n links.\n\n Parameters\n ----------\n link: str\n A url to a page containing links to WSDL files.\n\n Returns\n -------\n taverna_links: list or NoneType\n A list containing WSDL links for a taverna web-service\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.taverna_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')\n ['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']\n \"\"\"\n endpoints = endpoint_parser(link)\n taverna_links = []\n if endpoints is None:\n return None\n for web_link in endpoints:\n if 'Taverna' in web_link:\n taverna_links.append(web_link)\n if len(taverna_links) == 0:\n return None\n return taverna_links\n\n\ndef link_test(link):\n \"\"\"\n Just a quick function to test a link.\n\n Quickly checks to see if the URL is a valid link; if it is it returns the\n downloaded contents of that page.\n\n Parameters\n ----------\n link: str\n A string containing a URL\n\n Returns\n -------\n webpage: str or NoneType\n String containing the webresults\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.link_test('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')\n u'<html>\\n<head>...</body>\\n</html>\\n'\n\n >>> print parser.link_test('http://rrnx.invalid_url5523.com')\n None\n \"\"\"\n try:\n with closing(urlopen(link)) as fd:\n return fd.read()\n except (ValueError, URLError):\n return None\n\n\ndef wsdl_retriever(service='HEC'):\n \"\"\"\n Retrieves a link to a taverna WSDL file\n\n This is essentially the master method, from it all the other functions get\n called and it essentially knits everything together. It gets a list of\n service links via webservice_parser(), then filters the results via\n taverna_parser(). Finally it tests all the returned taverna WSDL links\n and returns the first live taverna endpoint.\n\n Parameters\n ----------\n service: str\n Indicates which particular HELIO service is used. Defaults to HEC.\n\n Returns\n -------\n wsdl: str\n URL to a single live taverna endpoint\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.wsdl_retriever()\n 'http://msslkz.mssl.ucl.ac.uk:80/helio_hec/HelioTavernaService?wsdl'\n\n Notes\n -----\n * Currently only support for HEC exists, but it was designed so that it\n could be expanded at a later date\n * There is a 3 second timeout lifespan on links, so there is potential for\n this function to take a while to return. 
Timeout duration can be\n controlled through the LINK_TIMEOUT value\n \"\"\"\n service_links = webservice_parser(service=service)\n wsdl = None\n wsdl_links = None\n if service_links is None:\n return None\n for link in service_links:\n wsdl_links = taverna_parser(link)\n if wsdl_links is None:\n return None\n for end_point in wsdl_links:\n if end_point is not None and link_test(end_point) is not None:\n wsdl = end_point\n break\n return wsdl\n", "path": "sunpy/net/helio/parser.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Author: Michael Malocha <[email protected]>\n# Last Edit: September 22nd, 2013\n#\n# This module was developed with funding from the GSOC 2013 summer of code\n#\n\n\"\"\"\nThis module is meant to parse the HELIO registry and return WSDL endpoints to\nfacilitate the interfacing between further modules and HELIO.\n\"\"\"\nfrom __future__ import absolute_import\nfrom urllib2 import urlopen, URLError\n#import sunpy.util.etree as EL\nimport xml.etree.ElementTree as EL\nfrom sunpy.net.helio import registry_links as RL\nfrom bs4 import BeautifulSoup\nfrom contextlib import closing\n\n__author__ = 'Michael Malocha'\n__version__ = 'September 22nd, 2013'\n\n# Lifespan in seconds before a link times-out\nLINK_TIMEOUT = 3\n\n\ndef webservice_parser(service='HEC'):\n \"\"\"\n Quickly parses important contents from HELIO registry.\n\n Uses the link contained in registry_links in with 'service' appended\n and scrapes the web-service links contained on that webpage.\n\n Parameters\n ----------\n service: str\n Indicates which particular HELIO service is used. Defaults to HEC.\n\n Returns\n -------\n links: list or NoneType\n List of urls to registries containing WSDL endpoints.\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.webservice_parser()\n ['http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService',\n 'http://festung3.oats.inaf.it:8080/helio-hec/HelioService',\n 'http://festung1.oats.inaf.it:8080/helio-hec/HelioService',\n 'http://hec.helio-vo.eu/helio_hec/HelioService',\n 'http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioLongQueryService',\n 'http://festung3.oats.inaf.it:8080/helio-hec/HelioLongQueryService',\n 'http://festung1.oats.inaf.it:8080/helio-hec/HelioLongQueryService',\n 'http://hec.helio-vo.eu/helio_hec/HelioLongQueryService']\n \"\"\"\n link = RL.LINK + '/' + service.lower()\n xml = link_test(link)\n if xml is None:\n return xml\n root = EL.fromstring(xml)\n links = []\n\n #WARNING: getiterator is deprecated in Python 2.7+\n #Fix for 3.x support\n for interface in root.getiterator('interface'):\n service_type = interface.attrib\n key = service_type.keys()\n if len(key) > 0:\n value = service_type[key[0]]\n if value == 'vr:WebService':\n for url in interface.getiterator('accessURL'):\n if url.text not in links:\n links.append(url.text)\n return links\n\n\ndef endpoint_parser(link):\n \"\"\"\n Takes a link to a list of endpoints and parses the WSDL links.\n\n Feeding 1 result from webservice_parser() into endpoint_parser() at a time\n will return a list of WSDL endpoints that are contained on the page from\n that link that was passed in.\n\n Parameters\n ----------\n link: str\n A url to a page containing links to WSDL files.\n\n Returns\n -------\n endpoints: list or NoneType\n A list containing all of the available WSDL endpoints from the passed\n in url.\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.endpoint_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')\n 
['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioService1_0b?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_1?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioLongQueryService1_0b?wsdl',\n 'http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']\n \"\"\"\n endpoint_page = link_test(link)\n if endpoint_page is None:\n return None\n soup = BeautifulSoup(endpoint_page)\n endpoints = []\n for web_link in soup.find_all('a'):\n endpoints.append(web_link.get('href'))\n return endpoints\n\n\ndef taverna_parser(link):\n \"\"\"\n Takes a link to a list of endpoints and parses the taverna WSDL links.\n\n Takes a url to a page containing a list of endpoints, then passes that url\n to endpoint_parser(). Upon receiving the resulting list from the parser\n taverna_parser() goes through the list and finds all the WSDL links for\n the taverna web-service. It then returns a list containing the filtered\n links.\n\n Parameters\n ----------\n link: str\n A url to a page containing links to WSDL files.\n\n Returns\n -------\n taverna_links: list or NoneType\n A list containing WSDL links for a taverna web-service\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.taverna_parser('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')\n ['http://msslkz.mssl.ucl.ac.uk:80/helio-hec/HelioTavernaService?wsdl']\n \"\"\"\n endpoints = endpoint_parser(link)\n taverna_links = []\n if endpoints is None:\n return None\n for web_link in endpoints:\n if 'Taverna' in web_link:\n taverna_links.append(web_link)\n if len(taverna_links) == 0:\n return None\n return taverna_links\n\n\ndef link_test(link):\n \"\"\"\n Just a quick function to test a link.\n\n Quickly checks to see if the URL is a valid link; if it is it returns the\n downloaded contents of that page.\n\n Parameters\n ----------\n link: str\n A string containing a URL\n\n Returns\n -------\n webpage: str or NoneType\n String containing the webresults\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.link_test('http://msslkz.mssl.ucl.ac.uk/helio-hec/HelioService')\n u'<html>\\n<head>...</body>\\n</html>\\n'\n\n >>> print parser.link_test('http://rrnx.invalid_url5523.com')\n None\n \"\"\"\n try:\n with closing(urlopen(link, timeout=LINK_TIMEOUT)) as fd:\n return fd.read()\n except (ValueError, URLError):\n return None\n\n\ndef wsdl_retriever(service='HEC'):\n \"\"\"\n Retrieves a link to a taverna WSDL file\n\n This is essentially the master method, from it all the other functions get\n called and it essentially knits everything together. It gets a list of\n service links via webservice_parser(), then filters the results via\n taverna_parser(). Finally it tests all the returned taverna WSDL links\n and returns the first live taverna endpoint.\n\n Parameters\n ----------\n service: str\n Indicates which particular HELIO service is used. 
Defaults to HEC.\n\n Returns\n -------\n wsdl: str\n URL to a single live taverna endpoint\n\n Examples\n --------\n >>> from sunpy.net.helio import parser\n >>> parser.wsdl_retriever()\n 'http://msslkz.mssl.ucl.ac.uk:80/helio_hec/HelioTavernaService?wsdl'\n\n Notes\n -----\n * Currently only support for HEC exists, but it was designed so that it\n could be expanded at a later date\n * There is a 3 second timeout lifespan on links, so there is potential for\n this function to take a while to return. Timeout duration can be\n controlled through the LINK_TIMEOUT value\n \"\"\"\n service_links = webservice_parser(service=service)\n wsdl = None\n wsdl_links = None\n if service_links is None:\n return None\n for link in service_links:\n wsdl_links = taverna_parser(link)\n if wsdl_links is None:\n return None\n for end_point in wsdl_links:\n if end_point is not None and link_test(end_point) is not None:\n wsdl = end_point\n break\n return wsdl\n", "path": "sunpy/net/helio/parser.py"}]}
| 2,967 | 222 |
gh_patches_debug_36756
|
rasdani/github-patches
|
git_diff
|
horovod__horovod-3245
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pytorch_lightning_mnist.py example is not working with GPU
**Environment:**
torch==1.7.1
pytorch-lightning==1.3.8
horovod==master branch
**Bug report:**
Reproduce with `horovodrun -np 1 -H agent11240-phx4:1 python pytorch_lightning_mnist.py --epochs 1`
It fails in test step since model weights are on cpu:
```
Epoch 0: 100% 946/948 [00:10<00:00, 86.76it/s, loss=0.438, v[[1,0]<stdout>:A epoch ended.
Epoch 0: 100% 948/948 [00:11<00:00, 84.24it/s, loss=0.438, v_[1,0]<stdout>:Training ends
Epoch 0: 100% 948/948 [00:11<00:00, 84.18it/s, loss=0.438, v_num=0][1,0]<stdout>:
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "pytorch_lightning_mnist.py", line 218, in <module>
[1,0]<stderr>: test()
[1,0]<stderr>: File "pytorch_lightning_mnist.py", line 101, in test
[1,0]<stderr>: output = model(data)
[1,0]<stderr>: File "/usr/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
[1,0]<stderr>: result = self.forward(*input, **kwargs)
[1,0]<stderr>: File "pytorch_lightning_mnist.py", line 59, in forward
[1,0]<stderr>: x = F.relu(F.max_pool2d(self.conv1(x), 2))
[1,0]<stderr>: File "/usr/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
[1,0]<stderr>: result = self.forward(*input, **kwargs)
[1,0]<stderr>: File "/usr/lib/python3.6/site-packages/torch/nn/modules/conv.py", line 423, in forward
[1,0]<stderr>: return self._conv_forward(input, self.weight)
[1,0]<stderr>: File "/usr/lib/python3.6/site-packages/torch/nn/modules/conv.py", line 420, in _conv_forward
[1,0]<stderr>: self.padding, self.dilation, self.groups)
[1,0]<stderr>:RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same
```
CPU version `horovodrun -np 1 -H agent11240-phx4:1 python pytorch_lightning_mnist.py --epochs 1 --no-cuda` is working.
Also, the example seems has wrong configuration for using GPUs:
The `gpus` for `trainer` should depend on `args.cuda`, instead of system default setting.
https://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_lightning_mnist.py#L208
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/pytorch/pytorch_lightning_mnist.py`
Content:
```
1 import argparse
2 import os
3 from filelock import FileLock
4 import tempfile
5
6 import torch
7 import torch.multiprocessing as mp
8 import torch.nn as nn
9 import torch.nn.functional as F
10 import torch.optim as optim
11 from torchvision import datasets, transforms
12 # import torch.utils.data.distributed
13
14 from pytorch_lightning import LightningModule, Trainer
15 from pytorch_lightning.callbacks import ModelCheckpoint
16 from pytorch_lightning.loggers import TensorBoardLogger
17 import horovod.torch as hvd
18
19 # Training settings
20 parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
21 parser.add_argument('--batch-size', type=int, default=64, metavar='N',
22 help='input batch size for training (default: 64)')
23 parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
24 help='input batch size for testing (default: 1000)')
25 parser.add_argument('--epochs', type=int, default=10, metavar='N',
26 help='number of epochs to train (default: 10)')
27 parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
28 help='learning rate (default: 0.01)')
29 parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
30 help='SGD momentum (default: 0.5)')
31 parser.add_argument('--no-cuda', action='store_true', default=False,
32 help='disables CUDA training')
33 parser.add_argument('--seed', type=int, default=42, metavar='S',
34 help='random seed (default: 42)')
35 parser.add_argument('--log-interval', type=int, default=10, metavar='N',
36 help='how many batches to wait before logging training status')
37 parser.add_argument('--fp16-allreduce', action='store_true', default=False,
38 help='use fp16 compression during allreduce')
39 parser.add_argument('--use-adasum', action='store_true', default=False,
40 help='use adasum algorithm to do reduction')
41 parser.add_argument('--gradient-predivide-factor', type=float, default=1.0,
42 help='apply gradient predivide factor in optimizer (default: 1.0)')
43 parser.add_argument('--data-dir',
44 help='location of the training dataset in the local filesystem (will be downloaded if needed)')
45
46
47 # Define the PyTorch model without any Horovod-specific parameters
48 class Net(LightningModule):
49 def __init__(self):
50 super(Net, self).__init__()
51 self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
52 self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
53 self.conv2_drop = nn.Dropout2d()
54 self.fc1 = nn.Linear(320, 50)
55 self.fc2 = nn.Linear(50, 10)
56
57 def forward(self, x):
58 x = x.float()
59 x = F.relu(F.max_pool2d(self.conv1(x), 2))
60 x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
61 x = x.view(-1, 320)
62 x = F.relu(self.fc1(x))
63 x = F.dropout(x, training=self.training)
64 x = self.fc2(x)
65 return F.log_softmax(x, -1)
66
67 def configure_optimizers(self):
68 return optim.SGD(self.parameters(), lr=0.01, momentum=0.5)
69
70 def training_step(self, batch, batch_nb):
71 x, y = batch[0], batch[1]
72 y_hat = self(x)
73 loss = F.nll_loss(y_hat, y.long())
74 tensorboard_logs = {'train_loss': loss}
75 return {'loss': loss, 'log': tensorboard_logs}
76
77 def validation_step(self, batch, batch_nb):
78 x, y = batch[0], batch[1]
79 y_hat = self(x)
80 return {'val_loss': F.nll_loss(y_hat, y.long())}
81
82 def validation_epoch_end(self, outputs):
83 avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
84 tensorboard_logs = {'val_loss': avg_loss}
85 return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
86
87
88 def metric_average(val, name):
89 tensor = torch.tensor(val)
90 avg_tensor = hvd.allreduce(tensor, name=name)
91 return avg_tensor.item()
92
93
94 def test():
95 model.eval()
96 test_loss = 0.
97 test_accuracy = 0.
98 for data, target in test_loader:
99 if args.cuda:
100 data, target = data.cuda(), target.cuda()
101 output = model(data)
102 # sum up batch loss
103 test_loss += F.nll_loss(output, target, size_average=False).item()
104 # get the index of the max log-probability
105 pred = output.data.max(1, keepdim=True)[1]
106 test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()
107
108 # Horovod: use test_sampler to determine the number of examples in
109 # this worker's partition.
110 test_loss /= len(test_sampler)
111 test_accuracy /= len(test_sampler)
112
113 # Horovod: average metric values across workers.
114 test_loss = metric_average(test_loss, 'avg_loss')
115 test_accuracy = metric_average(test_accuracy, 'avg_accuracy')
116
117 # Horovod: print output only on first rank.
118 if hvd.rank() == 0:
119 print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(
120 test_loss, 100. * test_accuracy))
121
122
123 if __name__ == '__main__':
124 args = parser.parse_args()
125 args.cuda = not args.no_cuda and torch.cuda.is_available()
126 hvd.init()
127
128 kwargs = {'num_workers': 2}
129 # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent
130 # issues with Infiniband implementations that are not fork-safe
131 if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context') and
132 mp._supports_context and 'forkserver' in mp.get_all_start_methods()):
133 kwargs['multiprocessing_context'] = 'forkserver'
134
135 # get data
136 data_dir = args.data_dir or './data'
137 with FileLock(os.path.expanduser("~/.horovod_lock")):
138 train_dataset = \
139 datasets.MNIST(data_dir, train=True, download=True,
140 transform=transforms.Compose([
141 transforms.ToTensor(),
142 transforms.Normalize((0.1307,), (0.3081,))
143 ]))
144
145 # set training data loader
146 train_sampler = torch.utils.data.distributed.DistributedSampler(
147 train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
148 train_loader = torch.utils.data.DataLoader(
149 train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)
150
151 test_dataset = \
152 datasets.MNIST(data_dir, train=False, transform=transforms.Compose([
153 transforms.ToTensor(),
154 transforms.Normalize((0.1307,), (0.3081,))
155 ]))
156
157 # set validation data loader
158 test_sampler = torch.utils.data.distributed.DistributedSampler(
159 test_dataset, num_replicas=hvd.size(), rank=hvd.rank())
160 test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size,
161 sampler=test_sampler, **kwargs)
162
163 epochs = args.epochs
164 with tempfile.TemporaryDirectory() as run_output_dir:
165 ckpt_path = os.path.join(run_output_dir, "checkpoint")
166 os.makedirs(ckpt_path, exist_ok=True)
167
168 logs_path = os.path.join(run_output_dir, "logger")
169 os.makedirs(logs_path, exist_ok=True)
170 logger = TensorBoardLogger(logs_path)
171
172 train_percent = 1.0
173 val_percent = 1.0
174
175 model = Net()
176 setattr(model, 'train_dataloader', lambda: train_loader)
177 setattr(model, 'val_dataloader', lambda: test_loader)
178
179 from pytorch_lightning.callbacks import Callback
180
181 class MyDummyCallback(Callback):
182 def __init__(self):
183 self.epcoh_end_counter = 0
184 self.train_epcoh_end_counter = 0
185
186 def on_init_start(self, trainer):
187 print('Starting to init trainer!')
188
189 def on_init_end(self, trainer):
190 print('Trainer is initialized.')
191
192 def on_epoch_end(self, trainer, model):
193 print('A epoch ended.')
194 self.epcoh_end_counter += 1
195
196 def on_train_epoch_end(self, trainer, model, unused=None):
197 print('A train epoch ended.')
198 self.train_epcoh_end_counter += 1
199
200 def on_train_end(self, trainer, model):
201 print('Training ends')
202 assert self.epcoh_end_counter == 2 * epochs
203 assert self.train_epcoh_end_counter == epochs
204
205 callbacks = [MyDummyCallback(), ModelCheckpoint(dirpath=ckpt_path)]
206
207 trainer = Trainer(accelerator='horovod',
208 gpus=(1 if torch.cuda.is_available() else 0),
209 callbacks=callbacks,
210 max_epochs=epochs,
211 limit_train_batches=train_percent,
212 limit_val_batches=val_percent,
213 logger=logger,
214 num_sanity_val_steps=0)
215
216 trainer.fit(model)
217
218 test()
219
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/pytorch/pytorch_lightning_mnist.py b/examples/pytorch/pytorch_lightning_mnist.py
--- a/examples/pytorch/pytorch_lightning_mnist.py
+++ b/examples/pytorch/pytorch_lightning_mnist.py
@@ -24,22 +24,10 @@
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
-parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
- help='learning rate (default: 0.01)')
-parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
- help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
-parser.add_argument('--log-interval', type=int, default=10, metavar='N',
- help='how many batches to wait before logging training status')
-parser.add_argument('--fp16-allreduce', action='store_true', default=False,
- help='use fp16 compression during allreduce')
-parser.add_argument('--use-adasum', action='store_true', default=False,
- help='use adasum algorithm to do reduction')
-parser.add_argument('--gradient-predivide-factor', type=float, default=1.0,
- help='apply gradient predivide factor in optimizer (default: 1.0)')
parser.add_argument('--data-dir',
help='location of the training dataset in the local filesystem (will be downloaded if needed)')
@@ -205,7 +193,7 @@
callbacks = [MyDummyCallback(), ModelCheckpoint(dirpath=ckpt_path)]
trainer = Trainer(accelerator='horovod',
- gpus=(1 if torch.cuda.is_available() else 0),
+ gpus=(1 if args.cuda else 0),
callbacks=callbacks,
max_epochs=epochs,
limit_train_batches=train_percent,
@@ -214,6 +202,7 @@
num_sanity_val_steps=0)
trainer.fit(model)
-
+ if args.cuda:
+ model = model.cuda()
test()
|
{"golden_diff": "diff --git a/examples/pytorch/pytorch_lightning_mnist.py b/examples/pytorch/pytorch_lightning_mnist.py\n--- a/examples/pytorch/pytorch_lightning_mnist.py\n+++ b/examples/pytorch/pytorch_lightning_mnist.py\n@@ -24,22 +24,10 @@\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n-parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n- help='learning rate (default: 0.01)')\n-parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n- help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=42, metavar='S',\n help='random seed (default: 42)')\n-parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n- help='how many batches to wait before logging training status')\n-parser.add_argument('--fp16-allreduce', action='store_true', default=False,\n- help='use fp16 compression during allreduce')\n-parser.add_argument('--use-adasum', action='store_true', default=False,\n- help='use adasum algorithm to do reduction')\n-parser.add_argument('--gradient-predivide-factor', type=float, default=1.0,\n- help='apply gradient predivide factor in optimizer (default: 1.0)')\n parser.add_argument('--data-dir',\n help='location of the training dataset in the local filesystem (will be downloaded if needed)')\n \n@@ -205,7 +193,7 @@\n callbacks = [MyDummyCallback(), ModelCheckpoint(dirpath=ckpt_path)]\n \n trainer = Trainer(accelerator='horovod',\n- gpus=(1 if torch.cuda.is_available() else 0),\n+ gpus=(1 if args.cuda else 0),\n callbacks=callbacks,\n max_epochs=epochs,\n limit_train_batches=train_percent,\n@@ -214,6 +202,7 @@\n num_sanity_val_steps=0)\n \n trainer.fit(model)\n-\n+ if args.cuda:\n+ model = model.cuda()\n test()\n", "issue": "pytorch_lightning_mnist.py example is not working with GPU\n**Environment:**\r\ntorch==1.7.1\r\npytorch-lightning==1.3.8\r\nhorovod==master branch\r\n\r\n**Bug report:**\r\nReproduce with `horovodrun -np 1 -H agent11240-phx4:1 python pytorch_lightning_mnist.py --epochs 1`\r\nIt fails in test step since model weights are on cpu:\r\n```\r\nEpoch 0: 100% 946/948 [00:10<00:00, 86.76it/s, loss=0.438, v[[1,0]<stdout>:A epoch ended.\r\nEpoch 0: 100% 948/948 [00:11<00:00, 84.24it/s, loss=0.438, v_[1,0]<stdout>:Training ends\r\nEpoch 0: 100% 948/948 [00:11<00:00, 84.18it/s, loss=0.438, v_num=0][1,0]<stdout>:\r\n[1,0]<stderr>:Traceback (most recent call last):\r\n[1,0]<stderr>: File \"pytorch_lightning_mnist.py\", line 218, in <module>\r\n[1,0]<stderr>: test()\r\n[1,0]<stderr>: File \"pytorch_lightning_mnist.py\", line 101, in test\r\n[1,0]<stderr>: output = model(data)\r\n[1,0]<stderr>: File \"/usr/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 727, in _call_impl\r\n[1,0]<stderr>: result = self.forward(*input, **kwargs)\r\n[1,0]<stderr>: File \"pytorch_lightning_mnist.py\", line 59, in forward\r\n[1,0]<stderr>: x = F.relu(F.max_pool2d(self.conv1(x), 2))\r\n[1,0]<stderr>: File \"/usr/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 727, in _call_impl\r\n[1,0]<stderr>: result = self.forward(*input, **kwargs)\r\n[1,0]<stderr>: File \"/usr/lib/python3.6/site-packages/torch/nn/modules/conv.py\", line 423, in forward\r\n[1,0]<stderr>: return self._conv_forward(input, self.weight)\r\n[1,0]<stderr>: File 
\"/usr/lib/python3.6/site-packages/torch/nn/modules/conv.py\", line 420, in _conv_forward\r\n[1,0]<stderr>: self.padding, self.dilation, self.groups)\r\n[1,0]<stderr>:RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same\r\n```\r\n\r\nCPU version `horovodrun -np 1 -H agent11240-phx4:1 python pytorch_lightning_mnist.py --epochs 1 --no-cuda` is working.\r\n\r\nAlso, the example seems has wrong configuration for using GPUs:\r\nThe `gpus` for `trainer` should depend on `args.cuda`, instead of system default setting.\r\nhttps://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_lightning_mnist.py#L208\n", "before_files": [{"content": "import argparse\nimport os\nfrom filelock import FileLock\nimport tempfile\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n# import torch.utils.data.distributed\n\nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger\nimport horovod.torch as hvd\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=42, metavar='S',\n help='random seed (default: 42)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--fp16-allreduce', action='store_true', default=False,\n help='use fp16 compression during allreduce')\nparser.add_argument('--use-adasum', action='store_true', default=False,\n help='use adasum algorithm to do reduction')\nparser.add_argument('--gradient-predivide-factor', type=float, default=1.0,\n help='apply gradient predivide factor in optimizer (default: 1.0)')\nparser.add_argument('--data-dir',\n help='location of the training dataset in the local filesystem (will be downloaded if needed)')\n\n\n# Define the PyTorch model without any Horovod-specific parameters\nclass Net(LightningModule):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = x.float()\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, -1)\n\n def configure_optimizers(self):\n return optim.SGD(self.parameters(), lr=0.01, momentum=0.5)\n\n def training_step(self, batch, batch_nb):\n x, y = 
batch[0], batch[1]\n y_hat = self(x)\n loss = F.nll_loss(y_hat, y.long())\n tensorboard_logs = {'train_loss': loss}\n return {'loss': loss, 'log': tensorboard_logs}\n\n def validation_step(self, batch, batch_nb):\n x, y = batch[0], batch[1]\n y_hat = self(x)\n return {'val_loss': F.nll_loss(y_hat, y.long())}\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n tensorboard_logs = {'val_loss': avg_loss}\n return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}\n\n\ndef metric_average(val, name):\n tensor = torch.tensor(val)\n avg_tensor = hvd.allreduce(tensor, name=name)\n return avg_tensor.item()\n\n\ndef test():\n model.eval()\n test_loss = 0.\n test_accuracy = 0.\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n # sum up batch loss\n test_loss += F.nll_loss(output, target, size_average=False).item()\n # get the index of the max log-probability\n pred = output.data.max(1, keepdim=True)[1]\n test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()\n\n # Horovod: use test_sampler to determine the number of examples in\n # this worker's partition.\n test_loss /= len(test_sampler)\n test_accuracy /= len(test_sampler)\n\n # Horovod: average metric values across workers.\n test_loss = metric_average(test_loss, 'avg_loss')\n test_accuracy = metric_average(test_accuracy, 'avg_accuracy')\n\n # Horovod: print output only on first rank.\n if hvd.rank() == 0:\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\\n'.format(\n test_loss, 100. * test_accuracy))\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n hvd.init()\n\n kwargs = {'num_workers': 2}\n # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent\n # issues with Infiniband implementations that are not fork-safe\n if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context') and\n mp._supports_context and 'forkserver' in mp.get_all_start_methods()):\n kwargs['multiprocessing_context'] = 'forkserver'\n\n # get data\n data_dir = args.data_dir or './data'\n with FileLock(os.path.expanduser(\"~/.horovod_lock\")):\n train_dataset = \\\n datasets.MNIST(data_dir, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n # set training data loader\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset, num_replicas=hvd.size(), rank=hvd.rank())\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)\n\n test_dataset = \\\n datasets.MNIST(data_dir, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n # set validation data loader\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, num_replicas=hvd.size(), rank=hvd.rank())\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size,\n sampler=test_sampler, **kwargs)\n\n epochs = args.epochs\n with tempfile.TemporaryDirectory() as run_output_dir:\n ckpt_path = os.path.join(run_output_dir, \"checkpoint\")\n os.makedirs(ckpt_path, exist_ok=True)\n\n logs_path = os.path.join(run_output_dir, \"logger\")\n os.makedirs(logs_path, exist_ok=True)\n logger = TensorBoardLogger(logs_path)\n\n train_percent = 1.0\n val_percent = 1.0\n\n 
model = Net()\n setattr(model, 'train_dataloader', lambda: train_loader)\n setattr(model, 'val_dataloader', lambda: test_loader)\n\n from pytorch_lightning.callbacks import Callback\n\n class MyDummyCallback(Callback):\n def __init__(self):\n self.epcoh_end_counter = 0\n self.train_epcoh_end_counter = 0\n\n def on_init_start(self, trainer):\n print('Starting to init trainer!')\n\n def on_init_end(self, trainer):\n print('Trainer is initialized.')\n\n def on_epoch_end(self, trainer, model):\n print('A epoch ended.')\n self.epcoh_end_counter += 1\n\n def on_train_epoch_end(self, trainer, model, unused=None):\n print('A train epoch ended.')\n self.train_epcoh_end_counter += 1\n\n def on_train_end(self, trainer, model):\n print('Training ends')\n assert self.epcoh_end_counter == 2 * epochs\n assert self.train_epcoh_end_counter == epochs\n\n callbacks = [MyDummyCallback(), ModelCheckpoint(dirpath=ckpt_path)]\n\n trainer = Trainer(accelerator='horovod',\n gpus=(1 if torch.cuda.is_available() else 0),\n callbacks=callbacks,\n max_epochs=epochs,\n limit_train_batches=train_percent,\n limit_val_batches=val_percent,\n logger=logger,\n num_sanity_val_steps=0)\n\n trainer.fit(model)\n\n test()\n\n", "path": "examples/pytorch/pytorch_lightning_mnist.py"}], "after_files": [{"content": "import argparse\nimport os\nfrom filelock import FileLock\nimport tempfile\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n# import torch.utils.data.distributed\n\nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger\nimport horovod.torch as hvd\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=42, metavar='S',\n help='random seed (default: 42)')\nparser.add_argument('--data-dir',\n help='location of the training dataset in the local filesystem (will be downloaded if needed)')\n\n\n# Define the PyTorch model without any Horovod-specific parameters\nclass Net(LightningModule):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = x.float()\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, -1)\n\n def configure_optimizers(self):\n return optim.SGD(self.parameters(), lr=0.01, momentum=0.5)\n\n def training_step(self, batch, batch_nb):\n x, y = batch[0], batch[1]\n y_hat = self(x)\n loss = F.nll_loss(y_hat, y.long())\n tensorboard_logs = {'train_loss': loss}\n return {'loss': loss, 'log': 
tensorboard_logs}\n\n def validation_step(self, batch, batch_nb):\n x, y = batch[0], batch[1]\n y_hat = self(x)\n return {'val_loss': F.nll_loss(y_hat, y.long())}\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n tensorboard_logs = {'val_loss': avg_loss}\n return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}\n\n\ndef metric_average(val, name):\n tensor = torch.tensor(val)\n avg_tensor = hvd.allreduce(tensor, name=name)\n return avg_tensor.item()\n\n\ndef test():\n model.eval()\n test_loss = 0.\n test_accuracy = 0.\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n # sum up batch loss\n test_loss += F.nll_loss(output, target, size_average=False).item()\n # get the index of the max log-probability\n pred = output.data.max(1, keepdim=True)[1]\n test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()\n\n # Horovod: use test_sampler to determine the number of examples in\n # this worker's partition.\n test_loss /= len(test_sampler)\n test_accuracy /= len(test_sampler)\n\n # Horovod: average metric values across workers.\n test_loss = metric_average(test_loss, 'avg_loss')\n test_accuracy = metric_average(test_accuracy, 'avg_accuracy')\n\n # Horovod: print output only on first rank.\n if hvd.rank() == 0:\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\\n'.format(\n test_loss, 100. * test_accuracy))\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n hvd.init()\n\n kwargs = {'num_workers': 2}\n # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent\n # issues with Infiniband implementations that are not fork-safe\n if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context') and\n mp._supports_context and 'forkserver' in mp.get_all_start_methods()):\n kwargs['multiprocessing_context'] = 'forkserver'\n\n # get data\n data_dir = args.data_dir or './data'\n with FileLock(os.path.expanduser(\"~/.horovod_lock\")):\n train_dataset = \\\n datasets.MNIST(data_dir, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n # set training data loader\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset, num_replicas=hvd.size(), rank=hvd.rank())\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)\n\n test_dataset = \\\n datasets.MNIST(data_dir, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]))\n\n # set validation data loader\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, num_replicas=hvd.size(), rank=hvd.rank())\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size,\n sampler=test_sampler, **kwargs)\n\n epochs = args.epochs\n with tempfile.TemporaryDirectory() as run_output_dir:\n ckpt_path = os.path.join(run_output_dir, \"checkpoint\")\n os.makedirs(ckpt_path, exist_ok=True)\n\n logs_path = os.path.join(run_output_dir, \"logger\")\n os.makedirs(logs_path, exist_ok=True)\n logger = TensorBoardLogger(logs_path)\n\n train_percent = 1.0\n val_percent = 1.0\n\n model = Net()\n setattr(model, 'train_dataloader', lambda: train_loader)\n setattr(model, 'val_dataloader', lambda: test_loader)\n\n from 
pytorch_lightning.callbacks import Callback\n\n class MyDummyCallback(Callback):\n def __init__(self):\n self.epcoh_end_counter = 0\n self.train_epcoh_end_counter = 0\n\n def on_init_start(self, trainer):\n print('Starting to init trainer!')\n\n def on_init_end(self, trainer):\n print('Trainer is initialized.')\n\n def on_epoch_end(self, trainer, model):\n print('A epoch ended.')\n self.epcoh_end_counter += 1\n\n def on_train_epoch_end(self, trainer, model, unused=None):\n print('A train epoch ended.')\n self.train_epcoh_end_counter += 1\n\n def on_train_end(self, trainer, model):\n print('Training ends')\n assert self.epcoh_end_counter == 2 * epochs\n assert self.train_epcoh_end_counter == epochs\n\n callbacks = [MyDummyCallback(), ModelCheckpoint(dirpath=ckpt_path)]\n\n trainer = Trainer(accelerator='horovod',\n gpus=(1 if args.cuda else 0),\n callbacks=callbacks,\n max_epochs=epochs,\n limit_train_batches=train_percent,\n limit_val_batches=val_percent,\n logger=logger,\n num_sanity_val_steps=0)\n\n trainer.fit(model)\n if args.cuda:\n model = model.cuda()\n test()\n\n", "path": "examples/pytorch/pytorch_lightning_mnist.py"}]}
| 3,664 | 535 |
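Aside on the record above: its test loop shards MNIST with a `DistributedSampler` and then averages per-worker metrics with `hvd.allreduce`, which averages across ranks by default. A minimal sketch of that pattern, mirroring the record's own `metric_average` helper; the metric values are hypothetical and it assumes Horovod has been initialised (`hvd.init()`) and the script is launched under `horovodrun`:

```python
import torch
import horovod.torch as hvd


def metric_average(value, name):
    # hvd.allreduce averages the tensor across all Horovod workers by default,
    # so every rank ends up holding the same global mean.
    tensor = torch.tensor(value)
    return hvd.allreduce(tensor, name=name).item()


# Each worker computes a metric over its own shard of the test set,
# then the per-worker values are averaged across ranks (hypothetical numbers):
# local_loss = 0.42
# global_loss = metric_average(local_loss, "avg_loss")
```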
gh_patches_debug_27580 | rasdani/github-patches | git_diff | watchdogpolska__feder-441 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
zmiana sposobu przewijania spraw z mailami z monitoringu
W tej chwili można przeskakiwać tylko o jedną stronę do przodu i do tyłu, a chodziłoby o to, żeby można było przeskoczyć o kilka lub na sam koniec. Jest tak:

A lepiej jakby było tak:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `feder/monitorings/views.py`
Content:
```
1 from atom.ext.guardian.forms import TranslatedUserObjectPermissionsForm
2 from atom.views import DeleteMessageMixin, UpdateMessageMixin
3 from braces.views import (FormValidMessageMixin, LoginRequiredMixin,
4 PermissionRequiredMixin, SelectRelatedMixin,
5 UserFormKwargsMixin, PrefetchRelatedMixin)
6 from cached_property import cached_property
7 from dal import autocomplete
8 from django.contrib import messages
9 from django.contrib.auth import get_user_model
10 from django.core.exceptions import PermissionDenied
11 from django.urls import reverse, reverse_lazy
12 from django.db.models import Count
13 from django.http import HttpResponseRedirect
14 from django.shortcuts import get_object_or_404
15 from django.utils.translation import ugettext_lazy as _
16 from django.views.generic import (CreateView, DeleteView, DetailView, FormView,
17 UpdateView)
18 from django_filters.views import FilterView
19 from formtools.wizard.views import SessionWizardView
20 from guardian.shortcuts import assign_perm
21
22 from feder.cases.models import Case
23 from feder.institutions.filters import InstitutionFilter
24 from feder.institutions.models import Institution
25 from feder.letters.models import Letter
26 from feder.main.mixins import ExtraListMixin, RaisePermissionRequiredMixin
27 from .filters import MonitoringFilter
28 from .forms import (MonitoringForm, SaveTranslatedUserObjectPermissionsForm,
29 SelectUserForm)
30 from .models import Monitoring
31
32
33 class MonitoringListView(SelectRelatedMixin, FilterView):
34 filterset_class = MonitoringFilter
35 model = Monitoring
36 select_related = ['user', ]
37 paginate_by = 25
38
39 def get_queryset(self):
40 qs = super(MonitoringListView, self).get_queryset()
41
42 if not self.request.user.is_staff:
43 qs = qs.only_public()
44
45 return qs.with_case_count()
46
47
48 class MonitoringDetailView(SelectRelatedMixin, PrefetchRelatedMixin,
49 ExtraListMixin, DetailView):
50 model = Monitoring
51 select_related = ['user', ]
52 prefetch_related = ['questionary_set', ]
53 paginate_by = 25
54
55 def get_queryset(self):
56 qs = super(MonitoringDetailView, self).get_queryset()
57
58 if not self.request.user.is_staff:
59 qs = qs.only_public()
60
61 return qs
62
63 def get_object_list(self, obj):
64 return (Case.objects.filter(monitoring=obj).
65 select_related('institution').
66 prefetch_related('task_set').
67 with_record_max().
68 with_letter().
69 order_by('-record_max').
70 all())
71
72
73 class LetterListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraListMixin, DetailView):
74 model = Monitoring
75 template_name_suffix = '_letter_list'
76 select_related = ['user', ]
77 prefetch_related = ['questionary_set', ]
78 paginate_by = 25
79
80 def get_object_list(self, obj):
81 return (Letter.objects.filter(record__case__monitoring=obj).
82 select_related('record__case').
83 with_author().
84 attachment_count().
85 order_by('-created').
86 all())
87
88
89 class DraftListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraListMixin, DetailView):
90 model = Monitoring
91 template_name_suffix = '_draft_list'
92 select_related = ['user', ]
93 prefetch_related = ['questionary_set', ]
94 paginate_by = 25
95
96 def get_object_list(self, obj):
97 return (Letter.objects.filter(record__case__monitoring=obj).
98 is_draft().
99 select_related('record__case').
100 with_author().
101 attachment_count().
102 order_by('-created').
103 all())
104
105
106 class MonitoringCreateView(LoginRequiredMixin, PermissionRequiredMixin,
107 UserFormKwargsMixin, CreateView):
108 model = Monitoring
109 template_name = 'monitorings/monitoring_form.html'
110 form_class = MonitoringForm
111 permission_required = 'monitorings.add_monitoring'
112 raise_exception = True
113 redirect_unauthenticated_users = True
114
115 def get_form_valid_message(self):
116 return _("{0} created!").format(self.object)
117
118 def form_valid(self, form):
119 output = super(MonitoringCreateView, self).form_valid(form)
120 default_perm = ['change_monitoring', 'delete_monitoring', 'add_questionary',
121 'change_questionary', 'delete_questionary', 'add_case',
122 'change_case', 'delete_case', 'add_task', 'change_task',
123 'delete_task', 'reply', 'view_alert', 'change_alert',
124 'delete_alert', 'manage_perm',
125 'select_survey', 'add_draft']
126 for perm in default_perm:
127 assign_perm(perm, self.request.user, form.instance)
128 return output
129
130
131 class MonitoringUpdateView(RaisePermissionRequiredMixin, UserFormKwargsMixin,
132 UpdateMessageMixin, FormValidMessageMixin, UpdateView):
133 model = Monitoring
134 form_class = MonitoringForm
135 permission_required = 'monitorings.change_monitoring'
136
137
138 class MonitoringDeleteView(RaisePermissionRequiredMixin, DeleteMessageMixin,
139 DeleteView):
140 model = Monitoring
141 success_url = reverse_lazy('monitorings:list')
142 permission_required = 'monitorings.delete_monitoring'
143
144
145 class PermissionWizard(LoginRequiredMixin, SessionWizardView):
146 form_list = [SelectUserForm, TranslatedUserObjectPermissionsForm]
147 template_name = 'monitorings/permission_wizard.html'
148
149 def perm_check(self):
150 if not self.request.user.has_perm('monitorings.manage_perm',
151 self.monitoring):
152 raise PermissionDenied()
153
154 @cached_property
155 def monitoring(self):
156 return Monitoring.objects.get(slug=self.kwargs['slug'])
157
158 def get_context_data(self, *args, **kwargs):
159 context = super(PermissionWizard, self).get_context_data(*args, **kwargs)
160 context['object'] = self.monitoring
161 return context
162
163 def get_form_kwargs(self, step=None):
164 kw = super(PermissionWizard, self).get_form_kwargs(step)
165 self.perm_check()
166 if step == '1':
167 user_pk = self.storage.get_step_data('0').get('0-user')[0]
168 user = get_user_model().objects.get(pk=user_pk)
169 kw['user'] = user
170 kw['obj'] = self.monitoring
171 return kw
172
173 def get_success_message(self):
174 return _("Permissions to {monitoring} updated!").format(monitoring=self.object)
175
176 def done(self, form_list, *args, **kwargs):
177 form_list[1].save_obj_perms()
178 self.object = form_list[1].obj
179 messages.success(self.request, self.get_success_message())
180 url = reverse('monitorings:perm', kwargs={'slug': self.object.slug})
181 return HttpResponseRedirect(url)
182
183
184 class MonitoringPermissionView(RaisePermissionRequiredMixin, SelectRelatedMixin, DetailView):
185 model = Monitoring
186 template_name_suffix = '_permissions'
187 select_related = ['user', ]
188 permission_required = 'monitorings.manage_perm'
189
190 def get_context_data(self, **kwargs):
191 context = super(MonitoringPermissionView, self).get_context_data(**kwargs)
192 context['user_list'], context['index'] = self.object.permission_map()
193 return context
194
195
196 class MonitoringUpdatePermissionView(RaisePermissionRequiredMixin, SelectRelatedMixin, FormView):
197 form_class = SaveTranslatedUserObjectPermissionsForm
198 template_name = 'monitorings/monitoring_form.html'
199 permission_required = 'monitorings.manage_perm'
200
201 def get_permission_object(self):
202 return self.get_monitoring()
203
204 def get_user(self):
205 if not getattr(self, 'user', None):
206 self.user = get_object_or_404(get_user_model(), id=self.kwargs['user_pk'])
207 return self.user
208
209 def get_monitoring(self):
210 if not getattr(self, 'monitoring', None):
211 self.monitoring = get_object_or_404(Monitoring, slug=self.kwargs['slug'])
212 return self.monitoring
213
214 def get_context_data(self, **kwargs):
215 context = super(MonitoringUpdatePermissionView, self).get_context_data(**kwargs)
216 context['object'] = self.get_monitoring()
217 return context
218
219 def get_form_kwargs(self):
220 kw = super(MonitoringUpdatePermissionView, self).get_form_kwargs()
221 kw.update({'user': self.get_user(), 'obj': self.get_monitoring()})
222 return kw
223
224 def get_success_message(self):
225 return (_("Permissions to {monitoring} of {user} updated!").
226 format(monitoring=self.monitoring, user=self.user))
227
228 def form_valid(self, form):
229 form.save_obj_perms()
230 messages.success(self.request, self.get_success_message())
231 url = reverse('monitorings:perm', kwargs={'slug': self.get_monitoring().slug})
232 return HttpResponseRedirect(url)
233
234
235 class MonitoringAssignView(RaisePermissionRequiredMixin, FilterView):
236 model = Institution
237 filterset_class = InstitutionFilter
238 permission_required = 'monitorings.change_monitoring'
239 template_name = 'monitorings/institution_assign.html'
240 paginate_by = 50
241 LIMIT = 500
242
243 def get_limit_simultaneously(self):
244 return self.LIMIT
245
246 def get_queryset(self):
247 qs = super(MonitoringAssignView, self).get_queryset()
248 return (qs.exclude(case__monitoring=self.monitoring.pk).
249 with_case_count().
250 select_related('jst'))
251
252 def get_permission_object(self):
253 return self.monitoring
254
255 @cached_property
256 def monitoring(self):
257 return get_object_or_404(Monitoring, slug=self.kwargs['slug'])
258
259 def get_context_data(self, **kwargs):
260 context = super(MonitoringAssignView, self).get_context_data(**kwargs)
261 context['monitoring'] = self.monitoring
262 return context
263
264 def get_filterset_kwargs(self, filterset_class):
265 kw = super(MonitoringAssignView, self).get_filterset_kwargs(filterset_class)
266 return kw
267
268 def post(self, request, *args, **kwargs):
269 if not request.GET:
270 msg = _("You can not send letters without using filtering.")
271 messages.error(self.request, msg)
272 return HttpResponseRedirect(self.request.path)
273
274 if request.POST.get('all', 'no') == 'yes':
275 qs = self.get_filterset(self.get_filterset_class()).qs
276 else:
277 ids = request.POST.getlist('to_assign')
278 qs = Institution.objects.filter(pk__in=ids)
279 qs = qs.exclude(case__monitoring=self.monitoring.pk)
280
281 count = Case.objects.filter(monitoring=self.monitoring).count() or 0
282
283 to_assign_count = qs.count()
284 if to_assign_count > self.get_limit_simultaneously():
285 msg = _("You can not send %(count)d letters at once. "
286 "The maximum is %(limit)d. Use filtering.") % \
287 {'count': to_assign_count, 'limit': self.get_limit_simultaneously()}
288 messages.error(self.request, msg)
289 return HttpResponseRedirect(self.request.path)
290
291 for i, institution in enumerate(qs):
292 postfix = " #%d" % (i + count + 1,)
293 Letter.send_new_case(user=self.request.user,
294 monitoring=self.monitoring,
295 postfix=postfix,
296 institution=institution,
297 text=self.monitoring.template)
298 msg = _("%(count)d institutions was assigned " +
299 "to %(monitoring)s. The requests was sent.") % \
300 {'count': to_assign_count, 'monitoring': self.monitoring}
301 messages.success(self.request, msg)
302 url = reverse('monitorings:assign', kwargs={'slug': self.monitoring.slug})
303 return HttpResponseRedirect(url)
304
305
306 class MonitoringAutocomplete(autocomplete.Select2QuerySetView):
307 def get_queryset(self):
308 qs = Monitoring.objects
309 if self.q:
310 qs = qs.filter(name__icontains=self.q)
311
312 if not self.request.user.is_staff:
313 qs = qs.only_public()
314
315 return qs.all()
316
317
318 class UserMonitoringAutocomplete(autocomplete.Select2QuerySetView):
319 def get_queryset(self):
320 qs = (get_user_model().objects.
321 annotate(case_count=Count('case')).
322 filter(case_count__gt=0).all())
323 if self.q:
324 qs = qs.filter(name__icontains=self.q)
325 return qs.all()
326
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/feder/monitorings/views.py b/feder/monitorings/views.py
--- a/feder/monitorings/views.py
+++ b/feder/monitorings/views.py
@@ -60,6 +60,10 @@
return qs
+ def get_context_data(self, **kwargs):
+ kwargs['url_extra_kwargs'] = {'slug': self.object.slug}
+ return super(MonitoringDetailView, self).get_context_data(**kwargs)
+
def get_object_list(self, obj):
return (Case.objects.filter(monitoring=obj).
select_related('institution').
@@ -77,6 +81,10 @@
prefetch_related = ['questionary_set', ]
paginate_by = 25
+ def get_context_data(self, **kwargs):
+ kwargs['url_extra_kwargs'] = {'slug': self.object.slug}
+ return super(LetterListMonitoringView, self).get_context_data(**kwargs)
+
def get_object_list(self, obj):
return (Letter.objects.filter(record__case__monitoring=obj).
select_related('record__case').
@@ -93,6 +101,10 @@
prefetch_related = ['questionary_set', ]
paginate_by = 25
+ def get_context_data(self, **kwargs):
+ kwargs['url_extra_kwargs'] = {'slug': self.object.slug}
+ return super(DraftListMonitoringView, self).get_context_data(**kwargs)
+
def get_object_list(self, obj):
return (Letter.objects.filter(record__case__monitoring=obj).
is_draft().
|
{"golden_diff": "diff --git a/feder/monitorings/views.py b/feder/monitorings/views.py\n--- a/feder/monitorings/views.py\n+++ b/feder/monitorings/views.py\n@@ -60,6 +60,10 @@\n \n return qs\n \n+ def get_context_data(self, **kwargs):\n+ kwargs['url_extra_kwargs'] = {'slug': self.object.slug}\n+ return super(MonitoringDetailView, self).get_context_data(**kwargs)\n+\n def get_object_list(self, obj):\n return (Case.objects.filter(monitoring=obj).\n select_related('institution').\n@@ -77,6 +81,10 @@\n prefetch_related = ['questionary_set', ]\n paginate_by = 25\n \n+ def get_context_data(self, **kwargs):\n+ kwargs['url_extra_kwargs'] = {'slug': self.object.slug}\n+ return super(LetterListMonitoringView, self).get_context_data(**kwargs)\n+\n def get_object_list(self, obj):\n return (Letter.objects.filter(record__case__monitoring=obj).\n select_related('record__case').\n@@ -93,6 +101,10 @@\n prefetch_related = ['questionary_set', ]\n paginate_by = 25\n \n+ def get_context_data(self, **kwargs):\n+ kwargs['url_extra_kwargs'] = {'slug': self.object.slug}\n+ return super(DraftListMonitoringView, self).get_context_data(**kwargs)\n+\n def get_object_list(self, obj):\n return (Letter.objects.filter(record__case__monitoring=obj).\n is_draft().\n", "issue": "zmiana sposobu przewijania spraw z mailami z monitoringu\nW tej chwili mo\u017cna przeskakiwa\u0107 tylko o jedn\u0105 stron\u0119 do przodu i do ty\u0142u, a chodzi\u0142oby o to, \u017ceby mo\u017cna by\u0142o przeskoczy\u0107 o kilka lub na sam koniec. Jest tak:\r\n\r\n\r\n\r\nA lepiej jakby by\u0142o tak:\r\n\r\n\r\n\n", "before_files": [{"content": "from atom.ext.guardian.forms import TranslatedUserObjectPermissionsForm\nfrom atom.views import DeleteMessageMixin, UpdateMessageMixin\nfrom braces.views import (FormValidMessageMixin, LoginRequiredMixin,\n PermissionRequiredMixin, SelectRelatedMixin,\n UserFormKwargsMixin, PrefetchRelatedMixin)\nfrom cached_property import cached_property\nfrom dal import autocomplete\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied\nfrom django.urls import reverse, reverse_lazy\nfrom django.db.models import Count\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (CreateView, DeleteView, DetailView, FormView,\n UpdateView)\nfrom django_filters.views import FilterView\nfrom formtools.wizard.views import SessionWizardView\nfrom guardian.shortcuts import assign_perm\n\nfrom feder.cases.models import Case\nfrom feder.institutions.filters import InstitutionFilter\nfrom feder.institutions.models import Institution\nfrom feder.letters.models import Letter\nfrom feder.main.mixins import ExtraListMixin, RaisePermissionRequiredMixin\nfrom .filters import MonitoringFilter\nfrom .forms import (MonitoringForm, SaveTranslatedUserObjectPermissionsForm,\n SelectUserForm)\nfrom .models import Monitoring\n\n\nclass MonitoringListView(SelectRelatedMixin, FilterView):\n filterset_class = MonitoringFilter\n model = Monitoring\n select_related = ['user', ]\n paginate_by = 25\n\n def get_queryset(self):\n qs = super(MonitoringListView, self).get_queryset()\n\n if not self.request.user.is_staff:\n qs = qs.only_public()\n\n return qs.with_case_count()\n\n\nclass MonitoringDetailView(SelectRelatedMixin, PrefetchRelatedMixin,\n ExtraListMixin, DetailView):\n model = Monitoring\n select_related = ['user', ]\n prefetch_related 
= ['questionary_set', ]\n paginate_by = 25\n\n def get_queryset(self):\n qs = super(MonitoringDetailView, self).get_queryset()\n\n if not self.request.user.is_staff:\n qs = qs.only_public()\n\n return qs\n\n def get_object_list(self, obj):\n return (Case.objects.filter(monitoring=obj).\n select_related('institution').\n prefetch_related('task_set').\n with_record_max().\n with_letter().\n order_by('-record_max').\n all())\n\n\nclass LetterListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraListMixin, DetailView):\n model = Monitoring\n template_name_suffix = '_letter_list'\n select_related = ['user', ]\n prefetch_related = ['questionary_set', ]\n paginate_by = 25\n\n def get_object_list(self, obj):\n return (Letter.objects.filter(record__case__monitoring=obj).\n select_related('record__case').\n with_author().\n attachment_count().\n order_by('-created').\n all())\n\n\nclass DraftListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraListMixin, DetailView):\n model = Monitoring\n template_name_suffix = '_draft_list'\n select_related = ['user', ]\n prefetch_related = ['questionary_set', ]\n paginate_by = 25\n\n def get_object_list(self, obj):\n return (Letter.objects.filter(record__case__monitoring=obj).\n is_draft().\n select_related('record__case').\n with_author().\n attachment_count().\n order_by('-created').\n all())\n\n\nclass MonitoringCreateView(LoginRequiredMixin, PermissionRequiredMixin,\n UserFormKwargsMixin, CreateView):\n model = Monitoring\n template_name = 'monitorings/monitoring_form.html'\n form_class = MonitoringForm\n permission_required = 'monitorings.add_monitoring'\n raise_exception = True\n redirect_unauthenticated_users = True\n\n def get_form_valid_message(self):\n return _(\"{0} created!\").format(self.object)\n\n def form_valid(self, form):\n output = super(MonitoringCreateView, self).form_valid(form)\n default_perm = ['change_monitoring', 'delete_monitoring', 'add_questionary',\n 'change_questionary', 'delete_questionary', 'add_case',\n 'change_case', 'delete_case', 'add_task', 'change_task',\n 'delete_task', 'reply', 'view_alert', 'change_alert',\n 'delete_alert', 'manage_perm',\n 'select_survey', 'add_draft']\n for perm in default_perm:\n assign_perm(perm, self.request.user, form.instance)\n return output\n\n\nclass MonitoringUpdateView(RaisePermissionRequiredMixin, UserFormKwargsMixin,\n UpdateMessageMixin, FormValidMessageMixin, UpdateView):\n model = Monitoring\n form_class = MonitoringForm\n permission_required = 'monitorings.change_monitoring'\n\n\nclass MonitoringDeleteView(RaisePermissionRequiredMixin, DeleteMessageMixin,\n DeleteView):\n model = Monitoring\n success_url = reverse_lazy('monitorings:list')\n permission_required = 'monitorings.delete_monitoring'\n\n\nclass PermissionWizard(LoginRequiredMixin, SessionWizardView):\n form_list = [SelectUserForm, TranslatedUserObjectPermissionsForm]\n template_name = 'monitorings/permission_wizard.html'\n\n def perm_check(self):\n if not self.request.user.has_perm('monitorings.manage_perm',\n self.monitoring):\n raise PermissionDenied()\n\n @cached_property\n def monitoring(self):\n return Monitoring.objects.get(slug=self.kwargs['slug'])\n\n def get_context_data(self, *args, **kwargs):\n context = super(PermissionWizard, self).get_context_data(*args, **kwargs)\n context['object'] = self.monitoring\n return context\n\n def get_form_kwargs(self, step=None):\n kw = super(PermissionWizard, self).get_form_kwargs(step)\n self.perm_check()\n if step == '1':\n user_pk = 
self.storage.get_step_data('0').get('0-user')[0]\n user = get_user_model().objects.get(pk=user_pk)\n kw['user'] = user\n kw['obj'] = self.monitoring\n return kw\n\n def get_success_message(self):\n return _(\"Permissions to {monitoring} updated!\").format(monitoring=self.object)\n\n def done(self, form_list, *args, **kwargs):\n form_list[1].save_obj_perms()\n self.object = form_list[1].obj\n messages.success(self.request, self.get_success_message())\n url = reverse('monitorings:perm', kwargs={'slug': self.object.slug})\n return HttpResponseRedirect(url)\n\n\nclass MonitoringPermissionView(RaisePermissionRequiredMixin, SelectRelatedMixin, DetailView):\n model = Monitoring\n template_name_suffix = '_permissions'\n select_related = ['user', ]\n permission_required = 'monitorings.manage_perm'\n\n def get_context_data(self, **kwargs):\n context = super(MonitoringPermissionView, self).get_context_data(**kwargs)\n context['user_list'], context['index'] = self.object.permission_map()\n return context\n\n\nclass MonitoringUpdatePermissionView(RaisePermissionRequiredMixin, SelectRelatedMixin, FormView):\n form_class = SaveTranslatedUserObjectPermissionsForm\n template_name = 'monitorings/monitoring_form.html'\n permission_required = 'monitorings.manage_perm'\n\n def get_permission_object(self):\n return self.get_monitoring()\n\n def get_user(self):\n if not getattr(self, 'user', None):\n self.user = get_object_or_404(get_user_model(), id=self.kwargs['user_pk'])\n return self.user\n\n def get_monitoring(self):\n if not getattr(self, 'monitoring', None):\n self.monitoring = get_object_or_404(Monitoring, slug=self.kwargs['slug'])\n return self.monitoring\n\n def get_context_data(self, **kwargs):\n context = super(MonitoringUpdatePermissionView, self).get_context_data(**kwargs)\n context['object'] = self.get_monitoring()\n return context\n\n def get_form_kwargs(self):\n kw = super(MonitoringUpdatePermissionView, self).get_form_kwargs()\n kw.update({'user': self.get_user(), 'obj': self.get_monitoring()})\n return kw\n\n def get_success_message(self):\n return (_(\"Permissions to {monitoring} of {user} updated!\").\n format(monitoring=self.monitoring, user=self.user))\n\n def form_valid(self, form):\n form.save_obj_perms()\n messages.success(self.request, self.get_success_message())\n url = reverse('monitorings:perm', kwargs={'slug': self.get_monitoring().slug})\n return HttpResponseRedirect(url)\n\n\nclass MonitoringAssignView(RaisePermissionRequiredMixin, FilterView):\n model = Institution\n filterset_class = InstitutionFilter\n permission_required = 'monitorings.change_monitoring'\n template_name = 'monitorings/institution_assign.html'\n paginate_by = 50\n LIMIT = 500\n\n def get_limit_simultaneously(self):\n return self.LIMIT\n\n def get_queryset(self):\n qs = super(MonitoringAssignView, self).get_queryset()\n return (qs.exclude(case__monitoring=self.monitoring.pk).\n with_case_count().\n select_related('jst'))\n\n def get_permission_object(self):\n return self.monitoring\n\n @cached_property\n def monitoring(self):\n return get_object_or_404(Monitoring, slug=self.kwargs['slug'])\n\n def get_context_data(self, **kwargs):\n context = super(MonitoringAssignView, self).get_context_data(**kwargs)\n context['monitoring'] = self.monitoring\n return context\n\n def get_filterset_kwargs(self, filterset_class):\n kw = super(MonitoringAssignView, self).get_filterset_kwargs(filterset_class)\n return kw\n\n def post(self, request, *args, **kwargs):\n if not request.GET:\n msg = _(\"You can not send letters 
without using filtering.\")\n messages.error(self.request, msg)\n return HttpResponseRedirect(self.request.path)\n\n if request.POST.get('all', 'no') == 'yes':\n qs = self.get_filterset(self.get_filterset_class()).qs\n else:\n ids = request.POST.getlist('to_assign')\n qs = Institution.objects.filter(pk__in=ids)\n qs = qs.exclude(case__monitoring=self.monitoring.pk)\n\n count = Case.objects.filter(monitoring=self.monitoring).count() or 0\n\n to_assign_count = qs.count()\n if to_assign_count > self.get_limit_simultaneously():\n msg = _(\"You can not send %(count)d letters at once. \"\n \"The maximum is %(limit)d. Use filtering.\") % \\\n {'count': to_assign_count, 'limit': self.get_limit_simultaneously()}\n messages.error(self.request, msg)\n return HttpResponseRedirect(self.request.path)\n\n for i, institution in enumerate(qs):\n postfix = \" #%d\" % (i + count + 1,)\n Letter.send_new_case(user=self.request.user,\n monitoring=self.monitoring,\n postfix=postfix,\n institution=institution,\n text=self.monitoring.template)\n msg = _(\"%(count)d institutions was assigned \" +\n \"to %(monitoring)s. The requests was sent.\") % \\\n {'count': to_assign_count, 'monitoring': self.monitoring}\n messages.success(self.request, msg)\n url = reverse('monitorings:assign', kwargs={'slug': self.monitoring.slug})\n return HttpResponseRedirect(url)\n\n\nclass MonitoringAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n qs = Monitoring.objects\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n\n if not self.request.user.is_staff:\n qs = qs.only_public()\n\n return qs.all()\n\n\nclass UserMonitoringAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n qs = (get_user_model().objects.\n annotate(case_count=Count('case')).\n filter(case_count__gt=0).all())\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n return qs.all()\n", "path": "feder/monitorings/views.py"}], "after_files": [{"content": "from atom.ext.guardian.forms import TranslatedUserObjectPermissionsForm\nfrom atom.views import DeleteMessageMixin, UpdateMessageMixin\nfrom braces.views import (FormValidMessageMixin, LoginRequiredMixin,\n PermissionRequiredMixin, SelectRelatedMixin,\n UserFormKwargsMixin, PrefetchRelatedMixin)\nfrom cached_property import cached_property\nfrom dal import autocomplete\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied\nfrom django.urls import reverse, reverse_lazy\nfrom django.db.models import Count\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (CreateView, DeleteView, DetailView, FormView,\n UpdateView)\nfrom django_filters.views import FilterView\nfrom formtools.wizard.views import SessionWizardView\nfrom guardian.shortcuts import assign_perm\n\nfrom feder.cases.models import Case\nfrom feder.institutions.filters import InstitutionFilter\nfrom feder.institutions.models import Institution\nfrom feder.letters.models import Letter\nfrom feder.main.mixins import ExtraListMixin, RaisePermissionRequiredMixin\nfrom .filters import MonitoringFilter\nfrom .forms import (MonitoringForm, SaveTranslatedUserObjectPermissionsForm,\n SelectUserForm)\nfrom .models import Monitoring\n\n\nclass MonitoringListView(SelectRelatedMixin, FilterView):\n filterset_class = MonitoringFilter\n model = Monitoring\n select_related = ['user', ]\n paginate_by = 25\n\n def 
get_queryset(self):\n qs = super(MonitoringListView, self).get_queryset()\n\n if not self.request.user.is_staff:\n qs = qs.only_public()\n\n return qs.with_case_count()\n\n\nclass MonitoringDetailView(SelectRelatedMixin, PrefetchRelatedMixin,\n ExtraListMixin, DetailView):\n model = Monitoring\n select_related = ['user', ]\n prefetch_related = ['questionary_set', ]\n paginate_by = 25\n\n def get_queryset(self):\n qs = super(MonitoringDetailView, self).get_queryset()\n\n if not self.request.user.is_staff:\n qs = qs.only_public()\n\n return qs\n\n def get_context_data(self, **kwargs):\n kwargs['url_extra_kwargs'] = {'slug': self.object.slug}\n return super(MonitoringDetailView, self).get_context_data(**kwargs)\n\n def get_object_list(self, obj):\n return (Case.objects.filter(monitoring=obj).\n select_related('institution').\n prefetch_related('task_set').\n with_record_max().\n with_letter().\n order_by('-record_max').\n all())\n\n\nclass LetterListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraListMixin, DetailView):\n model = Monitoring\n template_name_suffix = '_letter_list'\n select_related = ['user', ]\n prefetch_related = ['questionary_set', ]\n paginate_by = 25\n\n def get_context_data(self, **kwargs):\n kwargs['url_extra_kwargs'] = {'slug': self.object.slug}\n return super(LetterListMonitoringView, self).get_context_data(**kwargs)\n\n def get_object_list(self, obj):\n return (Letter.objects.filter(record__case__monitoring=obj).\n select_related('record__case').\n with_author().\n attachment_count().\n order_by('-created').\n all())\n\n\nclass DraftListMonitoringView(SelectRelatedMixin, PrefetchRelatedMixin, ExtraListMixin, DetailView):\n model = Monitoring\n template_name_suffix = '_draft_list'\n select_related = ['user', ]\n prefetch_related = ['questionary_set', ]\n paginate_by = 25\n\n def get_context_data(self, **kwargs):\n kwargs['url_extra_kwargs'] = {'slug': self.object.slug}\n return super(DraftListMonitoringView, self).get_context_data(**kwargs)\n\n def get_object_list(self, obj):\n return (Letter.objects.filter(record__case__monitoring=obj).\n is_draft().\n select_related('record__case').\n with_author().\n attachment_count().\n order_by('-created').\n all())\n\n\nclass MonitoringCreateView(LoginRequiredMixin, PermissionRequiredMixin,\n UserFormKwargsMixin, CreateView):\n model = Monitoring\n template_name = 'monitorings/monitoring_form.html'\n form_class = MonitoringForm\n permission_required = 'monitorings.add_monitoring'\n raise_exception = True\n redirect_unauthenticated_users = True\n\n def get_form_valid_message(self):\n return _(\"{0} created!\").format(self.object)\n\n def form_valid(self, form):\n output = super(MonitoringCreateView, self).form_valid(form)\n default_perm = ['change_monitoring', 'delete_monitoring', 'add_questionary',\n 'change_questionary', 'delete_questionary', 'add_case',\n 'change_case', 'delete_case', 'add_task', 'change_task',\n 'delete_task', 'reply', 'view_alert', 'change_alert',\n 'delete_alert', 'manage_perm',\n 'select_survey', 'add_draft']\n for perm in default_perm:\n assign_perm(perm, self.request.user, form.instance)\n return output\n\n\nclass MonitoringUpdateView(RaisePermissionRequiredMixin, UserFormKwargsMixin,\n UpdateMessageMixin, FormValidMessageMixin, UpdateView):\n model = Monitoring\n form_class = MonitoringForm\n permission_required = 'monitorings.change_monitoring'\n\n\nclass MonitoringDeleteView(RaisePermissionRequiredMixin, DeleteMessageMixin,\n DeleteView):\n model = Monitoring\n success_url = 
reverse_lazy('monitorings:list')\n permission_required = 'monitorings.delete_monitoring'\n\n\nclass PermissionWizard(LoginRequiredMixin, SessionWizardView):\n form_list = [SelectUserForm, TranslatedUserObjectPermissionsForm]\n template_name = 'monitorings/permission_wizard.html'\n\n def perm_check(self):\n if not self.request.user.has_perm('monitorings.manage_perm',\n self.monitoring):\n raise PermissionDenied()\n\n @cached_property\n def monitoring(self):\n return Monitoring.objects.get(slug=self.kwargs['slug'])\n\n def get_context_data(self, *args, **kwargs):\n context = super(PermissionWizard, self).get_context_data(*args, **kwargs)\n context['object'] = self.monitoring\n return context\n\n def get_form_kwargs(self, step=None):\n kw = super(PermissionWizard, self).get_form_kwargs(step)\n self.perm_check()\n if step == '1':\n user_pk = self.storage.get_step_data('0').get('0-user')[0]\n user = get_user_model().objects.get(pk=user_pk)\n kw['user'] = user\n kw['obj'] = self.monitoring\n return kw\n\n def get_success_message(self):\n return _(\"Permissions to {monitoring} updated!\").format(monitoring=self.object)\n\n def done(self, form_list, *args, **kwargs):\n form_list[1].save_obj_perms()\n self.object = form_list[1].obj\n messages.success(self.request, self.get_success_message())\n url = reverse('monitorings:perm', kwargs={'slug': self.object.slug})\n return HttpResponseRedirect(url)\n\n\nclass MonitoringPermissionView(RaisePermissionRequiredMixin, SelectRelatedMixin, DetailView):\n model = Monitoring\n template_name_suffix = '_permissions'\n select_related = ['user', ]\n permission_required = 'monitorings.manage_perm'\n\n def get_context_data(self, **kwargs):\n context = super(MonitoringPermissionView, self).get_context_data(**kwargs)\n context['user_list'], context['index'] = self.object.permission_map()\n return context\n\n\nclass MonitoringUpdatePermissionView(RaisePermissionRequiredMixin, SelectRelatedMixin, FormView):\n form_class = SaveTranslatedUserObjectPermissionsForm\n template_name = 'monitorings/monitoring_form.html'\n permission_required = 'monitorings.manage_perm'\n\n def get_permission_object(self):\n return self.get_monitoring()\n\n def get_user(self):\n if not getattr(self, 'user', None):\n self.user = get_object_or_404(get_user_model(), id=self.kwargs['user_pk'])\n return self.user\n\n def get_monitoring(self):\n if not getattr(self, 'monitoring', None):\n self.monitoring = get_object_or_404(Monitoring, slug=self.kwargs['slug'])\n return self.monitoring\n\n def get_context_data(self, **kwargs):\n context = super(MonitoringUpdatePermissionView, self).get_context_data(**kwargs)\n context['object'] = self.get_monitoring()\n return context\n\n def get_form_kwargs(self):\n kw = super(MonitoringUpdatePermissionView, self).get_form_kwargs()\n kw.update({'user': self.get_user(), 'obj': self.get_monitoring()})\n return kw\n\n def get_success_message(self):\n return (_(\"Permissions to {monitoring} of {user} updated!\").\n format(monitoring=self.monitoring, user=self.user))\n\n def form_valid(self, form):\n form.save_obj_perms()\n messages.success(self.request, self.get_success_message())\n url = reverse('monitorings:perm', kwargs={'slug': self.get_monitoring().slug})\n return HttpResponseRedirect(url)\n\n\nclass MonitoringAssignView(RaisePermissionRequiredMixin, FilterView):\n model = Institution\n filterset_class = InstitutionFilter\n permission_required = 'monitorings.change_monitoring'\n template_name = 'monitorings/institution_assign.html'\n paginate_by = 50\n LIMIT = 
500\n\n def get_limit_simultaneously(self):\n return self.LIMIT\n\n def get_queryset(self):\n qs = super(MonitoringAssignView, self).get_queryset()\n return (qs.exclude(case__monitoring=self.monitoring.pk).\n with_case_count().\n select_related('jst'))\n\n def get_permission_object(self):\n return self.monitoring\n\n @cached_property\n def monitoring(self):\n return get_object_or_404(Monitoring, slug=self.kwargs['slug'])\n\n def get_context_data(self, **kwargs):\n context = super(MonitoringAssignView, self).get_context_data(**kwargs)\n context['monitoring'] = self.monitoring\n return context\n\n def get_filterset_kwargs(self, filterset_class):\n kw = super(MonitoringAssignView, self).get_filterset_kwargs(filterset_class)\n return kw\n\n def post(self, request, *args, **kwargs):\n if not request.GET:\n msg = _(\"You can not send letters without using filtering.\")\n messages.error(self.request, msg)\n return HttpResponseRedirect(self.request.path)\n\n if request.POST.get('all', 'no') == 'yes':\n qs = self.get_filterset(self.get_filterset_class()).qs\n else:\n ids = request.POST.getlist('to_assign')\n qs = Institution.objects.filter(pk__in=ids)\n qs = qs.exclude(case__monitoring=self.monitoring.pk)\n\n count = Case.objects.filter(monitoring=self.monitoring).count() or 0\n\n to_assign_count = qs.count()\n if to_assign_count > self.get_limit_simultaneously():\n msg = _(\"You can not send %(count)d letters at once. \"\n \"The maximum is %(limit)d. Use filtering.\") % \\\n {'count': to_assign_count, 'limit': self.get_limit_simultaneously()}\n messages.error(self.request, msg)\n return HttpResponseRedirect(self.request.path)\n\n for i, institution in enumerate(qs):\n postfix = \" #%d\" % (i + count + 1,)\n Letter.send_new_case(user=self.request.user,\n monitoring=self.monitoring,\n postfix=postfix,\n institution=institution,\n text=self.monitoring.template)\n msg = _(\"%(count)d institutions was assigned \" +\n \"to %(monitoring)s. The requests was sent.\") % \\\n {'count': to_assign_count, 'monitoring': self.monitoring}\n messages.success(self.request, msg)\n url = reverse('monitorings:assign', kwargs={'slug': self.monitoring.slug})\n return HttpResponseRedirect(url)\n\n\nclass MonitoringAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n qs = Monitoring.objects\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n\n if not self.request.user.is_staff:\n qs = qs.only_public()\n\n return qs.all()\n\n\nclass UserMonitoringAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n qs = (get_user_model().objects.\n annotate(case_count=Count('case')).\n filter(case_count__gt=0).all())\n if self.q:\n qs = qs.filter(name__icontains=self.q)\n return qs.all()\n", "path": "feder/monitorings/views.py"}]}
| 3,917 | 342 |
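Aside on the fix above: it passes `url_extra_kwargs` (the monitoring slug) into the template context so pagination links can be rebuilt per page. The multi-page "jump ahead or to the end" navigation the issue asks for can also be produced with Django's stock `Paginator`; a minimal sketch, assuming Django 3.2+ for `get_elided_page_range` — the helper name and the `on_each_side`/`on_ends` values are illustrative, not part of the feder codebase:

```python
from django.core.paginator import Paginator


def elided_pages(object_list, page_number, per_page=25):
    """Return the current page plus an elided page range such as
    1 2 ... 8 [9] 10 ... 41 42, suitable for a pagination widget."""
    paginator = Paginator(object_list, per_page)
    page = paginator.get_page(page_number)
    # on_each_side / on_ends control how many page links surround the
    # current page and how many are kept at both ends of the range.
    page_range = paginator.get_elided_page_range(
        page.number, on_each_side=2, on_ends=1
    )
    return page, list(page_range)
```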
gh_patches_debug_668 | rasdani/github-patches | git_diff | liqd__a4-opin-388 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
timeline wrong way?
the phases in the timeline seem to be sorted in the wrong direction:


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/phases/models.py`
Content:
```
1 from django.core.exceptions import ValidationError
2 from django.db import models
3 from django.utils import timezone
4 from django.utils.translation import ugettext as _
5
6 from euth.modules import models as modules_models
7
8 from . import content
9 from .validators import validate_content
10
11
12 class PhasesQuerySet(models.QuerySet):
13
14 def active_phases(self):
15 now = timezone.now()
16 return self.filter(start_date__lte=now, end_date__gt=now)
17
18
19 class Phase(models.Model):
20 name = models.CharField(max_length=80)
21 description = models.TextField(max_length=300)
22 type = models.CharField(max_length=128, validators=[validate_content])
23 module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)
24 start_date = models.DateTimeField(blank=True, null=True)
25 end_date = models.DateTimeField(blank=True, null=True)
26
27 objects = PhasesQuerySet.as_manager()
28
29 def __str__(self):
30 return '{} ({})'.format(self.name, self.type)
31
32 def content(self):
33 return content[self.type]
34
35 def clean(self):
36 if self.end_date and self.start_date:
37 if self.end_date < self.start_date:
38 raise ValidationError({
39 'end_date': _('End date can not be smaller'
40 'than the start date.')
41 })
42 super().clean()
43
44 @property
45 def view(self):
46 return content[self.type].view
47
48 def has_feature(self, feature, model):
49 return content[self.type].has_feature(feature, model)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/euth/phases/models.py b/euth/phases/models.py
--- a/euth/phases/models.py
+++ b/euth/phases/models.py
@@ -26,6 +26,9 @@
objects = PhasesQuerySet.as_manager()
+ class Meta:
+ ordering = ['type']
+
def __str__(self):
return '{} ({})'.format(self.name, self.type)
|
{"golden_diff": "diff --git a/euth/phases/models.py b/euth/phases/models.py\n--- a/euth/phases/models.py\n+++ b/euth/phases/models.py\n@@ -26,6 +26,9 @@\n \n objects = PhasesQuerySet.as_manager()\n \n+ class Meta:\n+ ordering = ['type']\n+\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n", "issue": "timeline wrong way?\nthe phases in the timeline seem to be sorted in the wrong direction:\n\n\n\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom euth.modules import models as modules_models\n\nfrom . import content\nfrom .validators import validate_content\n\n\nclass PhasesQuerySet(models.QuerySet):\n\n def active_phases(self):\n now = timezone.now()\n return self.filter(start_date__lte=now, end_date__gt=now)\n\n\nclass Phase(models.Model):\n name = models.CharField(max_length=80)\n description = models.TextField(max_length=300)\n type = models.CharField(max_length=128, validators=[validate_content])\n module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)\n start_date = models.DateTimeField(blank=True, null=True)\n end_date = models.DateTimeField(blank=True, null=True)\n\n objects = PhasesQuerySet.as_manager()\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n\n def content(self):\n return content[self.type]\n\n def clean(self):\n if self.end_date and self.start_date:\n if self.end_date < self.start_date:\n raise ValidationError({\n 'end_date': _('End date can not be smaller'\n 'than the start date.')\n })\n super().clean()\n\n @property\n def view(self):\n return content[self.type].view\n\n def has_feature(self, feature, model):\n return content[self.type].has_feature(feature, model)\n", "path": "euth/phases/models.py"}], "after_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom euth.modules import models as modules_models\n\nfrom . import content\nfrom .validators import validate_content\n\n\nclass PhasesQuerySet(models.QuerySet):\n\n def active_phases(self):\n now = timezone.now()\n return self.filter(start_date__lte=now, end_date__gt=now)\n\n\nclass Phase(models.Model):\n name = models.CharField(max_length=80)\n description = models.TextField(max_length=300)\n type = models.CharField(max_length=128, validators=[validate_content])\n module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)\n start_date = models.DateTimeField(blank=True, null=True)\n end_date = models.DateTimeField(blank=True, null=True)\n\n objects = PhasesQuerySet.as_manager()\n\n class Meta:\n ordering = ['type']\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n\n def content(self):\n return content[self.type]\n\n def clean(self):\n if self.end_date and self.start_date:\n if self.end_date < self.start_date:\n raise ValidationError({\n 'end_date': _('End date can not be smaller'\n 'than the start date.')\n })\n super().clean()\n\n @property\n def view(self):\n return content[self.type].view\n\n def has_feature(self, feature, model):\n return content[self.type].has_feature(feature, model)\n", "path": "euth/phases/models.py"}]}
| 865 | 93 |
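Aside on the fix above: the patch orders phases through Django's model-level default ordering rather than sorting in the view or template. A minimal standalone sketch (hypothetical model name) of how `Meta.ordering` interacts with an explicit `order_by()`:

```python
from django.db import models


class TimelinePhase(models.Model):
    # Hypothetical stand-in for euth.phases.models.Phase
    name = models.CharField(max_length=80)
    type = models.CharField(max_length=128)
    start_date = models.DateTimeField(blank=True, null=True)

    class Meta:
        # Default sort applied to every queryset; prefix a field with "-"
        # (e.g. ["-start_date"]) to reverse the direction.
        ordering = ["type"]


# TimelinePhase.objects.all() follows Meta.ordering (type, ascending),
# while an explicit .order_by("-start_date") always overrides it.
```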
gh_patches_debug_29172 | rasdani/github-patches | git_diff | goauthentik__authentik-4876 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fixed light/dark theme setting
Hello, is it possible to set the dark and light theme fixed? With the OS autodetection I have otherwise problems with my different logos that I use in my Tenants settings. Light logos are not visible to users on light platforms, dark logos are not visible to users on dark platforms.
Thanks in advanced
pupazze
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/tenants/api.py`
Content:
```
1 """Serializer for tenant models"""
2 from typing import Any
3
4 from drf_spectacular.utils import extend_schema
5 from rest_framework.decorators import action
6 from rest_framework.exceptions import ValidationError
7 from rest_framework.fields import CharField, ListField
8 from rest_framework.filters import OrderingFilter, SearchFilter
9 from rest_framework.permissions import AllowAny
10 from rest_framework.request import Request
11 from rest_framework.response import Response
12 from rest_framework.serializers import ModelSerializer
13 from rest_framework.viewsets import ModelViewSet
14
15 from authentik.api.authorization import SecretKeyFilter
16 from authentik.core.api.used_by import UsedByMixin
17 from authentik.core.api.utils import PassiveSerializer
18 from authentik.lib.config import CONFIG
19 from authentik.tenants.models import Tenant
20
21
22 class FooterLinkSerializer(PassiveSerializer):
23 """Links returned in Config API"""
24
25 href = CharField(read_only=True)
26 name = CharField(read_only=True)
27
28
29 class TenantSerializer(ModelSerializer):
30 """Tenant Serializer"""
31
32 def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:
33 if attrs.get("default", False):
34 tenants = Tenant.objects.filter(default=True)
35 if self.instance:
36 tenants = tenants.exclude(pk=self.instance.pk)
37 if tenants.exists():
38 raise ValidationError("Only a single Tenant can be set as default.")
39 return super().validate(attrs)
40
41 class Meta:
42 model = Tenant
43 fields = [
44 "tenant_uuid",
45 "domain",
46 "default",
47 "branding_title",
48 "branding_logo",
49 "branding_favicon",
50 "flow_authentication",
51 "flow_invalidation",
52 "flow_recovery",
53 "flow_unenrollment",
54 "flow_user_settings",
55 "flow_device_code",
56 "event_retention",
57 "web_certificate",
58 "attributes",
59 ]
60
61
62 class CurrentTenantSerializer(PassiveSerializer):
63 """Partial tenant information for styling"""
64
65 matched_domain = CharField(source="domain")
66 branding_title = CharField()
67 branding_logo = CharField()
68 branding_favicon = CharField()
69 ui_footer_links = ListField(
70 child=FooterLinkSerializer(),
71 read_only=True,
72 default=CONFIG.y("footer_links", []),
73 )
74
75 flow_authentication = CharField(source="flow_authentication.slug", required=False)
76 flow_invalidation = CharField(source="flow_invalidation.slug", required=False)
77 flow_recovery = CharField(source="flow_recovery.slug", required=False)
78 flow_unenrollment = CharField(source="flow_unenrollment.slug", required=False)
79 flow_user_settings = CharField(source="flow_user_settings.slug", required=False)
80 flow_device_code = CharField(source="flow_device_code.slug", required=False)
81
82 default_locale = CharField(read_only=True)
83
84
85 class TenantViewSet(UsedByMixin, ModelViewSet):
86 """Tenant Viewset"""
87
88 queryset = Tenant.objects.all()
89 serializer_class = TenantSerializer
90 search_fields = [
91 "domain",
92 "branding_title",
93 "web_certificate__name",
94 ]
95 filterset_fields = [
96 "tenant_uuid",
97 "domain",
98 "default",
99 "branding_title",
100 "branding_logo",
101 "branding_favicon",
102 "flow_authentication",
103 "flow_invalidation",
104 "flow_recovery",
105 "flow_unenrollment",
106 "flow_user_settings",
107 "flow_device_code",
108 "event_retention",
109 "web_certificate",
110 ]
111 ordering = ["domain"]
112
113 filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]
114
115 @extend_schema(
116 responses=CurrentTenantSerializer(many=False),
117 )
118 @action(methods=["GET"], detail=False, permission_classes=[AllowAny])
119 def current(self, request: Request) -> Response:
120 """Get current tenant"""
121 tenant: Tenant = request._request.tenant
122 return Response(CurrentTenantSerializer(tenant).data)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/tenants/api.py b/authentik/tenants/api.py
--- a/authentik/tenants/api.py
+++ b/authentik/tenants/api.py
@@ -1,10 +1,11 @@
"""Serializer for tenant models"""
from typing import Any
+from django.db import models
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
-from rest_framework.fields import CharField, ListField
+from rest_framework.fields import CharField, ChoiceField, ListField
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.permissions import AllowAny
from rest_framework.request import Request
@@ -59,6 +60,14 @@
]
+class Themes(models.TextChoices):
+ """Themes"""
+
+ AUTOMATIC = "automatic"
+ LIGHT = "light"
+ DARK = "dark"
+
+
class CurrentTenantSerializer(PassiveSerializer):
"""Partial tenant information for styling"""
@@ -71,6 +80,12 @@
read_only=True,
default=CONFIG.y("footer_links", []),
)
+ ui_theme = ChoiceField(
+ choices=Themes.choices,
+ source="attributes.settings.theme.base",
+ default=Themes.AUTOMATIC,
+ read_only=True,
+ )
flow_authentication = CharField(source="flow_authentication.slug", required=False)
flow_invalidation = CharField(source="flow_invalidation.slug", required=False)
|
{"golden_diff": "diff --git a/authentik/tenants/api.py b/authentik/tenants/api.py\n--- a/authentik/tenants/api.py\n+++ b/authentik/tenants/api.py\n@@ -1,10 +1,11 @@\n \"\"\"Serializer for tenant models\"\"\"\n from typing import Any\n \n+from django.db import models\n from drf_spectacular.utils import extend_schema\n from rest_framework.decorators import action\n from rest_framework.exceptions import ValidationError\n-from rest_framework.fields import CharField, ListField\n+from rest_framework.fields import CharField, ChoiceField, ListField\n from rest_framework.filters import OrderingFilter, SearchFilter\n from rest_framework.permissions import AllowAny\n from rest_framework.request import Request\n@@ -59,6 +60,14 @@\n ]\n \n \n+class Themes(models.TextChoices):\n+ \"\"\"Themes\"\"\"\n+\n+ AUTOMATIC = \"automatic\"\n+ LIGHT = \"light\"\n+ DARK = \"dark\"\n+\n+\n class CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n \n@@ -71,6 +80,12 @@\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n+ ui_theme = ChoiceField(\n+ choices=Themes.choices,\n+ source=\"attributes.settings.theme.base\",\n+ default=Themes.AUTOMATIC,\n+ read_only=True,\n+ )\n \n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n", "issue": "Fixed light/dark theme setting\nHello, is it possible to set the dark and light theme fixed? With the OS autodetection I have otherwise problems with my different logos that I use in my Tenants settings. Light logos are not visible to users on light platforms, dark logos are not visible to users on dark platforms.\r\nThanks in advanced\r\npupazze\n", "before_files": [{"content": "\"\"\"Serializer for tenant models\"\"\"\nfrom typing import Any\n\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import CharField, ListField\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.api.authorization import SecretKeyFilter\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.config import CONFIG\nfrom authentik.tenants.models import Tenant\n\n\nclass FooterLinkSerializer(PassiveSerializer):\n \"\"\"Links returned in Config API\"\"\"\n\n href = CharField(read_only=True)\n name = CharField(read_only=True)\n\n\nclass TenantSerializer(ModelSerializer):\n \"\"\"Tenant Serializer\"\"\"\n\n def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:\n if attrs.get(\"default\", False):\n tenants = Tenant.objects.filter(default=True)\n if self.instance:\n tenants = tenants.exclude(pk=self.instance.pk)\n if tenants.exists():\n raise ValidationError(\"Only a single Tenant can be set as default.\")\n return super().validate(attrs)\n\n class Meta:\n model = Tenant\n fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n 
\"web_certificate\",\n \"attributes\",\n ]\n\n\nclass CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n\n matched_domain = CharField(source=\"domain\")\n branding_title = CharField()\n branding_logo = CharField()\n branding_favicon = CharField()\n ui_footer_links = ListField(\n child=FooterLinkSerializer(),\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n\n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n flow_recovery = CharField(source=\"flow_recovery.slug\", required=False)\n flow_unenrollment = CharField(source=\"flow_unenrollment.slug\", required=False)\n flow_user_settings = CharField(source=\"flow_user_settings.slug\", required=False)\n flow_device_code = CharField(source=\"flow_device_code.slug\", required=False)\n\n default_locale = CharField(read_only=True)\n\n\nclass TenantViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Tenant Viewset\"\"\"\n\n queryset = Tenant.objects.all()\n serializer_class = TenantSerializer\n search_fields = [\n \"domain\",\n \"branding_title\",\n \"web_certificate__name\",\n ]\n filterset_fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n \"web_certificate\",\n ]\n ordering = [\"domain\"]\n\n filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]\n\n @extend_schema(\n responses=CurrentTenantSerializer(many=False),\n )\n @action(methods=[\"GET\"], detail=False, permission_classes=[AllowAny])\n def current(self, request: Request) -> Response:\n \"\"\"Get current tenant\"\"\"\n tenant: Tenant = request._request.tenant\n return Response(CurrentTenantSerializer(tenant).data)\n", "path": "authentik/tenants/api.py"}], "after_files": [{"content": "\"\"\"Serializer for tenant models\"\"\"\nfrom typing import Any\n\nfrom django.db import models\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import CharField, ChoiceField, ListField\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.api.authorization import SecretKeyFilter\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.config import CONFIG\nfrom authentik.tenants.models import Tenant\n\n\nclass FooterLinkSerializer(PassiveSerializer):\n \"\"\"Links returned in Config API\"\"\"\n\n href = CharField(read_only=True)\n name = CharField(read_only=True)\n\n\nclass TenantSerializer(ModelSerializer):\n \"\"\"Tenant Serializer\"\"\"\n\n def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:\n if attrs.get(\"default\", False):\n tenants = Tenant.objects.filter(default=True)\n if self.instance:\n tenants = tenants.exclude(pk=self.instance.pk)\n if tenants.exists():\n raise ValidationError(\"Only a single Tenant can be set as default.\")\n return super().validate(attrs)\n\n class Meta:\n model = Tenant\n 
fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n \"web_certificate\",\n \"attributes\",\n ]\n\n\nclass Themes(models.TextChoices):\n \"\"\"Themes\"\"\"\n\n AUTOMATIC = \"automatic\"\n LIGHT = \"light\"\n DARK = \"dark\"\n\n\nclass CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n\n matched_domain = CharField(source=\"domain\")\n branding_title = CharField()\n branding_logo = CharField()\n branding_favicon = CharField()\n ui_footer_links = ListField(\n child=FooterLinkSerializer(),\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n ui_theme = ChoiceField(\n choices=Themes.choices,\n source=\"attributes.settings.theme.base\",\n default=Themes.AUTOMATIC,\n read_only=True,\n )\n\n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n flow_recovery = CharField(source=\"flow_recovery.slug\", required=False)\n flow_unenrollment = CharField(source=\"flow_unenrollment.slug\", required=False)\n flow_user_settings = CharField(source=\"flow_user_settings.slug\", required=False)\n flow_device_code = CharField(source=\"flow_device_code.slug\", required=False)\n\n default_locale = CharField(read_only=True)\n\n\nclass TenantViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Tenant Viewset\"\"\"\n\n queryset = Tenant.objects.all()\n serializer_class = TenantSerializer\n search_fields = [\n \"domain\",\n \"branding_title\",\n \"web_certificate__name\",\n ]\n filterset_fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n \"web_certificate\",\n ]\n ordering = [\"domain\"]\n\n filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]\n\n @extend_schema(\n responses=CurrentTenantSerializer(many=False),\n )\n @action(methods=[\"GET\"], detail=False, permission_classes=[AllowAny])\n def current(self, request: Request) -> Response:\n \"\"\"Get current tenant\"\"\"\n tenant: Tenant = request._request.tenant\n return Response(CurrentTenantSerializer(tenant).data)\n", "path": "authentik/tenants/api.py"}]}
| 1,418 | 327 |
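The authentik fix in this record exposes the tenant's pinned theme through a read-only DRF `ChoiceField` backed by a Django `TextChoices` enum. Below is a minimal, self-contained sketch of that pattern; the `Themes` values, the dotted `source` path, and the field options are taken from the diff above, while `TenantThemeSerializer` is an illustrative stand-in for authentik's own serializer, not its real API.

```python
# Sketch only: assumes a configured Django project with djangorestframework installed.
from django.db import models
from rest_framework import serializers


class Themes(models.TextChoices):
    """Fixed set of UI themes a tenant can pin (values from the diff above)."""

    AUTOMATIC = "automatic"
    LIGHT = "light"
    DARK = "dark"


class TenantThemeSerializer(serializers.Serializer):
    # Read-only: the API reports the pinned theme but does not accept writes.
    # The dotted source walks the tenant's nested attributes dictionary.
    ui_theme = serializers.ChoiceField(
        choices=Themes.choices,
        source="attributes.settings.theme.base",
        default=Themes.AUTOMATIC,
        read_only=True,
    )
```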
gh_patches_debug_10131
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nodeenv try to download non existing tar.gz prebuilt under Cygwin
Hi,
Strange issue: I suspect a recent change broke this as it used to work last week, on another Windows computer with Cygwin.
Bug reproduction: `pre-commit run` using e.g. https://github.com/Lucas-C/pre-commit-hooks-html v1.1.0
`pre-commit` executes the following command under the hood, a command that also fails if I execute it manually:
```
nodeenv --prebuilt /cygdrive/c/Users/admin/.pre-commit/repoYHJ85q/node_env-default
```
The error is the following:
```
urllib2.HTTPError: HTTP Error 404: Not Found
```
The `tar.gz` it tries to install is https://nodejs.org/dist/v7.2.1/node-v7.2.1-cygwin_nt-6.1-x64.tar.gz, which does not exist. My guess is that `nodeenv` should use the Windows prebuilts instead: https://nodejs.org/dist/v7.2.1/node-v7.2.1-win-x64.zip This is because `platform.system()` is used: https://github.com/ekalinin/nodeenv/blob/master/nodeenv.py#L503
I'm going to ask for help on the https://github.com/ekalinin/nodeenv project, but do you have any hint as to what the root cause could be here?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/node.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os
5 import sys
6
7 from pre_commit.envcontext import envcontext
8 from pre_commit.envcontext import Var
9 from pre_commit.languages import helpers
10 from pre_commit.util import clean_path_on_failure
11 from pre_commit.xargs import xargs
12
13
14 ENVIRONMENT_DIR = 'node_env'
15 get_default_version = helpers.basic_get_default_version
16 healthy = helpers.basic_healthy
17
18
19 def get_env_patch(venv): # pragma: windows no cover
20 return (
21 ('NODE_VIRTUAL_ENV', venv),
22 ('NPM_CONFIG_PREFIX', venv),
23 ('npm_config_prefix', venv),
24 ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
25 ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
26 )
27
28
29 @contextlib.contextmanager
30 def in_env(repo_cmd_runner, language_version): # pragma: windows no cover
31 envdir = repo_cmd_runner.path(
32 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
33 )
34 with envcontext(get_env_patch(envdir)):
35 yield
36
37
38 def install_environment(
39 repo_cmd_runner, version, additional_dependencies,
40 ): # pragma: windows no cover
41 additional_dependencies = tuple(additional_dependencies)
42 assert repo_cmd_runner.exists('package.json')
43 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
44
45 env_dir = repo_cmd_runner.path(directory)
46 with clean_path_on_failure(env_dir):
47 cmd = [
48 sys.executable, '-m', 'nodeenv', '--prebuilt',
49 '{{prefix}}{}'.format(directory),
50 ]
51
52 if version != 'default':
53 cmd.extend(['-n', version])
54
55 repo_cmd_runner.run(cmd)
56
57 with in_env(repo_cmd_runner, version):
58 helpers.run_setup_cmd(
59 repo_cmd_runner,
60 ('npm', 'install', '-g', '.') + additional_dependencies,
61 )
62
63
64 def run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover
65 with in_env(repo_cmd_runner, hook['language_version']):
66 return xargs(helpers.to_cmd(hook), file_args)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -17,10 +17,11 @@
def get_env_patch(venv): # pragma: windows no cover
+ config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv
return (
('NODE_VIRTUAL_ENV', venv),
- ('NPM_CONFIG_PREFIX', venv),
- ('npm_config_prefix', venv),
+ ('NPM_CONFIG_PREFIX', config),
+ ('npm_config_prefix', config),
('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
)
|
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -17,10 +17,11 @@\n \n \n def get_env_patch(venv): # pragma: windows no cover\n+ config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n- ('NPM_CONFIG_PREFIX', venv),\n- ('npm_config_prefix', venv),\n+ ('NPM_CONFIG_PREFIX', config),\n+ ('npm_config_prefix', config),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n", "issue": "nodeenv try to download non existing tar.gz prebuilt under Cygwin\nHi,\r\n\r\nStrange issue: I suspect a recent change broke this as it used to work last week, on another Windows computer with Cygwin.\r\n\r\nBug reproduction: `pre-commit run` using e.g. https://github.com/Lucas-C/pre-commit-hooks-html v1.1.0\r\n\r\n`pre-commit` execute the following command under the hood, a command that also fails if I execute it manually:\r\n```\r\nnodeenv --prebuilt /cygdrive/c/Users/admin/.pre-commit/repoYHJ85q/node_env-default\r\n```\r\nThe error is the following:\r\n```\r\nurllib2.HTTPError: HTTP Error 404: Not Found\r\n```\r\nThe `tar.gz` it tries to install is https://nodejs.org/dist/v7.2.1/node-v7.2.1-cygwin_nt-6.1-x64.tar.gz, which does not exist. My guess is that `nodeenv` should use the Windows prebuilts instead: https://nodejs.org/dist/v7.2.1/node-v7.2.1-win-x64.zip This is because `platform.system()` is used: https://github.com/ekalinin/nodeenv/blob/master/nodeenv.py#L503\r\n\r\nI'm going to ask for help on the https://github.com/ekalinin/nodeenv project, but do you have any hint at what the root cause could be here ?\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv): # pragma: windows no cover\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', venv),\n ('npm_config_prefix', venv),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version): # pragma: windows no cover\n envdir = repo_cmd_runner.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n repo_cmd_runner, version, additional_dependencies,\n): # pragma: windows no cover\n additional_dependencies = tuple(additional_dependencies)\n assert repo_cmd_runner.exists('package.json')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n env_dir = repo_cmd_runner.path(directory)\n with clean_path_on_failure(env_dir):\n cmd = [\n sys.executable, '-m', 'nodeenv', '--prebuilt',\n '{{prefix}}{}'.format(directory),\n ]\n\n if version != 'default':\n cmd.extend(['-n', version])\n\n repo_cmd_runner.run(cmd)\n\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover\n 
with in_env(repo_cmd_runner, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv): # pragma: windows no cover\n config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', config),\n ('npm_config_prefix', config),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version): # pragma: windows no cover\n envdir = repo_cmd_runner.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n repo_cmd_runner, version, additional_dependencies,\n): # pragma: windows no cover\n additional_dependencies = tuple(additional_dependencies)\n assert repo_cmd_runner.exists('package.json')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n env_dir = repo_cmd_runner.path(directory)\n with clean_path_on_failure(env_dir):\n cmd = [\n sys.executable, '-m', 'nodeenv', '--prebuilt',\n '{{prefix}}{}'.format(directory),\n ]\n\n if version != 'default':\n cmd.extend(['-n', version])\n\n repo_cmd_runner.run(cmd)\n\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover\n with in_env(repo_cmd_runner, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}]}
| 1,163 | 191 |
gh_patches_debug_1913
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-1946
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate falcon.api_helpers
See https://github.com/falconry/falcon/issues/1902.
Starting with 3.1, mark `falcon.api_helpers` as deprecated. We could employ module-level `__getattr__` or redecorate re-imported functions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/api_helpers.py`
Content:
```
1 from .app_helpers import * # NOQA
2
3 # TODO deprecate
4 # import warnings
5 # from .util.deprecation import DeprecatedWarning
6
7 # warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/api_helpers.py b/falcon/api_helpers.py
--- a/falcon/api_helpers.py
+++ b/falcon/api_helpers.py
@@ -1,7 +1,6 @@
-from .app_helpers import * # NOQA
+import warnings
-# TODO deprecate
-# import warnings
-# from .util.deprecation import DeprecatedWarning
+from .app_helpers import * # NOQA
+from .util.deprecation import DeprecatedWarning
-# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
+warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
|
{"golden_diff": "diff --git a/falcon/api_helpers.py b/falcon/api_helpers.py\n--- a/falcon/api_helpers.py\n+++ b/falcon/api_helpers.py\n@@ -1,7 +1,6 @@\n-from .app_helpers import * # NOQA\n+import warnings\n \n-# TODO deprecate\n-# import warnings\n-# from .util.deprecation import DeprecatedWarning\n+from .app_helpers import * # NOQA\n+from .util.deprecation import DeprecatedWarning\n \n-# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n+warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "issue": "Deprecate falcon.api_helpers\nSee https://github.com/falconry/falcon/issues/1902.\r\n\r\nStarting with 3.1, mark `falcon.api_helpers` as deprecated. We could employ module-level `__getattr__` or redecorate re-imported functions.\n", "before_files": [{"content": "from .app_helpers import * # NOQA\n\n# TODO deprecate\n# import warnings\n# from .util.deprecation import DeprecatedWarning\n\n# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "path": "falcon/api_helpers.py"}], "after_files": [{"content": "import warnings\n\nfrom .app_helpers import * # NOQA\nfrom .util.deprecation import DeprecatedWarning\n\nwarnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "path": "falcon/api_helpers.py"}]}
| 378 | 137 |
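The falcon change in this record is the standard "renamed module" shim: the old module re-exports the new one and warns on import. A generic sketch of that shim follows; `old_name` and `new_name` are placeholders, and the standard-library `DeprecationWarning` stands in for Falcon's own `DeprecatedWarning` class.

```python
# old_name.py: kept only so `import old_name` keeps working after the rename.
import warnings

from new_name import *  # noqa: F401,F403  (re-export the real module)

warnings.warn(
    'The old_name module was renamed to new_name; update your imports.',
    DeprecationWarning,
)
```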
gh_patches_debug_29346
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-13789
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with AWX 7.0.0
Hello.
I'm testing the integration between **AWX 7.0.0** (Ansible Tower) and **Zulip 2.0.4** by sending notifications.
During testing, I encounter an error from Ansible:

And I immediately receive an email warning from Zulip with the following content:
```Logger root, from module zerver.middleware line 291:
Error generated by Ansible (user42@zulip.******.**) on zulip.******.** deployment
Traceback (most recent call last):
File "/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "./zerver/lib/request.py", line 289, in _wrapped_view_func
return view_func(request, *args, **kwargs)
File "./zerver/decorator.py", line 375, in _wrapped_func_arguments
raise err
File "./zerver/decorator.py", line 361, in _wrapped_func_arguments
return view_func(request, user_profile, *args, **kwargs)
File "./zerver/lib/request.py", line 289, in _wrapped_view_func
return view_func(request, *args, **kwargs)
File "./zerver/webhooks/ansibletower/view.py", line 27, in api_ansibletower_webhook
body = get_body(payload)
File "./zerver/webhooks/ansibletower/view.py", line 34, in get_body
if (payload['friendly_name'] == 'Job'):
KeyError: 'friendly_name'
Deployed code:
- git: 2.0.0-2546-ga1fa0b011
- ZULIP_VERSION: 2.0.4+git
Request info:
- path: /api/v1/external/ansibletower
- POST: {}
- REMOTE_ADDR: "['10.10.36.6']"
- QUERY_STRING: "['api_key=******&topic=******&stream=******&topic=******"
- SERVER_NAME: "['']"
```
I have already disabled the "Disable SSL checking" setting, but it also seems that the new version of AWX (7.0.0) adds new webhook options such as "HTTP Headers" and "HTTP Method".

Note that I already receive notifications from GitLab, so the notification service works in my self-hosted Zulip configuration.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/webhooks/ansibletower/view.py`
Content:
```
1 import operator
2 from typing import Any, Dict, List
3
4 from django.http import HttpRequest, HttpResponse
5
6 from zerver.decorator import REQ, api_key_only_webhook_view, \
7 has_request_variables
8 from zerver.lib.response import json_success
9 from zerver.lib.webhooks.common import check_send_webhook_message
10 from zerver.models import UserProfile
11
12 ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE = "{friendly_name}: [#{id} {name}]({url}) {status}."
13
14
15 ANSIBLETOWER_JOB_MESSAGE_TEMPLATE = """
16 {friendly_name}: [#{id} {name}]({url}) {status}:
17 {hosts_final_data}
18 """.strip()
19
20 ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE = '* {hostname}: {status}\n'
21
22 @api_key_only_webhook_view('Ansibletower')
23 @has_request_variables
24 def api_ansibletower_webhook(request: HttpRequest, user_profile: UserProfile,
25 payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
26
27 body = get_body(payload)
28 subject = payload['name']
29
30 check_send_webhook_message(request, user_profile, subject, body)
31 return json_success()
32
33 def get_body(payload: Dict[str, Any]) -> str:
34 if (payload['friendly_name'] == 'Job'):
35 hosts_list_data = payload['hosts']
36 hosts_data = []
37 for host in payload['hosts']:
38 if (hosts_list_data[host].get('failed') is True):
39 hoststatus = 'Failed'
40 elif (hosts_list_data[host].get('failed') is False):
41 hoststatus = 'Success'
42 hosts_data.append({
43 'hostname': host,
44 'status': hoststatus
45 })
46
47 if (payload['status'] == "successful"):
48 status = 'was successful'
49 else:
50 status = 'failed'
51
52 return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(
53 name=payload['name'],
54 friendly_name=payload['friendly_name'],
55 id=payload['id'],
56 url=payload['url'],
57 status=status,
58 hosts_final_data=get_hosts_content(hosts_data)
59 )
60
61 else:
62
63 if (payload['status'] == "successful"):
64 status = 'was successful'
65 else:
66 status = 'failed'
67
68 data = {
69 "name": payload['name'],
70 "friendly_name": payload['friendly_name'],
71 "id": payload['id'],
72 "url": payload['url'],
73 "status": status
74 }
75
76 return ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE.format(**data)
77
78 def get_hosts_content(hosts_data: List[Dict[str, Any]]) -> str:
79 hosts_data = sorted(hosts_data, key=operator.itemgetter('hostname'))
80 hosts_content = ''
81 for host in hosts_data:
82 hosts_content += ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE.format(
83 hostname=host.get('hostname'),
84 status=host.get('status')
85 )
86 return hosts_content
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/webhooks/ansibletower/view.py b/zerver/webhooks/ansibletower/view.py
--- a/zerver/webhooks/ansibletower/view.py
+++ b/zerver/webhooks/ansibletower/view.py
@@ -30,8 +30,19 @@
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
+def extract_friendly_name(payload: Dict[str, Any]) -> str:
+ tentative_job_name = payload.get("friendly_name", "")
+ if not tentative_job_name:
+ url = payload["url"]
+ segments = url.split("/")
+ tentative_job_name = segments[-3]
+ if tentative_job_name == "jobs":
+ tentative_job_name = "Job"
+ return tentative_job_name
+
def get_body(payload: Dict[str, Any]) -> str:
- if (payload['friendly_name'] == 'Job'):
+ friendly_name = extract_friendly_name(payload)
+ if (friendly_name == 'Job'):
hosts_list_data = payload['hosts']
hosts_data = []
for host in payload['hosts']:
@@ -51,7 +62,7 @@
return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(
name=payload['name'],
- friendly_name=payload['friendly_name'],
+ friendly_name=friendly_name,
id=payload['id'],
url=payload['url'],
status=status,
@@ -67,7 +78,7 @@
data = {
"name": payload['name'],
- "friendly_name": payload['friendly_name'],
+ "friendly_name": friendly_name,
"id": payload['id'],
"url": payload['url'],
"status": status
|
{"golden_diff": "diff --git a/zerver/webhooks/ansibletower/view.py b/zerver/webhooks/ansibletower/view.py\n--- a/zerver/webhooks/ansibletower/view.py\n+++ b/zerver/webhooks/ansibletower/view.py\n@@ -30,8 +30,19 @@\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n \n+def extract_friendly_name(payload: Dict[str, Any]) -> str:\n+ tentative_job_name = payload.get(\"friendly_name\", \"\")\n+ if not tentative_job_name:\n+ url = payload[\"url\"]\n+ segments = url.split(\"/\")\n+ tentative_job_name = segments[-3]\n+ if tentative_job_name == \"jobs\":\n+ tentative_job_name = \"Job\"\n+ return tentative_job_name\n+\n def get_body(payload: Dict[str, Any]) -> str:\n- if (payload['friendly_name'] == 'Job'):\n+ friendly_name = extract_friendly_name(payload)\n+ if (friendly_name == 'Job'):\n hosts_list_data = payload['hosts']\n hosts_data = []\n for host in payload['hosts']:\n@@ -51,7 +62,7 @@\n \n return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(\n name=payload['name'],\n- friendly_name=payload['friendly_name'],\n+ friendly_name=friendly_name,\n id=payload['id'],\n url=payload['url'],\n status=status,\n@@ -67,7 +78,7 @@\n \n data = {\n \"name\": payload['name'],\n- \"friendly_name\": payload['friendly_name'],\n+ \"friendly_name\": friendly_name,\n \"id\": payload['id'],\n \"url\": payload['url'],\n \"status\": status\n", "issue": "Error with AWX 7.0.0\nHello.\r\nI'm testing integration between **AWX 7.0.0** (Ansible Tower) by sending notifications in **Zulip 2.0.4**.\r\nDuring testing, I encounter an error from Ansible :\r\n\r\n\r\n\r\nAnd I immediatly receive an email warning from Zulip with the following content :\r\n\r\n```Logger root, from module zerver.middleware line 291:\r\nError generated by Ansible (user42@zulip.******.**) on zulip.******.** deployment\r\n\r\nTraceback (most recent call last):\r\n File \"/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/core/handlers/base.py\", line 185, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/views/decorators/csrf.py\", line 58, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"./zerver/lib/request.py\", line 289, in _wrapped_view_func\r\n return view_func(request, *args, **kwargs)\r\n File \"./zerver/decorator.py\", line 375, in _wrapped_func_arguments\r\n raise err\r\n File \"./zerver/decorator.py\", line 361, in _wrapped_func_arguments\r\n return view_func(request, user_profile, *args, **kwargs)\r\n File \"./zerver/lib/request.py\", line 289, in _wrapped_view_func\r\n return view_func(request, *args, **kwargs)\r\n File \"./zerver/webhooks/ansibletower/view.py\", line 27, in api_ansibletower_webhook\r\n body = get_body(payload)\r\n File \"./zerver/webhooks/ansibletower/view.py\", line 34, in get_body\r\n if (payload['friendly_name'] == 'Job'):\r\nKeyError: 'friendly_name'\r\n\r\n\r\nDeployed code:\r\n- git: 2.0.0-2546-ga1fa0b011\r\n- ZULIP_VERSION: 2.0.4+git\r\n\r\n\r\nRequest info:\r\n- path: /api/v1/external/ansibletower\r\n- POST: {}\r\n- REMOTE_ADDR: \"['10.10.36.6']\"\r\n- QUERY_STRING: \"['api_key=******&topic=******&stream=******&topic=******\"\r\n- SERVER_NAME: \"['']\"\r\n```\r\n\r\nI have already disable the \"Disable SSL checking\" but it seems also that the new version of AWX (the 7.0.0) contains new options for webhook like \"HTTP Headers\" and \"HTTP 
Method\".\r\n\r\n\r\n\r\nNote that I have already notifications from GitLab so the notification service works in my self-hosted Zulip configuration.\n", "before_files": [{"content": "import operator\nfrom typing import Any, Dict, List\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import REQ, api_key_only_webhook_view, \\\n has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\nANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE = \"{friendly_name}: [#{id} {name}]({url}) {status}.\"\n\n\nANSIBLETOWER_JOB_MESSAGE_TEMPLATE = \"\"\"\n{friendly_name}: [#{id} {name}]({url}) {status}:\n{hosts_final_data}\n\"\"\".strip()\n\nANSIBLETOWER_JOB_HOST_ROW_TEMPLATE = '* {hostname}: {status}\\n'\n\n@api_key_only_webhook_view('Ansibletower')\n@has_request_variables\ndef api_ansibletower_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:\n\n body = get_body(payload)\n subject = payload['name']\n\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\ndef get_body(payload: Dict[str, Any]) -> str:\n if (payload['friendly_name'] == 'Job'):\n hosts_list_data = payload['hosts']\n hosts_data = []\n for host in payload['hosts']:\n if (hosts_list_data[host].get('failed') is True):\n hoststatus = 'Failed'\n elif (hosts_list_data[host].get('failed') is False):\n hoststatus = 'Success'\n hosts_data.append({\n 'hostname': host,\n 'status': hoststatus\n })\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(\n name=payload['name'],\n friendly_name=payload['friendly_name'],\n id=payload['id'],\n url=payload['url'],\n status=status,\n hosts_final_data=get_hosts_content(hosts_data)\n )\n\n else:\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n data = {\n \"name\": payload['name'],\n \"friendly_name\": payload['friendly_name'],\n \"id\": payload['id'],\n \"url\": payload['url'],\n \"status\": status\n }\n\n return ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE.format(**data)\n\ndef get_hosts_content(hosts_data: List[Dict[str, Any]]) -> str:\n hosts_data = sorted(hosts_data, key=operator.itemgetter('hostname'))\n hosts_content = ''\n for host in hosts_data:\n hosts_content += ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE.format(\n hostname=host.get('hostname'),\n status=host.get('status')\n )\n return hosts_content\n", "path": "zerver/webhooks/ansibletower/view.py"}], "after_files": [{"content": "import operator\nfrom typing import Any, Dict, List\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import REQ, api_key_only_webhook_view, \\\n has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\nANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE = \"{friendly_name}: [#{id} {name}]({url}) {status}.\"\n\n\nANSIBLETOWER_JOB_MESSAGE_TEMPLATE = \"\"\"\n{friendly_name}: [#{id} {name}]({url}) {status}:\n{hosts_final_data}\n\"\"\".strip()\n\nANSIBLETOWER_JOB_HOST_ROW_TEMPLATE = '* {hostname}: {status}\\n'\n\n@api_key_only_webhook_view('Ansibletower')\n@has_request_variables\ndef api_ansibletower_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) -> 
HttpResponse:\n\n body = get_body(payload)\n subject = payload['name']\n\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\ndef extract_friendly_name(payload: Dict[str, Any]) -> str:\n tentative_job_name = payload.get(\"friendly_name\", \"\")\n if not tentative_job_name:\n url = payload[\"url\"]\n segments = url.split(\"/\")\n tentative_job_name = segments[-3]\n if tentative_job_name == \"jobs\":\n tentative_job_name = \"Job\"\n return tentative_job_name\n\ndef get_body(payload: Dict[str, Any]) -> str:\n friendly_name = extract_friendly_name(payload)\n if (friendly_name == 'Job'):\n hosts_list_data = payload['hosts']\n hosts_data = []\n for host in payload['hosts']:\n if (hosts_list_data[host].get('failed') is True):\n hoststatus = 'Failed'\n elif (hosts_list_data[host].get('failed') is False):\n hoststatus = 'Success'\n hosts_data.append({\n 'hostname': host,\n 'status': hoststatus\n })\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(\n name=payload['name'],\n friendly_name=friendly_name,\n id=payload['id'],\n url=payload['url'],\n status=status,\n hosts_final_data=get_hosts_content(hosts_data)\n )\n\n else:\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n data = {\n \"name\": payload['name'],\n \"friendly_name\": friendly_name,\n \"id\": payload['id'],\n \"url\": payload['url'],\n \"status\": status\n }\n\n return ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE.format(**data)\n\ndef get_hosts_content(hosts_data: List[Dict[str, Any]]) -> str:\n hosts_data = sorted(hosts_data, key=operator.itemgetter('hostname'))\n hosts_content = ''\n for host in hosts_data:\n hosts_content += ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE.format(\n hostname=host.get('hostname'),\n status=host.get('status')\n )\n return hosts_content\n", "path": "zerver/webhooks/ansibletower/view.py"}]}
| 1,898 | 383 |
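The heart of the zulip fix in this record is falling back to the payload URL when AWX 7 omits `friendly_name`. Below is a standalone sketch of that fallback with two illustrative payloads: the hostname and job id are made up, and the `/jobs/<id>/` URL shape is the one the accepted patch relies on.

```python
from typing import Any, Dict


def extract_friendly_name(payload: Dict[str, Any]) -> str:
    """Use friendly_name when present, otherwise infer it from the job URL."""
    name = payload.get("friendly_name", "")
    if not name:
        segments = payload["url"].split("/")
        name = segments[-3]          # .../jobs/<id>/ -> "jobs"
        if name == "jobs":
            name = "Job"
    return name


# Older Tower payloads carry friendly_name; AWX 7.0.0 payloads do not.
print(extract_friendly_name({"friendly_name": "Job", "url": ""}))          # -> Job
print(extract_friendly_name({"url": "https://awx.example.com/jobs/42/"}))  # -> Job
```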
gh_patches_debug_24613
|
rasdani/github-patches
|
git_diff
|
openshift__openshift-ansible-5619
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker_image_availability check fails, repo url divided into single character urls
#### Description
Looks to be a tokenizing error in the `docker_image_availability` check
```
Failure summary:
1. Hosts: infra.lab.variantweb.net, master.lab.variantweb.net, node1.lab.variantweb.net
Play: Verify Requirements
Task: openshift_health_check
Message: One or more checks failed
Details: check "docker_image_availability":
One or more required Docker images are not available:
openshift3/ose-deployer:v3.7.0-0.127.0,
openshift3/ose-docker-registry:v3.7.0-0.127.0,
openshift3/ose-haproxy-router:v3.7.0-0.127.0,
openshift3/ose-pod:v3.7.0-0.127.0
Configured registries: r (unreachable), e (unreachable), g (unreachable), i (unreachable), s (unreachable), t (unreachable), r (unreachable), y (unreachable), . (unreachable), o (unreachable), p (unreachable), s (unreachable), . (unreachable), o (unreachable), p (unreachable), e (unreachable), n (unreachable), s (unreachable), h (unreachable), i (unreachable), f (unreachable), t (unreachable), . (unreachable), c (unreachable), o (unreachable), m (unreachable), registry.access.redhat.com
Checked by: timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}
```
##### Version
```
$ ansible --version
ansible 2.3.2.0
config file = /home/sjennings/projects/go/src/github.com/openshift/openshift-ansible/ansible.cfg
configured module search path = Default w/o overrides
python version = 2.7.13 (default, Jun 26 2017, 10:20:05) [GCC 7.1.1 20170622 (Red Hat 7.1.1-3)]
$ git describe
openshift-ansible-3.7.0-0.135.0-10-g62cb2a8d5
```
##### Steps To Reproduce
1. run playbook/byo/config.yml
2. playbook fails with above error
##### Expected Results
The installation check succeeds, and the `openshift_docker_additional_registries` value is not split into individual characters that get treated as repo URLs.
@sdodson
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/openshift_health_checker/openshift_checks/docker_image_availability.py`
Content:
```
1 """Check that required Docker images are available."""
2
3 from openshift_checks import OpenShiftCheck
4 from openshift_checks.mixins import DockerHostMixin
5
6
7 NODE_IMAGE_SUFFIXES = ["haproxy-router", "docker-registry", "deployer", "pod"]
8 DEPLOYMENT_IMAGE_INFO = {
9 "origin": {
10 "namespace": "openshift",
11 "name": "origin",
12 "registry_console_image": "cockpit/kubernetes",
13 },
14 "openshift-enterprise": {
15 "namespace": "openshift3",
16 "name": "ose",
17 "registry_console_image": "registry.access.redhat.com/openshift3/registry-console",
18 },
19 }
20
21
22 class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
23 """Check that required Docker images are available.
24
25 Determine docker images that an install would require and check that they
26 are either present in the host's docker index, or available for the host to pull
27 with known registries as defined in our inventory file (or defaults).
28 """
29
30 name = "docker_image_availability"
31 tags = ["preflight"]
32 # we use python-docker-py to check local docker for images, and skopeo
33 # to look for images available remotely without waiting to pull them.
34 dependencies = ["python-docker-py", "skopeo"]
35 skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
36
37 def __init__(self, *args, **kwargs):
38 super(DockerImageAvailability, self).__init__(*args, **kwargs)
39 # record whether we could reach a registry or not (and remember results)
40 self.reachable_registries = {}
41
42 def is_active(self):
43 """Skip hosts with unsupported deployment types."""
44 deployment_type = self.get_var("openshift_deployment_type")
45 has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO
46
47 return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type
48
49 def run(self):
50 msg, failed = self.ensure_dependencies()
51 if failed:
52 return {
53 "failed": True,
54 "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
55 }
56
57 required_images = self.required_images()
58 missing_images = set(required_images) - set(self.local_images(required_images))
59
60 # exit early if all images were found locally
61 if not missing_images:
62 return {}
63
64 registries = self.known_docker_registries()
65 if not registries:
66 return {"failed": True, "msg": "Unable to retrieve any docker registries."}
67
68 available_images = self.available_images(missing_images, registries)
69 unavailable_images = set(missing_images) - set(available_images)
70
71 if unavailable_images:
72 registries = [
73 reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
74 for reg in registries
75 ]
76 msg = (
77 "One or more required Docker images are not available:\n {}\n"
78 "Configured registries: {}\n"
79 "Checked by: {}"
80 ).format(
81 ",\n ".join(sorted(unavailable_images)),
82 ", ".join(registries),
83 self.skopeo_img_check_command
84 )
85
86 return dict(failed=True, msg=msg)
87
88 return {}
89
90 def required_images(self):
91 """
92 Determine which images we expect to need for this host.
93 Returns: a set of required images like 'openshift/origin:v3.6'
94
95 The thorny issue of determining the image names from the variables is under consideration
96 via https://github.com/openshift/openshift-ansible/issues/4415
97
98 For now we operate as follows:
99 * For containerized components (master, node, ...) we look at the deployment type and
100 use openshift/origin or openshift3/ose as the base for those component images. The
101 version is openshift_image_tag as determined by the openshift_version role.
102 * For OpenShift-managed infrastructure (router, registry...) we use oreg_url if
103 it is defined; otherwise we again use the base that depends on the deployment type.
104 Registry is not included in constructed images. It may be in oreg_url or etcd image.
105 """
106 required = set()
107 deployment_type = self.get_var("openshift_deployment_type")
108 host_groups = self.get_var("group_names")
109 # containerized etcd may not have openshift_image_tag, see bz 1466622
110 image_tag = self.get_var("openshift_image_tag", default="latest")
111 image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
112
113 # template for images that run on top of OpenShift
114 image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
115 image_url = self.get_var("oreg_url", default="") or image_url
116 if 'nodes' in host_groups:
117 for suffix in NODE_IMAGE_SUFFIXES:
118 required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
119 # The registry-console is for some reason not prefixed with ose- like the other components.
120 # Nor is it versioned the same, so just look for latest.
121 # Also a completely different name is used for Origin.
122 required.add(image_info["registry_console_image"])
123
124 # images for containerized components
125 if self.get_var("openshift", "common", "is_containerized"):
126 components = set()
127 if 'nodes' in host_groups:
128 components.update(["node", "openvswitch"])
129 if 'masters' in host_groups: # name is "origin" or "ose"
130 components.add(image_info["name"])
131 for component in components:
132 required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
133 if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise
134 required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag
135
136 return required
137
138 def local_images(self, images):
139 """Filter a list of images and return those available locally."""
140 registries = self.known_docker_registries()
141 found_images = []
142 for image in images:
143 # docker could have the image name as-is or prefixed with any registry
144 imglist = [image] + [reg + "/" + image for reg in registries]
145 if self.is_image_local(imglist):
146 found_images.append(image)
147 return found_images
148
149 def is_image_local(self, image):
150 """Check if image is already in local docker index."""
151 result = self.execute_module("docker_image_facts", {"name": image})
152 return bool(result.get("images")) and not result.get("failed")
153
154 def known_docker_registries(self):
155 """Build a list of docker registries available according to inventory vars."""
156 regs = list(self.get_var("openshift_docker_additional_registries", default=[]))
157
158 deployment_type = self.get_var("openshift_deployment_type")
159 if deployment_type == "origin" and "docker.io" not in regs:
160 regs.append("docker.io")
161 elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
162 regs.append("registry.access.redhat.com")
163
164 return regs
165
166 def available_images(self, images, default_registries):
167 """Search remotely for images. Returns: list of images found."""
168 return [
169 image for image in images
170 if self.is_available_skopeo_image(image, default_registries)
171 ]
172
173 def is_available_skopeo_image(self, image, default_registries):
174 """Use Skopeo to determine if required image exists in known registry(s)."""
175 registries = default_registries
176
177 # If image already includes a registry, only use that.
178 # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
179 # registry.access.redhat.com/rhel7 as if the registry were a namespace.
180 # It's not clear that there's any way to distinguish them, but fortunately
181 # the current set of images all look like [registry/]namespace/name[:version].
182 if image.count("/") > 1:
183 registry, image = image.split("/", 1)
184 registries = [registry]
185
186 for registry in registries:
187 if registry not in self.reachable_registries:
188 self.reachable_registries[registry] = self.connect_to_registry(registry)
189 if not self.reachable_registries[registry]:
190 continue
191
192 args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
193 result = self.execute_module_with_retries("command", args)
194 if result.get("rc", 0) == 0 and not result.get("failed"):
195 return True
196 if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
197 self.reachable_registries[registry] = False
198
199 return False
200
201 def connect_to_registry(self, registry):
202 """Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
203 # test a simple TCP connection
204 host, _, port = registry.partition(":")
205 port = port or 443
206 args = dict(host=host, port=port, state="started", timeout=30)
207 result = self.execute_module("wait_for", args)
208 return result.get("rc", 0) == 0 and not result.get("failed")
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,5 +1,6 @@
"""Check that required Docker images are available."""
+from ansible.module_utils import six
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import DockerHostMixin
@@ -153,7 +154,15 @@
def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- regs = list(self.get_var("openshift_docker_additional_registries", default=[]))
+ regs = self.get_var("openshift_docker_additional_registries", default=[])
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1497274
+ # if the result was a string type, place it into a list. We must do this
+ # as using list() on a string will split the string into its characters.
+ if isinstance(regs, six.string_types):
+ regs = [regs]
+ else:
+ # Otherwise cast to a list as was done previously
+ regs = list(regs)
deployment_type = self.get_var("openshift_deployment_type")
if deployment_type == "origin" and "docker.io" not in regs:
|
{"golden_diff": "diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py\n--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py\n+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py\n@@ -1,5 +1,6 @@\n \"\"\"Check that required Docker images are available.\"\"\"\n \n+from ansible.module_utils import six\n from openshift_checks import OpenShiftCheck\n from openshift_checks.mixins import DockerHostMixin\n \n@@ -153,7 +154,15 @@\n \n def known_docker_registries(self):\n \"\"\"Build a list of docker registries available according to inventory vars.\"\"\"\n- regs = list(self.get_var(\"openshift_docker_additional_registries\", default=[]))\n+ regs = self.get_var(\"openshift_docker_additional_registries\", default=[])\n+ # https://bugzilla.redhat.com/show_bug.cgi?id=1497274\n+ # if the result was a string type, place it into a list. We must do this\n+ # as using list() on a string will split the string into its characters.\n+ if isinstance(regs, six.string_types):\n+ regs = [regs]\n+ else:\n+ # Otherwise cast to a list as was done previously\n+ regs = list(regs)\n \n deployment_type = self.get_var(\"openshift_deployment_type\")\n if deployment_type == \"origin\" and \"docker.io\" not in regs:\n", "issue": "docker_image_availability check fails, repo url divided into single character urls\n#### Description\r\n\r\nLooks to be a tokenizing error in the `docker_image_availability` check\r\n```\r\nFailure summary:\r\n\r\n\r\n 1. Hosts: infra.lab.variantweb.net, master.lab.variantweb.net, node1.lab.variantweb.net\r\n Play: Verify Requirements\r\n Task: openshift_health_check\r\n Message: One or more checks failed\r\n Details: check \"docker_image_availability\":\r\n One or more required Docker images are not available:\r\n openshift3/ose-deployer:v3.7.0-0.127.0,\r\n openshift3/ose-docker-registry:v3.7.0-0.127.0,\r\n openshift3/ose-haproxy-router:v3.7.0-0.127.0,\r\n openshift3/ose-pod:v3.7.0-0.127.0\r\n Configured registries: r (unreachable), e (unreachable), g (unreachable), i (unreachable), s (unreachable), t (unreachable), r (unreachable), y (unreachable), . (unreachable), o (unreachable), p (unreachable), s (unreachable), . (unreachable), o (unreachable), p (unreachable), e (unreachable), n (unreachable), s (unreachable), h (unreachable), i (unreachable), f (unreachable), t (unreachable), . (unreachable), c (unreachable), o (unreachable), m (unreachable), registry.access.redhat.com\r\n Checked by: timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}\r\n```\r\n\r\n##### Version\r\n\r\n```\r\n$ ansible --version\r\nansible 2.3.2.0\r\n config file = /home/sjennings/projects/go/src/github.com/openshift/openshift-ansible/ansible.cfg\r\n configured module search path = Default w/o overrides\r\n python version = 2.7.13 (default, Jun 26 2017, 10:20:05) [GCC 7.1.1 20170622 (Red Hat 7.1.1-3)]\r\n\r\n$ git describe\r\nopenshift-ansible-3.7.0-0.135.0-10-g62cb2a8d5\r\n```\r\n\r\n##### Steps To Reproduce\r\n1. run playbook/byo/config.yml\r\n2. 
playbook fails with above error\r\n\r\n##### Expected Results\r\nInstallation check succeeds and each character of the `openshift_docker_additional_registries` is not treated as an individual repo url.\r\n\r\n@sdodson \n", "before_files": [{"content": "\"\"\"Check that required Docker images are available.\"\"\"\n\nfrom openshift_checks import OpenShiftCheck\nfrom openshift_checks.mixins import DockerHostMixin\n\n\nNODE_IMAGE_SUFFIXES = [\"haproxy-router\", \"docker-registry\", \"deployer\", \"pod\"]\nDEPLOYMENT_IMAGE_INFO = {\n \"origin\": {\n \"namespace\": \"openshift\",\n \"name\": \"origin\",\n \"registry_console_image\": \"cockpit/kubernetes\",\n },\n \"openshift-enterprise\": {\n \"namespace\": \"openshift3\",\n \"name\": \"ose\",\n \"registry_console_image\": \"registry.access.redhat.com/openshift3/registry-console\",\n },\n}\n\n\nclass DockerImageAvailability(DockerHostMixin, OpenShiftCheck):\n \"\"\"Check that required Docker images are available.\n\n Determine docker images that an install would require and check that they\n are either present in the host's docker index, or available for the host to pull\n with known registries as defined in our inventory file (or defaults).\n \"\"\"\n\n name = \"docker_image_availability\"\n tags = [\"preflight\"]\n # we use python-docker-py to check local docker for images, and skopeo\n # to look for images available remotely without waiting to pull them.\n dependencies = [\"python-docker-py\", \"skopeo\"]\n skopeo_img_check_command = \"timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}\"\n\n def __init__(self, *args, **kwargs):\n super(DockerImageAvailability, self).__init__(*args, **kwargs)\n # record whether we could reach a registry or not (and remember results)\n self.reachable_registries = {}\n\n def is_active(self):\n \"\"\"Skip hosts with unsupported deployment types.\"\"\"\n deployment_type = self.get_var(\"openshift_deployment_type\")\n has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO\n\n return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type\n\n def run(self):\n msg, failed = self.ensure_dependencies()\n if failed:\n return {\n \"failed\": True,\n \"msg\": \"Some dependencies are required in order to check Docker image availability.\\n\" + msg\n }\n\n required_images = self.required_images()\n missing_images = set(required_images) - set(self.local_images(required_images))\n\n # exit early if all images were found locally\n if not missing_images:\n return {}\n\n registries = self.known_docker_registries()\n if not registries:\n return {\"failed\": True, \"msg\": \"Unable to retrieve any docker registries.\"}\n\n available_images = self.available_images(missing_images, registries)\n unavailable_images = set(missing_images) - set(available_images)\n\n if unavailable_images:\n registries = [\n reg if self.reachable_registries.get(reg, True) else reg + \" (unreachable)\"\n for reg in registries\n ]\n msg = (\n \"One or more required Docker images are not available:\\n {}\\n\"\n \"Configured registries: {}\\n\"\n \"Checked by: {}\"\n ).format(\n \",\\n \".join(sorted(unavailable_images)),\n \", \".join(registries),\n self.skopeo_img_check_command\n )\n\n return dict(failed=True, msg=msg)\n\n return {}\n\n def required_images(self):\n \"\"\"\n Determine which images we expect to need for this host.\n Returns: a set of required images like 'openshift/origin:v3.6'\n\n The thorny issue of determining the image names from the variables is under consideration\n via 
https://github.com/openshift/openshift-ansible/issues/4415\n\n For now we operate as follows:\n * For containerized components (master, node, ...) we look at the deployment type and\n use openshift/origin or openshift3/ose as the base for those component images. The\n version is openshift_image_tag as determined by the openshift_version role.\n * For OpenShift-managed infrastructure (router, registry...) we use oreg_url if\n it is defined; otherwise we again use the base that depends on the deployment type.\n Registry is not included in constructed images. It may be in oreg_url or etcd image.\n \"\"\"\n required = set()\n deployment_type = self.get_var(\"openshift_deployment_type\")\n host_groups = self.get_var(\"group_names\")\n # containerized etcd may not have openshift_image_tag, see bz 1466622\n image_tag = self.get_var(\"openshift_image_tag\", default=\"latest\")\n image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]\n\n # template for images that run on top of OpenShift\n image_url = \"{}/{}-{}:{}\".format(image_info[\"namespace\"], image_info[\"name\"], \"${component}\", \"${version}\")\n image_url = self.get_var(\"oreg_url\", default=\"\") or image_url\n if 'nodes' in host_groups:\n for suffix in NODE_IMAGE_SUFFIXES:\n required.add(image_url.replace(\"${component}\", suffix).replace(\"${version}\", image_tag))\n # The registry-console is for some reason not prefixed with ose- like the other components.\n # Nor is it versioned the same, so just look for latest.\n # Also a completely different name is used for Origin.\n required.add(image_info[\"registry_console_image\"])\n\n # images for containerized components\n if self.get_var(\"openshift\", \"common\", \"is_containerized\"):\n components = set()\n if 'nodes' in host_groups:\n components.update([\"node\", \"openvswitch\"])\n if 'masters' in host_groups: # name is \"origin\" or \"ose\"\n components.add(image_info[\"name\"])\n for component in components:\n required.add(\"{}/{}:{}\".format(image_info[\"namespace\"], component, image_tag))\n if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise\n required.add(\"registry.access.redhat.com/rhel7/etcd\") # and no image tag\n\n return required\n\n def local_images(self, images):\n \"\"\"Filter a list of images and return those available locally.\"\"\"\n registries = self.known_docker_registries()\n found_images = []\n for image in images:\n # docker could have the image name as-is or prefixed with any registry\n imglist = [image] + [reg + \"/\" + image for reg in registries]\n if self.is_image_local(imglist):\n found_images.append(image)\n return found_images\n\n def is_image_local(self, image):\n \"\"\"Check if image is already in local docker index.\"\"\"\n result = self.execute_module(\"docker_image_facts\", {\"name\": image})\n return bool(result.get(\"images\")) and not result.get(\"failed\")\n\n def known_docker_registries(self):\n \"\"\"Build a list of docker registries available according to inventory vars.\"\"\"\n regs = list(self.get_var(\"openshift_docker_additional_registries\", default=[]))\n\n deployment_type = self.get_var(\"openshift_deployment_type\")\n if deployment_type == \"origin\" and \"docker.io\" not in regs:\n regs.append(\"docker.io\")\n elif deployment_type == 'openshift-enterprise' and \"registry.access.redhat.com\" not in regs:\n regs.append(\"registry.access.redhat.com\")\n\n return regs\n\n def available_images(self, images, default_registries):\n \"\"\"Search remotely for images. 
Returns: list of images found.\"\"\"\n return [\n image for image in images\n if self.is_available_skopeo_image(image, default_registries)\n ]\n\n def is_available_skopeo_image(self, image, default_registries):\n \"\"\"Use Skopeo to determine if required image exists in known registry(s).\"\"\"\n registries = default_registries\n\n # If image already includes a registry, only use that.\n # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.\n # registry.access.redhat.com/rhel7 as if the registry were a namespace.\n # It's not clear that there's any way to distinguish them, but fortunately\n # the current set of images all look like [registry/]namespace/name[:version].\n if image.count(\"/\") > 1:\n registry, image = image.split(\"/\", 1)\n registries = [registry]\n\n for registry in registries:\n if registry not in self.reachable_registries:\n self.reachable_registries[registry] = self.connect_to_registry(registry)\n if not self.reachable_registries[registry]:\n continue\n\n args = {\"_raw_params\": self.skopeo_img_check_command.format(registry=registry, image=image)}\n result = self.execute_module_with_retries(\"command\", args)\n if result.get(\"rc\", 0) == 0 and not result.get(\"failed\"):\n return True\n if result.get(\"rc\") == 124: # RC 124 == timed out; mark unreachable\n self.reachable_registries[registry] = False\n\n return False\n\n def connect_to_registry(self, registry):\n \"\"\"Use ansible wait_for module to test connectivity from host to registry. Returns bool.\"\"\"\n # test a simple TCP connection\n host, _, port = registry.partition(\":\")\n port = port or 443\n args = dict(host=host, port=port, state=\"started\", timeout=30)\n result = self.execute_module(\"wait_for\", args)\n return result.get(\"rc\", 0) == 0 and not result.get(\"failed\")\n", "path": "roles/openshift_health_checker/openshift_checks/docker_image_availability.py"}], "after_files": [{"content": "\"\"\"Check that required Docker images are available.\"\"\"\n\nfrom ansible.module_utils import six\nfrom openshift_checks import OpenShiftCheck\nfrom openshift_checks.mixins import DockerHostMixin\n\n\nNODE_IMAGE_SUFFIXES = [\"haproxy-router\", \"docker-registry\", \"deployer\", \"pod\"]\nDEPLOYMENT_IMAGE_INFO = {\n \"origin\": {\n \"namespace\": \"openshift\",\n \"name\": \"origin\",\n \"registry_console_image\": \"cockpit/kubernetes\",\n },\n \"openshift-enterprise\": {\n \"namespace\": \"openshift3\",\n \"name\": \"ose\",\n \"registry_console_image\": \"registry.access.redhat.com/openshift3/registry-console\",\n },\n}\n\n\nclass DockerImageAvailability(DockerHostMixin, OpenShiftCheck):\n \"\"\"Check that required Docker images are available.\n\n Determine docker images that an install would require and check that they\n are either present in the host's docker index, or available for the host to pull\n with known registries as defined in our inventory file (or defaults).\n \"\"\"\n\n name = \"docker_image_availability\"\n tags = [\"preflight\"]\n # we use python-docker-py to check local docker for images, and skopeo\n # to look for images available remotely without waiting to pull them.\n dependencies = [\"python-docker-py\", \"skopeo\"]\n skopeo_img_check_command = \"timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}\"\n\n def __init__(self, *args, **kwargs):\n super(DockerImageAvailability, self).__init__(*args, **kwargs)\n # record whether we could reach a registry or not (and remember results)\n self.reachable_registries = {}\n\n def is_active(self):\n 
\"\"\"Skip hosts with unsupported deployment types.\"\"\"\n deployment_type = self.get_var(\"openshift_deployment_type\")\n has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO\n\n return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type\n\n def run(self):\n msg, failed = self.ensure_dependencies()\n if failed:\n return {\n \"failed\": True,\n \"msg\": \"Some dependencies are required in order to check Docker image availability.\\n\" + msg\n }\n\n required_images = self.required_images()\n missing_images = set(required_images) - set(self.local_images(required_images))\n\n # exit early if all images were found locally\n if not missing_images:\n return {}\n\n registries = self.known_docker_registries()\n if not registries:\n return {\"failed\": True, \"msg\": \"Unable to retrieve any docker registries.\"}\n\n available_images = self.available_images(missing_images, registries)\n unavailable_images = set(missing_images) - set(available_images)\n\n if unavailable_images:\n registries = [\n reg if self.reachable_registries.get(reg, True) else reg + \" (unreachable)\"\n for reg in registries\n ]\n msg = (\n \"One or more required Docker images are not available:\\n {}\\n\"\n \"Configured registries: {}\\n\"\n \"Checked by: {}\"\n ).format(\n \",\\n \".join(sorted(unavailable_images)),\n \", \".join(registries),\n self.skopeo_img_check_command\n )\n\n return dict(failed=True, msg=msg)\n\n return {}\n\n def required_images(self):\n \"\"\"\n Determine which images we expect to need for this host.\n Returns: a set of required images like 'openshift/origin:v3.6'\n\n The thorny issue of determining the image names from the variables is under consideration\n via https://github.com/openshift/openshift-ansible/issues/4415\n\n For now we operate as follows:\n * For containerized components (master, node, ...) we look at the deployment type and\n use openshift/origin or openshift3/ose as the base for those component images. The\n version is openshift_image_tag as determined by the openshift_version role.\n * For OpenShift-managed infrastructure (router, registry...) we use oreg_url if\n it is defined; otherwise we again use the base that depends on the deployment type.\n Registry is not included in constructed images. 
It may be in oreg_url or etcd image.\n \"\"\"\n required = set()\n deployment_type = self.get_var(\"openshift_deployment_type\")\n host_groups = self.get_var(\"group_names\")\n # containerized etcd may not have openshift_image_tag, see bz 1466622\n image_tag = self.get_var(\"openshift_image_tag\", default=\"latest\")\n image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]\n\n # template for images that run on top of OpenShift\n image_url = \"{}/{}-{}:{}\".format(image_info[\"namespace\"], image_info[\"name\"], \"${component}\", \"${version}\")\n image_url = self.get_var(\"oreg_url\", default=\"\") or image_url\n if 'nodes' in host_groups:\n for suffix in NODE_IMAGE_SUFFIXES:\n required.add(image_url.replace(\"${component}\", suffix).replace(\"${version}\", image_tag))\n # The registry-console is for some reason not prefixed with ose- like the other components.\n # Nor is it versioned the same, so just look for latest.\n # Also a completely different name is used for Origin.\n required.add(image_info[\"registry_console_image\"])\n\n # images for containerized components\n if self.get_var(\"openshift\", \"common\", \"is_containerized\"):\n components = set()\n if 'nodes' in host_groups:\n components.update([\"node\", \"openvswitch\"])\n if 'masters' in host_groups: # name is \"origin\" or \"ose\"\n components.add(image_info[\"name\"])\n for component in components:\n required.add(\"{}/{}:{}\".format(image_info[\"namespace\"], component, image_tag))\n if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise\n required.add(\"registry.access.redhat.com/rhel7/etcd\") # and no image tag\n\n return required\n\n def local_images(self, images):\n \"\"\"Filter a list of images and return those available locally.\"\"\"\n registries = self.known_docker_registries()\n found_images = []\n for image in images:\n # docker could have the image name as-is or prefixed with any registry\n imglist = [image] + [reg + \"/\" + image for reg in registries]\n if self.is_image_local(imglist):\n found_images.append(image)\n return found_images\n\n def is_image_local(self, image):\n \"\"\"Check if image is already in local docker index.\"\"\"\n result = self.execute_module(\"docker_image_facts\", {\"name\": image})\n return bool(result.get(\"images\")) and not result.get(\"failed\")\n\n def known_docker_registries(self):\n \"\"\"Build a list of docker registries available according to inventory vars.\"\"\"\n regs = self.get_var(\"openshift_docker_additional_registries\", default=[])\n # https://bugzilla.redhat.com/show_bug.cgi?id=1497274\n # if the result was a string type, place it into a list. We must do this\n # as using list() on a string will split the string into its characters.\n if isinstance(regs, six.string_types):\n regs = [regs]\n else:\n # Otherwise cast to a list as was done previously\n regs = list(regs)\n\n deployment_type = self.get_var(\"openshift_deployment_type\")\n if deployment_type == \"origin\" and \"docker.io\" not in regs:\n regs.append(\"docker.io\")\n elif deployment_type == 'openshift-enterprise' and \"registry.access.redhat.com\" not in regs:\n regs.append(\"registry.access.redhat.com\")\n\n return regs\n\n def available_images(self, images, default_registries):\n \"\"\"Search remotely for images. 
Returns: list of images found.\"\"\"\n return [\n image for image in images\n if self.is_available_skopeo_image(image, default_registries)\n ]\n\n def is_available_skopeo_image(self, image, default_registries):\n \"\"\"Use Skopeo to determine if required image exists in known registry(s).\"\"\"\n registries = default_registries\n\n # If image already includes a registry, only use that.\n # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.\n # registry.access.redhat.com/rhel7 as if the registry were a namespace.\n # It's not clear that there's any way to distinguish them, but fortunately\n # the current set of images all look like [registry/]namespace/name[:version].\n if image.count(\"/\") > 1:\n registry, image = image.split(\"/\", 1)\n registries = [registry]\n\n for registry in registries:\n if registry not in self.reachable_registries:\n self.reachable_registries[registry] = self.connect_to_registry(registry)\n if not self.reachable_registries[registry]:\n continue\n\n args = {\"_raw_params\": self.skopeo_img_check_command.format(registry=registry, image=image)}\n result = self.execute_module_with_retries(\"command\", args)\n if result.get(\"rc\", 0) == 0 and not result.get(\"failed\"):\n return True\n if result.get(\"rc\") == 124: # RC 124 == timed out; mark unreachable\n self.reachable_registries[registry] = False\n\n return False\n\n def connect_to_registry(self, registry):\n \"\"\"Use ansible wait_for module to test connectivity from host to registry. Returns bool.\"\"\"\n # test a simple TCP connection\n host, _, port = registry.partition(\":\")\n port = port or 443\n args = dict(host=host, port=port, state=\"started\", timeout=30)\n result = self.execute_module(\"wait_for\", args)\n return result.get(\"rc\", 0) == 0 and not result.get(\"failed\")\n", "path": "roles/openshift_health_checker/openshift_checks/docker_image_availability.py"}]}
| 3,488 | 344 |
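The fix embedded in the record above wraps a bare string in a list before use, since `list()` on a string splits it into individual characters; the role does this with a `six.string_types` check. A minimal standalone sketch of the same normalization (the function name and sample values below are illustrative, not taken from the role):

```python
def normalize_registries(value):
    """Coerce an inventory value to a list of registries without splitting strings."""
    # list("registry.example.com") would produce one-character entries,
    # so a bare string is wrapped instead. The role itself checks
    # six.string_types for Python 2/3 compatibility; plain str suffices
    # for a Python 3 sketch.
    if isinstance(value, str):
        return [value]
    return list(value)


print(normalize_registries("registry.example.com"))
# ['registry.example.com']
print(normalize_registries(["a.example.com", "b.example.com"]))
# ['a.example.com', 'b.example.com']
```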
gh_patches_debug_28712 | rasdani/github-patches | git_diff | python-discord__bot-648 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New custom user statuses should appear in !user
Related to #620, we must moderate the new custom statuses, but they don't appear when we issue a !user command. These should be included to form part of the channel record for moderation purposes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/information.py`
Content:
```
1 import colorsys
2 import logging
3 import pprint
4 import textwrap
5 import typing
6 from typing import Any, Mapping, Optional
7
8 import discord
9 from discord import CategoryChannel, Colour, Embed, Member, Role, TextChannel, VoiceChannel, utils
10 from discord.ext import commands
11 from discord.ext.commands import Bot, BucketType, Cog, Context, command, group
12
13 from bot.constants import Channels, Emojis, MODERATION_ROLES, STAFF_ROLES
14 from bot.decorators import InChannelCheckFailure, in_channel, with_role
15 from bot.utils.checks import cooldown_with_role_bypass, with_role_check
16 from bot.utils.time import time_since
17
18 log = logging.getLogger(__name__)
19
20
21 class Information(Cog):
22 """A cog with commands for generating embeds with server info, such as server stats and user info."""
23
24 def __init__(self, bot: Bot):
25 self.bot = bot
26
27 @with_role(*MODERATION_ROLES)
28 @command(name="roles")
29 async def roles_info(self, ctx: Context) -> None:
30 """Returns a list of all roles and their corresponding IDs."""
31 # Sort the roles alphabetically and remove the @everyone role
32 roles = sorted(ctx.guild.roles, key=lambda role: role.name)
33 roles = [role for role in roles if role.name != "@everyone"]
34
35 # Build a string
36 role_string = ""
37 for role in roles:
38 role_string += f"`{role.id}` - {role.mention}\n"
39
40 # Build an embed
41 embed = Embed(
42 title="Role information",
43 colour=Colour.blurple(),
44 description=role_string
45 )
46
47 embed.set_footer(text=f"Total roles: {len(roles)}")
48
49 await ctx.send(embed=embed)
50
51 @with_role(*MODERATION_ROLES)
52 @command(name="role")
53 async def role_info(self, ctx: Context, *roles: typing.Union[Role, str]) -> None:
54 """
55 Return information on a role or list of roles.
56
57 To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.
58 """
59 parsed_roles = []
60
61 for role_name in roles:
62 if isinstance(role_name, Role):
63 # Role conversion has already succeeded
64 parsed_roles.append(role_name)
65 continue
66
67 role = utils.find(lambda r: r.name.lower() == role_name.lower(), ctx.guild.roles)
68
69 if not role:
70 await ctx.send(f":x: Could not convert `{role_name}` to a role")
71 continue
72
73 parsed_roles.append(role)
74
75 for role in parsed_roles:
76 embed = Embed(
77 title=f"{role.name} info",
78 colour=role.colour,
79 )
80
81 embed.add_field(name="ID", value=role.id, inline=True)
82
83 embed.add_field(name="Colour (RGB)", value=f"#{role.colour.value:0>6x}", inline=True)
84
85 h, s, v = colorsys.rgb_to_hsv(*role.colour.to_rgb())
86
87 embed.add_field(name="Colour (HSV)", value=f"{h:.2f} {s:.2f} {v}", inline=True)
88
89 embed.add_field(name="Member count", value=len(role.members), inline=True)
90
91 embed.add_field(name="Position", value=role.position)
92
93 embed.add_field(name="Permission code", value=role.permissions.value, inline=True)
94
95 await ctx.send(embed=embed)
96
97 @command(name="server", aliases=["server_info", "guild", "guild_info"])
98 async def server_info(self, ctx: Context) -> None:
99 """Returns an embed full of server information."""
100 created = time_since(ctx.guild.created_at, precision="days")
101 features = ", ".join(ctx.guild.features)
102 region = ctx.guild.region
103
104 # How many of each type of channel?
105 roles = len(ctx.guild.roles)
106 channels = ctx.guild.channels
107 text_channels = 0
108 category_channels = 0
109 voice_channels = 0
110 for channel in channels:
111 if type(channel) == TextChannel:
112 text_channels += 1
113 elif type(channel) == CategoryChannel:
114 category_channels += 1
115 elif type(channel) == VoiceChannel:
116 voice_channels += 1
117
118 # How many of each user status?
119 member_count = ctx.guild.member_count
120 members = ctx.guild.members
121 online = 0
122 dnd = 0
123 idle = 0
124 offline = 0
125 for member in members:
126 if str(member.status) == "online":
127 online += 1
128 elif str(member.status) == "offline":
129 offline += 1
130 elif str(member.status) == "idle":
131 idle += 1
132 elif str(member.status) == "dnd":
133 dnd += 1
134
135 embed = Embed(
136 colour=Colour.blurple(),
137 description=textwrap.dedent(f"""
138 **Server information**
139 Created: {created}
140 Voice region: {region}
141 Features: {features}
142
143 **Counts**
144 Members: {member_count:,}
145 Roles: {roles}
146 Text: {text_channels}
147 Voice: {voice_channels}
148 Channel categories: {category_channels}
149
150 **Members**
151 {Emojis.status_online} {online}
152 {Emojis.status_idle} {idle}
153 {Emojis.status_dnd} {dnd}
154 {Emojis.status_offline} {offline}
155 """)
156 )
157
158 embed.set_thumbnail(url=ctx.guild.icon_url)
159
160 await ctx.send(embed=embed)
161
162 @command(name="user", aliases=["user_info", "member", "member_info"])
163 async def user_info(self, ctx: Context, user: Member = None, hidden: bool = False) -> None:
164 """Returns info about a user."""
165 if user is None:
166 user = ctx.author
167
168 # Do a role check if this is being executed on someone other than the caller
169 if user != ctx.author and not with_role_check(ctx, *MODERATION_ROLES):
170 await ctx.send("You may not use this command on users other than yourself.")
171 return
172
173 # Non-moderators may only do this in #bot-commands and can't see hidden infractions.
174 if not with_role_check(ctx, *STAFF_ROLES):
175 if not ctx.channel.id == Channels.bot:
176 raise InChannelCheckFailure(Channels.bot)
177 # Hide hidden infractions for users without a moderation role
178 hidden = False
179
180 # User information
181 created = time_since(user.created_at, max_units=3)
182
183 name = str(user)
184 if user.nick:
185 name = f"{user.nick} ({name})"
186
187 # Member information
188 joined = time_since(user.joined_at, precision="days")
189
190 # You're welcome, Volcyyyyyyyyyyyyyyyy
191 roles = ", ".join(role.mention for role in user.roles if role.name != "@everyone")
192
193 # Infractions
194 infractions = await self.bot.api_client.get(
195 'bot/infractions',
196 params={
197 'hidden': str(hidden),
198 'user__id': str(user.id)
199 }
200 )
201
202 infr_total = 0
203 infr_active = 0
204
205 # At least it's readable.
206 for infr in infractions:
207 if infr["active"]:
208 infr_active += 1
209
210 infr_total += 1
211
212 # Let's build the embed now
213 embed = Embed(
214 title=name,
215 description=textwrap.dedent(f"""
216 **User Information**
217 Created: {created}
218 Profile: {user.mention}
219 ID: {user.id}
220
221 **Member Information**
222 Joined: {joined}
223 Roles: {roles or None}
224
225 **Infractions**
226 Total: {infr_total}
227 Active: {infr_active}
228 """)
229 )
230
231 embed.set_thumbnail(url=user.avatar_url_as(format="png"))
232 embed.colour = user.top_role.colour if roles else Colour.blurple()
233
234 await ctx.send(embed=embed)
235
236 def format_fields(self, mapping: Mapping[str, Any], field_width: Optional[int] = None) -> str:
237 """Format a mapping to be readable to a human."""
238 # sorting is technically superfluous but nice if you want to look for a specific field
239 fields = sorted(mapping.items(), key=lambda item: item[0])
240
241 if field_width is None:
242 field_width = len(max(mapping.keys(), key=len))
243
244 out = ''
245
246 for key, val in fields:
247 if isinstance(val, dict):
248 # if we have dicts inside dicts we want to apply the same treatment to the inner dictionaries
249 inner_width = int(field_width * 1.6)
250 val = '\n' + self.format_fields(val, field_width=inner_width)
251
252 elif isinstance(val, str):
253 # split up text since it might be long
254 text = textwrap.fill(val, width=100, replace_whitespace=False)
255
256 # indent it, I guess you could do this with `wrap` and `join` but this is nicer
257 val = textwrap.indent(text, ' ' * (field_width + len(': ')))
258
259 # the first line is already indented so we `str.lstrip` it
260 val = val.lstrip()
261
262 if key == 'color':
263 # makes the base 10 representation of a hex number readable to humans
264 val = hex(val)
265
266 out += '{0:>{width}}: {1}\n'.format(key, val, width=field_width)
267
268 # remove trailing whitespace
269 return out.rstrip()
270
271 @cooldown_with_role_bypass(2, 60 * 3, BucketType.member, bypass_roles=STAFF_ROLES)
272 @group(invoke_without_command=True)
273 @in_channel(Channels.bot, bypass_roles=STAFF_ROLES)
274 async def raw(self, ctx: Context, *, message: discord.Message, json: bool = False) -> None:
275 """Shows information about the raw API response."""
276 # I *guess* it could be deleted right as the command is invoked but I felt like it wasn't worth handling
277 # doing this extra request is also much easier than trying to convert everything back into a dictionary again
278 raw_data = await ctx.bot.http.get_message(message.channel.id, message.id)
279
280 paginator = commands.Paginator()
281
282 def add_content(title: str, content: str) -> None:
283 paginator.add_line(f'== {title} ==\n')
284 # replace backticks as it breaks out of code blocks. Spaces seemed to be the most reasonable solution.
285 # we hope it's not close to 2000
286 paginator.add_line(content.replace('```', '`` `'))
287 paginator.close_page()
288
289 if message.content:
290 add_content('Raw message', message.content)
291
292 transformer = pprint.pformat if json else self.format_fields
293 for field_name in ('embeds', 'attachments'):
294 data = raw_data[field_name]
295
296 if not data:
297 continue
298
299 total = len(data)
300 for current, item in enumerate(data, start=1):
301 title = f'Raw {field_name} ({current}/{total})'
302 add_content(title, transformer(item))
303
304 for page in paginator.pages:
305 await ctx.send(page)
306
307 @raw.command()
308 async def json(self, ctx: Context, message: discord.Message) -> None:
309 """Shows information about the raw API response in a copy-pasteable Python format."""
310 await ctx.invoke(self.raw, message=message, json=True)
311
312
313 def setup(bot: Bot) -> None:
314 """Information cog load."""
315 bot.add_cog(Information(bot))
316 log.info("Cog loaded: Information")
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bot/cogs/information.py b/bot/cogs/information.py
--- a/bot/cogs/information.py
+++ b/bot/cogs/information.py
@@ -9,6 +9,7 @@
from discord import CategoryChannel, Colour, Embed, Member, Role, TextChannel, VoiceChannel, utils
from discord.ext import commands
from discord.ext.commands import Bot, BucketType, Cog, Context, command, group
+from discord.utils import escape_markdown
from bot.constants import Channels, Emojis, MODERATION_ROLES, STAFF_ROLES
from bot.decorators import InChannelCheckFailure, in_channel, with_role
@@ -180,6 +181,13 @@
# User information
created = time_since(user.created_at, max_units=3)
+ # Custom status
+ custom_status = ''
+ for activity in user.activities:
+ if activity.name == 'Custom Status':
+ state = escape_markdown(activity.state)
+ custom_status = f'Status: {state}\n'
+
name = str(user)
if user.nick:
name = f"{user.nick} ({name})"
@@ -217,7 +225,7 @@
Created: {created}
Profile: {user.mention}
ID: {user.id}
-
+ {custom_status}
**Member Information**
Joined: {joined}
Roles: {roles or None}
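A minimal, self-contained sketch of the status-extraction step added in the diff above. It assumes a discord.py `Member` whose `activities` may include a custom status (an activity literally named "Custom Status"); the helper name is illustrative, and the `state` guard is an extra safety check not present in the diff:

```python
from discord.utils import escape_markdown


def custom_status_line(user) -> str:
    """Return 'Status: <state>\n' if the member has a custom status, else ''."""
    for activity in user.activities:
        # Custom statuses surface as an activity named "Custom Status";
        # the state guard skips emoji-only statuses whose state is None,
        # and escape_markdown keeps user text from breaking the embed markup.
        if activity.name == "Custom Status" and activity.state:
            return f"Status: {escape_markdown(activity.state)}\n"
    return ""
```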
|
{"golden_diff": "diff --git a/bot/cogs/information.py b/bot/cogs/information.py\n--- a/bot/cogs/information.py\n+++ b/bot/cogs/information.py\n@@ -9,6 +9,7 @@\n from discord import CategoryChannel, Colour, Embed, Member, Role, TextChannel, VoiceChannel, utils\n from discord.ext import commands\n from discord.ext.commands import Bot, BucketType, Cog, Context, command, group\n+from discord.utils import escape_markdown\n \n from bot.constants import Channels, Emojis, MODERATION_ROLES, STAFF_ROLES\n from bot.decorators import InChannelCheckFailure, in_channel, with_role\n@@ -180,6 +181,13 @@\n # User information\n created = time_since(user.created_at, max_units=3)\n \n+ # Custom status\n+ custom_status = ''\n+ for activity in user.activities:\n+ if activity.name == 'Custom Status':\n+ state = escape_markdown(activity.state)\n+ custom_status = f'Status: {state}\\n'\n+\n name = str(user)\n if user.nick:\n name = f\"{user.nick} ({name})\"\n@@ -217,7 +225,7 @@\n Created: {created}\n Profile: {user.mention}\n ID: {user.id}\n-\n+ {custom_status}\n **Member Information**\n Joined: {joined}\n Roles: {roles or None}\n", "issue": "New custom user statuses should appear in !user\nRelated to #620, we must moderate the new custom statuses, but they don't appear when we issue a !user command. These should be included to form part of the channel record for moderation purposes.\n", "before_files": [{"content": "import colorsys\nimport logging\nimport pprint\nimport textwrap\nimport typing\nfrom typing import Any, Mapping, Optional\n\nimport discord\nfrom discord import CategoryChannel, Colour, Embed, Member, Role, TextChannel, VoiceChannel, utils\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot, BucketType, Cog, Context, command, group\n\nfrom bot.constants import Channels, Emojis, MODERATION_ROLES, STAFF_ROLES\nfrom bot.decorators import InChannelCheckFailure, in_channel, with_role\nfrom bot.utils.checks import cooldown_with_role_bypass, with_role_check\nfrom bot.utils.time import time_since\n\nlog = logging.getLogger(__name__)\n\n\nclass Information(Cog):\n \"\"\"A cog with commands for generating embeds with server info, such as server stats and user info.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @with_role(*MODERATION_ROLES)\n @command(name=\"roles\")\n async def roles_info(self, ctx: Context) -> None:\n \"\"\"Returns a list of all roles and their corresponding IDs.\"\"\"\n # Sort the roles alphabetically and remove the @everyone role\n roles = sorted(ctx.guild.roles, key=lambda role: role.name)\n roles = [role for role in roles if role.name != \"@everyone\"]\n\n # Build a string\n role_string = \"\"\n for role in roles:\n role_string += f\"`{role.id}` - {role.mention}\\n\"\n\n # Build an embed\n embed = Embed(\n title=\"Role information\",\n colour=Colour.blurple(),\n description=role_string\n )\n\n embed.set_footer(text=f\"Total roles: {len(roles)}\")\n\n await ctx.send(embed=embed)\n\n @with_role(*MODERATION_ROLES)\n @command(name=\"role\")\n async def role_info(self, ctx: Context, *roles: typing.Union[Role, str]) -> None:\n \"\"\"\n Return information on a role or list of roles.\n\n To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.\n \"\"\"\n parsed_roles = []\n\n for role_name in roles:\n if isinstance(role_name, Role):\n # Role conversion has already succeeded\n parsed_roles.append(role_name)\n continue\n\n role = utils.find(lambda r: r.name.lower() == role_name.lower(), ctx.guild.roles)\n\n if not 
role:\n await ctx.send(f\":x: Could not convert `{role_name}` to a role\")\n continue\n\n parsed_roles.append(role)\n\n for role in parsed_roles:\n embed = Embed(\n title=f\"{role.name} info\",\n colour=role.colour,\n )\n\n embed.add_field(name=\"ID\", value=role.id, inline=True)\n\n embed.add_field(name=\"Colour (RGB)\", value=f\"#{role.colour.value:0>6x}\", inline=True)\n\n h, s, v = colorsys.rgb_to_hsv(*role.colour.to_rgb())\n\n embed.add_field(name=\"Colour (HSV)\", value=f\"{h:.2f} {s:.2f} {v}\", inline=True)\n\n embed.add_field(name=\"Member count\", value=len(role.members), inline=True)\n\n embed.add_field(name=\"Position\", value=role.position)\n\n embed.add_field(name=\"Permission code\", value=role.permissions.value, inline=True)\n\n await ctx.send(embed=embed)\n\n @command(name=\"server\", aliases=[\"server_info\", \"guild\", \"guild_info\"])\n async def server_info(self, ctx: Context) -> None:\n \"\"\"Returns an embed full of server information.\"\"\"\n created = time_since(ctx.guild.created_at, precision=\"days\")\n features = \", \".join(ctx.guild.features)\n region = ctx.guild.region\n\n # How many of each type of channel?\n roles = len(ctx.guild.roles)\n channels = ctx.guild.channels\n text_channels = 0\n category_channels = 0\n voice_channels = 0\n for channel in channels:\n if type(channel) == TextChannel:\n text_channels += 1\n elif type(channel) == CategoryChannel:\n category_channels += 1\n elif type(channel) == VoiceChannel:\n voice_channels += 1\n\n # How many of each user status?\n member_count = ctx.guild.member_count\n members = ctx.guild.members\n online = 0\n dnd = 0\n idle = 0\n offline = 0\n for member in members:\n if str(member.status) == \"online\":\n online += 1\n elif str(member.status) == \"offline\":\n offline += 1\n elif str(member.status) == \"idle\":\n idle += 1\n elif str(member.status) == \"dnd\":\n dnd += 1\n\n embed = Embed(\n colour=Colour.blurple(),\n description=textwrap.dedent(f\"\"\"\n **Server information**\n Created: {created}\n Voice region: {region}\n Features: {features}\n\n **Counts**\n Members: {member_count:,}\n Roles: {roles}\n Text: {text_channels}\n Voice: {voice_channels}\n Channel categories: {category_channels}\n\n **Members**\n {Emojis.status_online} {online}\n {Emojis.status_idle} {idle}\n {Emojis.status_dnd} {dnd}\n {Emojis.status_offline} {offline}\n \"\"\")\n )\n\n embed.set_thumbnail(url=ctx.guild.icon_url)\n\n await ctx.send(embed=embed)\n\n @command(name=\"user\", aliases=[\"user_info\", \"member\", \"member_info\"])\n async def user_info(self, ctx: Context, user: Member = None, hidden: bool = False) -> None:\n \"\"\"Returns info about a user.\"\"\"\n if user is None:\n user = ctx.author\n\n # Do a role check if this is being executed on someone other than the caller\n if user != ctx.author and not with_role_check(ctx, *MODERATION_ROLES):\n await ctx.send(\"You may not use this command on users other than yourself.\")\n return\n\n # Non-moderators may only do this in #bot-commands and can't see hidden infractions.\n if not with_role_check(ctx, *STAFF_ROLES):\n if not ctx.channel.id == Channels.bot:\n raise InChannelCheckFailure(Channels.bot)\n # Hide hidden infractions for users without a moderation role\n hidden = False\n\n # User information\n created = time_since(user.created_at, max_units=3)\n\n name = str(user)\n if user.nick:\n name = f\"{user.nick} ({name})\"\n\n # Member information\n joined = time_since(user.joined_at, precision=\"days\")\n\n # You're welcome, Volcyyyyyyyyyyyyyyyy\n roles = \", 
\".join(role.mention for role in user.roles if role.name != \"@everyone\")\n\n # Infractions\n infractions = await self.bot.api_client.get(\n 'bot/infractions',\n params={\n 'hidden': str(hidden),\n 'user__id': str(user.id)\n }\n )\n\n infr_total = 0\n infr_active = 0\n\n # At least it's readable.\n for infr in infractions:\n if infr[\"active\"]:\n infr_active += 1\n\n infr_total += 1\n\n # Let's build the embed now\n embed = Embed(\n title=name,\n description=textwrap.dedent(f\"\"\"\n **User Information**\n Created: {created}\n Profile: {user.mention}\n ID: {user.id}\n\n **Member Information**\n Joined: {joined}\n Roles: {roles or None}\n\n **Infractions**\n Total: {infr_total}\n Active: {infr_active}\n \"\"\")\n )\n\n embed.set_thumbnail(url=user.avatar_url_as(format=\"png\"))\n embed.colour = user.top_role.colour if roles else Colour.blurple()\n\n await ctx.send(embed=embed)\n\n def format_fields(self, mapping: Mapping[str, Any], field_width: Optional[int] = None) -> str:\n \"\"\"Format a mapping to be readable to a human.\"\"\"\n # sorting is technically superfluous but nice if you want to look for a specific field\n fields = sorted(mapping.items(), key=lambda item: item[0])\n\n if field_width is None:\n field_width = len(max(mapping.keys(), key=len))\n\n out = ''\n\n for key, val in fields:\n if isinstance(val, dict):\n # if we have dicts inside dicts we want to apply the same treatment to the inner dictionaries\n inner_width = int(field_width * 1.6)\n val = '\\n' + self.format_fields(val, field_width=inner_width)\n\n elif isinstance(val, str):\n # split up text since it might be long\n text = textwrap.fill(val, width=100, replace_whitespace=False)\n\n # indent it, I guess you could do this with `wrap` and `join` but this is nicer\n val = textwrap.indent(text, ' ' * (field_width + len(': ')))\n\n # the first line is already indented so we `str.lstrip` it\n val = val.lstrip()\n\n if key == 'color':\n # makes the base 10 representation of a hex number readable to humans\n val = hex(val)\n\n out += '{0:>{width}}: {1}\\n'.format(key, val, width=field_width)\n\n # remove trailing whitespace\n return out.rstrip()\n\n @cooldown_with_role_bypass(2, 60 * 3, BucketType.member, bypass_roles=STAFF_ROLES)\n @group(invoke_without_command=True)\n @in_channel(Channels.bot, bypass_roles=STAFF_ROLES)\n async def raw(self, ctx: Context, *, message: discord.Message, json: bool = False) -> None:\n \"\"\"Shows information about the raw API response.\"\"\"\n # I *guess* it could be deleted right as the command is invoked but I felt like it wasn't worth handling\n # doing this extra request is also much easier than trying to convert everything back into a dictionary again\n raw_data = await ctx.bot.http.get_message(message.channel.id, message.id)\n\n paginator = commands.Paginator()\n\n def add_content(title: str, content: str) -> None:\n paginator.add_line(f'== {title} ==\\n')\n # replace backticks as it breaks out of code blocks. 
Spaces seemed to be the most reasonable solution.\n # we hope it's not close to 2000\n paginator.add_line(content.replace('```', '`` `'))\n paginator.close_page()\n\n if message.content:\n add_content('Raw message', message.content)\n\n transformer = pprint.pformat if json else self.format_fields\n for field_name in ('embeds', 'attachments'):\n data = raw_data[field_name]\n\n if not data:\n continue\n\n total = len(data)\n for current, item in enumerate(data, start=1):\n title = f'Raw {field_name} ({current}/{total})'\n add_content(title, transformer(item))\n\n for page in paginator.pages:\n await ctx.send(page)\n\n @raw.command()\n async def json(self, ctx: Context, message: discord.Message) -> None:\n \"\"\"Shows information about the raw API response in a copy-pasteable Python format.\"\"\"\n await ctx.invoke(self.raw, message=message, json=True)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Information cog load.\"\"\"\n bot.add_cog(Information(bot))\n log.info(\"Cog loaded: Information\")\n", "path": "bot/cogs/information.py"}], "after_files": [{"content": "import colorsys\nimport logging\nimport pprint\nimport textwrap\nimport typing\nfrom typing import Any, Mapping, Optional\n\nimport discord\nfrom discord import CategoryChannel, Colour, Embed, Member, Role, TextChannel, VoiceChannel, utils\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot, BucketType, Cog, Context, command, group\nfrom discord.utils import escape_markdown\n\nfrom bot.constants import Channels, Emojis, MODERATION_ROLES, STAFF_ROLES\nfrom bot.decorators import InChannelCheckFailure, in_channel, with_role\nfrom bot.utils.checks import cooldown_with_role_bypass, with_role_check\nfrom bot.utils.time import time_since\n\nlog = logging.getLogger(__name__)\n\n\nclass Information(Cog):\n \"\"\"A cog with commands for generating embeds with server info, such as server stats and user info.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @with_role(*MODERATION_ROLES)\n @command(name=\"roles\")\n async def roles_info(self, ctx: Context) -> None:\n \"\"\"Returns a list of all roles and their corresponding IDs.\"\"\"\n # Sort the roles alphabetically and remove the @everyone role\n roles = sorted(ctx.guild.roles, key=lambda role: role.name)\n roles = [role for role in roles if role.name != \"@everyone\"]\n\n # Build a string\n role_string = \"\"\n for role in roles:\n role_string += f\"`{role.id}` - {role.mention}\\n\"\n\n # Build an embed\n embed = Embed(\n title=\"Role information\",\n colour=Colour.blurple(),\n description=role_string\n )\n\n embed.set_footer(text=f\"Total roles: {len(roles)}\")\n\n await ctx.send(embed=embed)\n\n @with_role(*MODERATION_ROLES)\n @command(name=\"role\")\n async def role_info(self, ctx: Context, *roles: typing.Union[Role, str]) -> None:\n \"\"\"\n Return information on a role or list of roles.\n\n To specify multiple roles just add to the arguments, delimit roles with spaces in them using quotation marks.\n \"\"\"\n parsed_roles = []\n\n for role_name in roles:\n if isinstance(role_name, Role):\n # Role conversion has already succeeded\n parsed_roles.append(role_name)\n continue\n\n role = utils.find(lambda r: r.name.lower() == role_name.lower(), ctx.guild.roles)\n\n if not role:\n await ctx.send(f\":x: Could not convert `{role_name}` to a role\")\n continue\n\n parsed_roles.append(role)\n\n for role in parsed_roles:\n embed = Embed(\n title=f\"{role.name} info\",\n colour=role.colour,\n )\n\n embed.add_field(name=\"ID\", value=role.id, inline=True)\n\n 
embed.add_field(name=\"Colour (RGB)\", value=f\"#{role.colour.value:0>6x}\", inline=True)\n\n h, s, v = colorsys.rgb_to_hsv(*role.colour.to_rgb())\n\n embed.add_field(name=\"Colour (HSV)\", value=f\"{h:.2f} {s:.2f} {v}\", inline=True)\n\n embed.add_field(name=\"Member count\", value=len(role.members), inline=True)\n\n embed.add_field(name=\"Position\", value=role.position)\n\n embed.add_field(name=\"Permission code\", value=role.permissions.value, inline=True)\n\n await ctx.send(embed=embed)\n\n @command(name=\"server\", aliases=[\"server_info\", \"guild\", \"guild_info\"])\n async def server_info(self, ctx: Context) -> None:\n \"\"\"Returns an embed full of server information.\"\"\"\n created = time_since(ctx.guild.created_at, precision=\"days\")\n features = \", \".join(ctx.guild.features)\n region = ctx.guild.region\n\n # How many of each type of channel?\n roles = len(ctx.guild.roles)\n channels = ctx.guild.channels\n text_channels = 0\n category_channels = 0\n voice_channels = 0\n for channel in channels:\n if type(channel) == TextChannel:\n text_channels += 1\n elif type(channel) == CategoryChannel:\n category_channels += 1\n elif type(channel) == VoiceChannel:\n voice_channels += 1\n\n # How many of each user status?\n member_count = ctx.guild.member_count\n members = ctx.guild.members\n online = 0\n dnd = 0\n idle = 0\n offline = 0\n for member in members:\n if str(member.status) == \"online\":\n online += 1\n elif str(member.status) == \"offline\":\n offline += 1\n elif str(member.status) == \"idle\":\n idle += 1\n elif str(member.status) == \"dnd\":\n dnd += 1\n\n embed = Embed(\n colour=Colour.blurple(),\n description=textwrap.dedent(f\"\"\"\n **Server information**\n Created: {created}\n Voice region: {region}\n Features: {features}\n\n **Counts**\n Members: {member_count:,}\n Roles: {roles}\n Text: {text_channels}\n Voice: {voice_channels}\n Channel categories: {category_channels}\n\n **Members**\n {Emojis.status_online} {online}\n {Emojis.status_idle} {idle}\n {Emojis.status_dnd} {dnd}\n {Emojis.status_offline} {offline}\n \"\"\")\n )\n\n embed.set_thumbnail(url=ctx.guild.icon_url)\n\n await ctx.send(embed=embed)\n\n @command(name=\"user\", aliases=[\"user_info\", \"member\", \"member_info\"])\n async def user_info(self, ctx: Context, user: Member = None, hidden: bool = False) -> None:\n \"\"\"Returns info about a user.\"\"\"\n if user is None:\n user = ctx.author\n\n # Do a role check if this is being executed on someone other than the caller\n if user != ctx.author and not with_role_check(ctx, *MODERATION_ROLES):\n await ctx.send(\"You may not use this command on users other than yourself.\")\n return\n\n # Non-moderators may only do this in #bot-commands and can't see hidden infractions.\n if not with_role_check(ctx, *STAFF_ROLES):\n if not ctx.channel.id == Channels.bot:\n raise InChannelCheckFailure(Channels.bot)\n # Hide hidden infractions for users without a moderation role\n hidden = False\n\n # User information\n created = time_since(user.created_at, max_units=3)\n\n # Custom status\n custom_status = ''\n for activity in user.activities:\n if activity.name == 'Custom Status':\n state = escape_markdown(activity.state)\n custom_status = f'Status: {state}\\n'\n\n name = str(user)\n if user.nick:\n name = f\"{user.nick} ({name})\"\n\n # Member information\n joined = time_since(user.joined_at, precision=\"days\")\n\n # You're welcome, Volcyyyyyyyyyyyyyyyy\n roles = \", \".join(role.mention for role in user.roles if role.name != \"@everyone\")\n\n # Infractions\n 
infractions = await self.bot.api_client.get(\n 'bot/infractions',\n params={\n 'hidden': str(hidden),\n 'user__id': str(user.id)\n }\n )\n\n infr_total = 0\n infr_active = 0\n\n # At least it's readable.\n for infr in infractions:\n if infr[\"active\"]:\n infr_active += 1\n\n infr_total += 1\n\n # Let's build the embed now\n embed = Embed(\n title=name,\n description=textwrap.dedent(f\"\"\"\n **User Information**\n Created: {created}\n Profile: {user.mention}\n ID: {user.id}\n {custom_status}\n **Member Information**\n Joined: {joined}\n Roles: {roles or None}\n\n **Infractions**\n Total: {infr_total}\n Active: {infr_active}\n \"\"\")\n )\n\n embed.set_thumbnail(url=user.avatar_url_as(format=\"png\"))\n embed.colour = user.top_role.colour if roles else Colour.blurple()\n\n await ctx.send(embed=embed)\n\n def format_fields(self, mapping: Mapping[str, Any], field_width: Optional[int] = None) -> str:\n \"\"\"Format a mapping to be readable to a human.\"\"\"\n # sorting is technically superfluous but nice if you want to look for a specific field\n fields = sorted(mapping.items(), key=lambda item: item[0])\n\n if field_width is None:\n field_width = len(max(mapping.keys(), key=len))\n\n out = ''\n\n for key, val in fields:\n if isinstance(val, dict):\n # if we have dicts inside dicts we want to apply the same treatment to the inner dictionaries\n inner_width = int(field_width * 1.6)\n val = '\\n' + self.format_fields(val, field_width=inner_width)\n\n elif isinstance(val, str):\n # split up text since it might be long\n text = textwrap.fill(val, width=100, replace_whitespace=False)\n\n # indent it, I guess you could do this with `wrap` and `join` but this is nicer\n val = textwrap.indent(text, ' ' * (field_width + len(': ')))\n\n # the first line is already indented so we `str.lstrip` it\n val = val.lstrip()\n\n if key == 'color':\n # makes the base 10 representation of a hex number readable to humans\n val = hex(val)\n\n out += '{0:>{width}}: {1}\\n'.format(key, val, width=field_width)\n\n # remove trailing whitespace\n return out.rstrip()\n\n @cooldown_with_role_bypass(2, 60 * 3, BucketType.member, bypass_roles=STAFF_ROLES)\n @group(invoke_without_command=True)\n @in_channel(Channels.bot, bypass_roles=STAFF_ROLES)\n async def raw(self, ctx: Context, *, message: discord.Message, json: bool = False) -> None:\n \"\"\"Shows information about the raw API response.\"\"\"\n # I *guess* it could be deleted right as the command is invoked but I felt like it wasn't worth handling\n # doing this extra request is also much easier than trying to convert everything back into a dictionary again\n raw_data = await ctx.bot.http.get_message(message.channel.id, message.id)\n\n paginator = commands.Paginator()\n\n def add_content(title: str, content: str) -> None:\n paginator.add_line(f'== {title} ==\\n')\n # replace backticks as it breaks out of code blocks. 
Spaces seemed to be the most reasonable solution.\n # we hope it's not close to 2000\n paginator.add_line(content.replace('```', '`` `'))\n paginator.close_page()\n\n if message.content:\n add_content('Raw message', message.content)\n\n transformer = pprint.pformat if json else self.format_fields\n for field_name in ('embeds', 'attachments'):\n data = raw_data[field_name]\n\n if not data:\n continue\n\n total = len(data)\n for current, item in enumerate(data, start=1):\n title = f'Raw {field_name} ({current}/{total})'\n add_content(title, transformer(item))\n\n for page in paginator.pages:\n await ctx.send(page)\n\n @raw.command()\n async def json(self, ctx: Context, message: discord.Message) -> None:\n \"\"\"Shows information about the raw API response in a copy-pasteable Python format.\"\"\"\n await ctx.invoke(self.raw, message=message, json=True)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Information cog load.\"\"\"\n bot.add_cog(Information(bot))\n log.info(\"Cog loaded: Information\")\n", "path": "bot/cogs/information.py"}]}
| 3,697 | 314 |
gh_patches_debug_29441 | rasdani/github-patches | git_diff | goauthentik__authentik-3254 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
404 error for kubernetes depoyment
**Describe the bug**
/if/flow/initial-setup/ endpoint in the browser gives a 404 not found error
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'https://goauthentik.io/docs/installation/kubernetes'
2. Create Values.yaml
3. run helm commands
4. all pods are up and running
5. go to browser http://<ing-url>/if/flow/initial-setup/
**Expected behavior**
Page should load letting me setup ak-admin password
**Screenshots**
404 not found
**Logs**
{"event": "/api/v3/outposts/instances/", "host": "localhost:8000", "level": "info", "logger": "authentik.asgi", "method": "GET", "pid": 24, "remote": "127.0.0.1", "request_id": "454efe5b57f34713bf837681449b91a6", "runtime": 35, "scheme": "http", "status": 403, "timestamp": "2022-07-11T10:39:00.436171", "user": "", "user_agent": "goauthentik.io/outpost/2022.7.2"}
{"event": "Forbidden: /api/v3/outposts/instances/", "level": "warning", "logger": "django.request", "timestamp": 1657535940.437195}
{"error":"403 Forbidden","event":"Failed to fetch outpost configuration, retrying in 3 seconds","level":"error","logger":"authentik.outpost.ak-api-controller","timestamp":"2022-07-11T10:39:00Z"}
**Version and Deployment (please complete the following information):**
- authentik version: authentik-2022.7.2
- Deployment: [kubectl 1.21, helm v3.1.0]
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lifecycle/migrate.py`
Content:
```
1 #!/usr/bin/env python
2 """System Migration handler"""
3 import os
4 from importlib.util import module_from_spec, spec_from_file_location
5 from inspect import getmembers, isclass
6 from pathlib import Path
7 from typing import Any
8
9 from psycopg2 import connect
10 from structlog.stdlib import get_logger
11
12 from authentik.lib.config import CONFIG
13
14 LOGGER = get_logger()
15 ADV_LOCK_UID = 1000
16 LOCKED = False
17
18
19 class BaseMigration:
20 """Base System Migration"""
21
22 cur: Any
23 con: Any
24
25 def __init__(self, cur: Any, con: Any):
26 self.cur = cur
27 self.con = con
28
29 def needs_migration(self) -> bool:
30 """Return true if Migration needs to be run"""
31 return False
32
33 def run(self):
34 """Run the actual migration"""
35
36
37 def wait_for_lock():
38 """lock an advisory lock to prevent multiple instances from migrating at once"""
39 LOGGER.info("waiting to acquire database lock")
40 curr.execute("SELECT pg_advisory_lock(%s)", (ADV_LOCK_UID,))
41 # pylint: disable=global-statement
42 global LOCKED
43 LOCKED = True
44
45
46 def release_lock():
47 """Release database lock"""
48 if not LOCKED:
49 return
50 curr.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
51
52
53 if __name__ == "__main__":
54
55 conn = connect(
56 dbname=CONFIG.y("postgresql.name"),
57 user=CONFIG.y("postgresql.user"),
58 password=CONFIG.y("postgresql.password"),
59 host=CONFIG.y("postgresql.host"),
60 port=int(CONFIG.y("postgresql.port")),
61 )
62 curr = conn.cursor()
63 try:
64 for migration in Path(__file__).parent.absolute().glob("system_migrations/*.py"):
65 spec = spec_from_file_location("lifecycle.system_migrations", migration)
66 mod = module_from_spec(spec)
67 # pyright: reportGeneralTypeIssues=false
68 spec.loader.exec_module(mod)
69
70 for name, sub in getmembers(mod, isclass):
71 if name != "Migration":
72 continue
73 migration = sub(curr, conn)
74 if migration.needs_migration():
75 wait_for_lock()
76 LOGGER.info("Migration needs to be applied", migration=sub)
77 migration.run()
78 LOGGER.info("Migration finished applying", migration=sub)
79 release_lock()
80 LOGGER.info("applying django migrations")
81 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
82 wait_for_lock()
83 try:
84 from django.core.management import execute_from_command_line
85 except ImportError as exc:
86 raise ImportError(
87 "Couldn't import Django. Are you sure it's installed and "
88 "available on your PYTHONPATH environment variable? Did you "
89 "forget to activate a virtual environment?"
90 ) from exc
91 execute_from_command_line(["", "migrate"])
92 finally:
93 release_lock()
94
```
Path: `authentik/managed/tasks.py`
Content:
```
1 """managed tasks"""
2 from django.db import DatabaseError
3
4 from authentik.core.tasks import CELERY_APP
5 from authentik.events.monitored_tasks import (
6 MonitoredTask,
7 TaskResult,
8 TaskResultStatus,
9 prefill_task,
10 )
11 from authentik.managed.manager import ObjectManager
12
13
14 @CELERY_APP.task(bind=True, base=MonitoredTask)
15 @prefill_task
16 def managed_reconcile(self: MonitoredTask):
17 """Run ObjectManager to ensure objects are up-to-date"""
18 try:
19 ObjectManager().run()
20 self.set_status(
21 TaskResult(TaskResultStatus.SUCCESSFUL, ["Successfully updated managed models."])
22 )
23 except DatabaseError as exc: # pragma: no cover
24 self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/managed/tasks.py b/authentik/managed/tasks.py
--- a/authentik/managed/tasks.py
+++ b/authentik/managed/tasks.py
@@ -11,7 +11,11 @@
from authentik.managed.manager import ObjectManager
-@CELERY_APP.task(bind=True, base=MonitoredTask)
+@CELERY_APP.task(
+ bind=True,
+ base=MonitoredTask,
+ retry_backoff=True,
+)
@prefill_task
def managed_reconcile(self: MonitoredTask):
"""Run ObjectManager to ensure objects are up-to-date"""
@@ -22,3 +26,4 @@
)
except DatabaseError as exc: # pragma: no cover
self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))
+ self.retry()
diff --git a/lifecycle/migrate.py b/lifecycle/migrate.py
--- a/lifecycle/migrate.py
+++ b/lifecycle/migrate.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
"""System Migration handler"""
import os
+import sys
from importlib.util import module_from_spec, spec_from_file_location
from inspect import getmembers, isclass
from pathlib import Path
@@ -50,7 +51,16 @@
curr.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
+def is_locked():
+ """Check if lock is currently active (used by worker to wait for migrations)"""
+ curr.executor("SELECT count(*) FROM pg_locks WHERE objid = %s", (ADV_LOCK_UID,))
+ return curr.rowcount
+
+
if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ if sys.argv[1] == "check_lock":
+ sys.exit(is_locked())
conn = connect(
dbname=CONFIG.y("postgresql.name"),
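A rough standalone illustration of the lock-probe idea behind the `check_lock` argument in the diff above; it is not the project's actual helper, and the connection string is a placeholder (the real handler reads its settings from authentik's CONFIG). It queries `pg_locks` for the advisory-lock object id and exits non-zero while a migration holds it:

```python
import sys

import psycopg2

ADV_LOCK_UID = 1000  # same advisory-lock id the migration handler uses


def migration_lock_held(conn) -> bool:
    """Return True if any session currently holds the migration advisory lock."""
    with conn.cursor() as cur:
        cur.execute("SELECT count(*) FROM pg_locks WHERE objid = %s", (ADV_LOCK_UID,))
        return cur.fetchone()[0] > 0


if __name__ == "__main__":
    # Placeholder DSN for the sketch; adjust to the actual database settings.
    conn = psycopg2.connect("dbname=authentik user=authentik host=localhost")
    sys.exit(1 if migration_lock_held(conn) else 0)
```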
|
{"golden_diff": "diff --git a/authentik/managed/tasks.py b/authentik/managed/tasks.py\n--- a/authentik/managed/tasks.py\n+++ b/authentik/managed/tasks.py\n@@ -11,7 +11,11 @@\n from authentik.managed.manager import ObjectManager\n \n \n-@CELERY_APP.task(bind=True, base=MonitoredTask)\n+@CELERY_APP.task(\n+ bind=True,\n+ base=MonitoredTask,\n+ retry_backoff=True,\n+)\n @prefill_task\n def managed_reconcile(self: MonitoredTask):\n \"\"\"Run ObjectManager to ensure objects are up-to-date\"\"\"\n@@ -22,3 +26,4 @@\n )\n except DatabaseError as exc: # pragma: no cover\n self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))\n+ self.retry()\ndiff --git a/lifecycle/migrate.py b/lifecycle/migrate.py\n--- a/lifecycle/migrate.py\n+++ b/lifecycle/migrate.py\n@@ -1,6 +1,7 @@\n #!/usr/bin/env python\n \"\"\"System Migration handler\"\"\"\n import os\n+import sys\n from importlib.util import module_from_spec, spec_from_file_location\n from inspect import getmembers, isclass\n from pathlib import Path\n@@ -50,7 +51,16 @@\n curr.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n \n \n+def is_locked():\n+ \"\"\"Check if lock is currently active (used by worker to wait for migrations)\"\"\"\n+ curr.executor(\"SELECT count(*) FROM pg_locks WHERE objid = %s\", (ADV_LOCK_UID,))\n+ return curr.rowcount\n+\n+\n if __name__ == \"__main__\":\n+ if len(sys.argv) > 1:\n+ if sys.argv[1] == \"check_lock\":\n+ sys.exit(is_locked())\n \n conn = connect(\n dbname=CONFIG.y(\"postgresql.name\"),\n", "issue": "404 error for kubernetes depoyment\n**Describe the bug**\r\n/if/flow/initial-setup/ endpoint in the browser gives a 404 not found error\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'https://goauthentik.io/docs/installation/kubernetes'\r\n2. Create Values.yaml\r\n3. run helm commands\r\n4. all pods are up and running\r\n5. 
go to browser http://<ing-url>/if/flow/initial-setup/\r\n\r\n**Expected behavior**\r\nPage should load letting me setup ak-admin password\r\n\r\n**Screenshots**\r\n404 not found\r\n\r\n**Logs**\r\n{\"event\": \"/api/v3/outposts/instances/\", \"host\": \"localhost:8000\", \"level\": \"info\", \"logger\": \"authentik.asgi\", \"method\": \"GET\", \"pid\": 24, \"remote\": \"127.0.0.1\", \"request_id\": \"454efe5b57f34713bf837681449b91a6\", \"runtime\": 35, \"scheme\": \"http\", \"status\": 403, \"timestamp\": \"2022-07-11T10:39:00.436171\", \"user\": \"\", \"user_agent\": \"goauthentik.io/outpost/2022.7.2\"}\r\n{\"event\": \"Forbidden: /api/v3/outposts/instances/\", \"level\": \"warning\", \"logger\": \"django.request\", \"timestamp\": 1657535940.437195}\r\n{\"error\":\"403 Forbidden\",\"event\":\"Failed to fetch outpost configuration, retrying in 3 seconds\",\"level\":\"error\",\"logger\":\"authentik.outpost.ak-api-controller\",\"timestamp\":\"2022-07-11T10:39:00Z\"}\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: authentik-2022.7.2 \r\n - Deployment: [kubectl 1.21, helm v3.1.0]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"System Migration handler\"\"\"\nimport os\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom inspect import getmembers, isclass\nfrom pathlib import Path\nfrom typing import Any\n\nfrom psycopg2 import connect\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.config import CONFIG\n\nLOGGER = get_logger()\nADV_LOCK_UID = 1000\nLOCKED = False\n\n\nclass BaseMigration:\n \"\"\"Base System Migration\"\"\"\n\n cur: Any\n con: Any\n\n def __init__(self, cur: Any, con: Any):\n self.cur = cur\n self.con = con\n\n def needs_migration(self) -> bool:\n \"\"\"Return true if Migration needs to be run\"\"\"\n return False\n\n def run(self):\n \"\"\"Run the actual migration\"\"\"\n\n\ndef wait_for_lock():\n \"\"\"lock an advisory lock to prevent multiple instances from migrating at once\"\"\"\n LOGGER.info(\"waiting to acquire database lock\")\n curr.execute(\"SELECT pg_advisory_lock(%s)\", (ADV_LOCK_UID,))\n # pylint: disable=global-statement\n global LOCKED\n LOCKED = True\n\n\ndef release_lock():\n \"\"\"Release database lock\"\"\"\n if not LOCKED:\n return\n curr.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n\n\nif __name__ == \"__main__\":\n\n conn = connect(\n dbname=CONFIG.y(\"postgresql.name\"),\n user=CONFIG.y(\"postgresql.user\"),\n password=CONFIG.y(\"postgresql.password\"),\n host=CONFIG.y(\"postgresql.host\"),\n port=int(CONFIG.y(\"postgresql.port\")),\n )\n curr = conn.cursor()\n try:\n for migration in Path(__file__).parent.absolute().glob(\"system_migrations/*.py\"):\n spec = spec_from_file_location(\"lifecycle.system_migrations\", migration)\n mod = module_from_spec(spec)\n # pyright: reportGeneralTypeIssues=false\n spec.loader.exec_module(mod)\n\n for name, sub in getmembers(mod, isclass):\n if name != \"Migration\":\n continue\n migration = sub(curr, conn)\n if migration.needs_migration():\n wait_for_lock()\n LOGGER.info(\"Migration needs to be applied\", migration=sub)\n migration.run()\n LOGGER.info(\"Migration finished applying\", migration=sub)\n release_lock()\n LOGGER.info(\"applying django migrations\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n wait_for_lock()\n try:\n from django.core.management import 
execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line([\"\", \"migrate\"])\n finally:\n release_lock()\n", "path": "lifecycle/migrate.py"}, {"content": "\"\"\"managed tasks\"\"\"\nfrom django.db import DatabaseError\n\nfrom authentik.core.tasks import CELERY_APP\nfrom authentik.events.monitored_tasks import (\n MonitoredTask,\n TaskResult,\n TaskResultStatus,\n prefill_task,\n)\nfrom authentik.managed.manager import ObjectManager\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\n@prefill_task\ndef managed_reconcile(self: MonitoredTask):\n \"\"\"Run ObjectManager to ensure objects are up-to-date\"\"\"\n try:\n ObjectManager().run()\n self.set_status(\n TaskResult(TaskResultStatus.SUCCESSFUL, [\"Successfully updated managed models.\"])\n )\n except DatabaseError as exc: # pragma: no cover\n self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))\n", "path": "authentik/managed/tasks.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"System Migration handler\"\"\"\nimport os\nimport sys\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom inspect import getmembers, isclass\nfrom pathlib import Path\nfrom typing import Any\n\nfrom psycopg2 import connect\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.config import CONFIG\n\nLOGGER = get_logger()\nADV_LOCK_UID = 1000\nLOCKED = False\n\n\nclass BaseMigration:\n \"\"\"Base System Migration\"\"\"\n\n cur: Any\n con: Any\n\n def __init__(self, cur: Any, con: Any):\n self.cur = cur\n self.con = con\n\n def needs_migration(self) -> bool:\n \"\"\"Return true if Migration needs to be run\"\"\"\n return False\n\n def run(self):\n \"\"\"Run the actual migration\"\"\"\n\n\ndef wait_for_lock():\n \"\"\"lock an advisory lock to prevent multiple instances from migrating at once\"\"\"\n LOGGER.info(\"waiting to acquire database lock\")\n curr.execute(\"SELECT pg_advisory_lock(%s)\", (ADV_LOCK_UID,))\n # pylint: disable=global-statement\n global LOCKED\n LOCKED = True\n\n\ndef release_lock():\n \"\"\"Release database lock\"\"\"\n if not LOCKED:\n return\n curr.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n\n\ndef is_locked():\n \"\"\"Check if lock is currently active (used by worker to wait for migrations)\"\"\"\n curr.executor(\"SELECT count(*) FROM pg_locks WHERE objid = %s\", (ADV_LOCK_UID,))\n return curr.rowcount\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n if sys.argv[1] == \"check_lock\":\n sys.exit(is_locked())\n\n conn = connect(\n dbname=CONFIG.y(\"postgresql.name\"),\n user=CONFIG.y(\"postgresql.user\"),\n password=CONFIG.y(\"postgresql.password\"),\n host=CONFIG.y(\"postgresql.host\"),\n port=int(CONFIG.y(\"postgresql.port\")),\n )\n curr = conn.cursor()\n try:\n for migration in Path(__file__).parent.absolute().glob(\"system_migrations/*.py\"):\n spec = spec_from_file_location(\"lifecycle.system_migrations\", migration)\n mod = module_from_spec(spec)\n # pyright: reportGeneralTypeIssues=false\n spec.loader.exec_module(mod)\n\n for name, sub in getmembers(mod, isclass):\n if name != \"Migration\":\n continue\n migration = sub(curr, conn)\n if migration.needs_migration():\n wait_for_lock()\n LOGGER.info(\"Migration needs to be applied\", migration=sub)\n migration.run()\n LOGGER.info(\"Migration finished applying\", 
migration=sub)\n release_lock()\n LOGGER.info(\"applying django migrations\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n wait_for_lock()\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line([\"\", \"migrate\"])\n finally:\n release_lock()\n", "path": "lifecycle/migrate.py"}, {"content": "\"\"\"managed tasks\"\"\"\nfrom django.db import DatabaseError\n\nfrom authentik.core.tasks import CELERY_APP\nfrom authentik.events.monitored_tasks import (\n MonitoredTask,\n TaskResult,\n TaskResultStatus,\n prefill_task,\n)\nfrom authentik.managed.manager import ObjectManager\n\n\n@CELERY_APP.task(\n bind=True,\n base=MonitoredTask,\n retry_backoff=True,\n)\n@prefill_task\ndef managed_reconcile(self: MonitoredTask):\n \"\"\"Run ObjectManager to ensure objects are up-to-date\"\"\"\n try:\n ObjectManager().run()\n self.set_status(\n TaskResult(TaskResultStatus.SUCCESSFUL, [\"Successfully updated managed models.\"])\n )\n except DatabaseError as exc: # pragma: no cover\n self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))\n self.retry()\n", "path": "authentik/managed/tasks.py"}]}
| 1,773 | 418 |
gh_patches_debug_22932
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2568
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add type annotations
please add type annotations here
_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2400#discussion_r809406486_
--- END ISSUE ---
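For orientation, a minimal sketch of the style of annotation being requested, not the project's actual change. `Dict` comes from `typing`; `Any` is only a placeholder for the SDK's concrete aggregation and point types, and the class name `_Sketch` is hypothetical.

```python
from typing import Any, Dict


class _Sketch:
    # Illustrative fragment only: annotate dict attributes where they are
    # assigned, so a type checker knows what they map from and to.
    def __init__(self) -> None:
        # Keys are frozensets of attribute items; in the real code the values
        # would be the SDK's aggregation/point objects (Any is a stand-in).
        self._attributes_aggregation: Dict[frozenset, Any] = {}
        self._attributes_previous_point: Dict[frozenset, Any] = {}
```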
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from logging import getLogger
17 from threading import Lock
18 from typing import TYPE_CHECKING, Iterable
19
20 from opentelemetry.sdk._metrics.aggregation import (
21 _convert_aggregation_temporality,
22 )
23 from opentelemetry.sdk._metrics.measurement import Measurement
24 from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric
25 from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
26 from opentelemetry.sdk._metrics.view import View
27
28 if TYPE_CHECKING:
29 from opentelemetry.sdk._metrics.instrument import _Instrument
30
31 _logger = getLogger(__name__)
32
33
34 class _ViewInstrumentMatch:
35 def __init__(
36 self,
37 view: View,
38 instrument: "_Instrument",
39 sdk_config: SdkConfiguration,
40 ):
41 self._view = view
42 self._instrument = instrument
43 self._sdk_config = sdk_config
44 self._attributes_aggregation = {}
45 self._attributes_previous_point = {}
46 self._lock = Lock()
47
48 # pylint: disable=protected-access
49 def consume_measurement(self, measurement: Measurement) -> None:
50
51 if self._view._attribute_keys is not None:
52
53 attributes = {}
54
55 for key, value in (measurement.attributes or {}).items():
56 if key in self._view._attribute_keys:
57 attributes[key] = value
58 elif measurement.attributes is not None:
59 attributes = measurement.attributes
60 else:
61 attributes = {}
62
63 attributes = frozenset(attributes.items())
64
65 if attributes not in self._attributes_aggregation:
66 with self._lock:
67 if attributes not in self._attributes_aggregation:
68 if self._view._aggregation:
69 aggregation = (
70 self._view._aggregation._create_aggregation(
71 self._instrument
72 )
73 )
74 else:
75 aggregation = self._instrument._default_aggregation
76 self._attributes_aggregation[attributes] = aggregation
77
78 self._attributes_aggregation[attributes].aggregate(measurement)
79
80 def collect(self, temporality: int) -> Iterable[Metric]:
81
82 with self._lock:
83 for (
84 attributes,
85 aggregation,
86 ) in self._attributes_aggregation.items():
87
88 previous_point = self._attributes_previous_point.get(
89 attributes
90 )
91
92 current_point = aggregation.collect()
93
94 # pylint: disable=assignment-from-none
95 self._attributes_previous_point[
96 attributes
97 ] = _convert_aggregation_temporality(
98 previous_point,
99 current_point,
100 AggregationTemporality.CUMULATIVE,
101 )
102
103 if current_point is not None:
104
105 yield Metric(
106 attributes=dict(attributes),
107 description=(
108 self._view._description
109 or self._instrument.description
110 ),
111 instrumentation_info=self._instrument.instrumentation_info,
112 name=self._view._name or self._instrument.name,
113 resource=self._sdk_config.resource,
114 unit=self._instrument.unit,
115 point=_convert_aggregation_temporality(
116 previous_point,
117 current_point,
118 temporality,
119 ),
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
@@ -15,10 +15,12 @@
from logging import getLogger
from threading import Lock
-from typing import TYPE_CHECKING, Iterable
+from typing import TYPE_CHECKING, Dict, Iterable
from opentelemetry.sdk._metrics.aggregation import (
+ _Aggregation,
_convert_aggregation_temporality,
+ _PointVarT,
)
from opentelemetry.sdk._metrics.measurement import Measurement
from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric
@@ -41,8 +43,8 @@
self._view = view
self._instrument = instrument
self._sdk_config = sdk_config
- self._attributes_aggregation = {}
- self._attributes_previous_point = {}
+ self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
+ self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}
self._lock = Lock()
# pylint: disable=protected-access
|
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n@@ -15,10 +15,12 @@\n \n from logging import getLogger\n from threading import Lock\n-from typing import TYPE_CHECKING, Iterable\n+from typing import TYPE_CHECKING, Dict, Iterable\n \n from opentelemetry.sdk._metrics.aggregation import (\n+ _Aggregation,\n _convert_aggregation_temporality,\n+ _PointVarT,\n )\n from opentelemetry.sdk._metrics.measurement import Measurement\n from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\n@@ -41,8 +43,8 @@\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n- self._attributes_aggregation = {}\n- self._attributes_previous_point = {}\n+ self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n+ self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}\n self._lock = Lock()\n \n # pylint: disable=protected-access\n", "issue": "Add type annontations\nplease add type annotations here\r\n\r\n_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2400#discussion_r809406486_\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import TYPE_CHECKING, Iterable\n\nfrom opentelemetry.sdk._metrics.aggregation import (\n _convert_aggregation_temporality,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk._metrics.view import View\n\nif TYPE_CHECKING:\n from opentelemetry.sdk._metrics.instrument import _Instrument\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: \"_Instrument\",\n sdk_config: SdkConfiguration,\n ):\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n self._attributes_aggregation = {}\n self._attributes_previous_point = {}\n self._lock = Lock()\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n attributes = frozenset(attributes.items())\n\n if attributes not in self._attributes_aggregation:\n with self._lock:\n if attributes not in self._attributes_aggregation:\n if self._view._aggregation:\n aggregation = (\n 
self._view._aggregation._create_aggregation(\n self._instrument\n )\n )\n else:\n aggregation = self._instrument._default_aggregation\n self._attributes_aggregation[attributes] = aggregation\n\n self._attributes_aggregation[attributes].aggregate(measurement)\n\n def collect(self, temporality: int) -> Iterable[Metric]:\n\n with self._lock:\n for (\n attributes,\n aggregation,\n ) in self._attributes_aggregation.items():\n\n previous_point = self._attributes_previous_point.get(\n attributes\n )\n\n current_point = aggregation.collect()\n\n # pylint: disable=assignment-from-none\n self._attributes_previous_point[\n attributes\n ] = _convert_aggregation_temporality(\n previous_point,\n current_point,\n AggregationTemporality.CUMULATIVE,\n )\n\n if current_point is not None:\n\n yield Metric(\n attributes=dict(attributes),\n description=(\n self._view._description\n or self._instrument.description\n ),\n instrumentation_info=self._instrument.instrumentation_info,\n name=self._view._name or self._instrument.name,\n resource=self._sdk_config.resource,\n unit=self._instrument.unit,\n point=_convert_aggregation_temporality(\n previous_point,\n current_point,\n temporality,\n ),\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import TYPE_CHECKING, Dict, Iterable\n\nfrom opentelemetry.sdk._metrics.aggregation import (\n _Aggregation,\n _convert_aggregation_temporality,\n _PointVarT,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk._metrics.view import View\n\nif TYPE_CHECKING:\n from opentelemetry.sdk._metrics.instrument import _Instrument\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: \"_Instrument\",\n sdk_config: SdkConfiguration,\n ):\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}\n self._lock = Lock()\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n attributes = frozenset(attributes.items())\n\n if attributes not in self._attributes_aggregation:\n with self._lock:\n if attributes not in self._attributes_aggregation:\n if self._view._aggregation:\n aggregation = (\n 
self._view._aggregation._create_aggregation(\n self._instrument\n )\n )\n else:\n aggregation = self._instrument._default_aggregation\n self._attributes_aggregation[attributes] = aggregation\n\n self._attributes_aggregation[attributes].aggregate(measurement)\n\n def collect(self, temporality: int) -> Iterable[Metric]:\n\n with self._lock:\n for (\n attributes,\n aggregation,\n ) in self._attributes_aggregation.items():\n\n previous_point = self._attributes_previous_point.get(\n attributes\n )\n\n current_point = aggregation.collect()\n\n # pylint: disable=assignment-from-none\n self._attributes_previous_point[\n attributes\n ] = _convert_aggregation_temporality(\n previous_point,\n current_point,\n AggregationTemporality.CUMULATIVE,\n )\n\n if current_point is not None:\n\n yield Metric(\n attributes=dict(attributes),\n description=(\n self._view._description\n or self._instrument.description\n ),\n instrumentation_info=self._instrument.instrumentation_info,\n name=self._view._name or self._instrument.name,\n resource=self._sdk_config.resource,\n unit=self._instrument.unit,\n point=_convert_aggregation_temporality(\n previous_point,\n current_point,\n temporality,\n ),\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py"}]}
| 1,341 | 300 |
gh_patches_debug_33550
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-594
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: logging queue listener writing to in-memory stream
The handler for the `queue_listener` handler is created as `StreamHandler(StringIO())`. Explicitly passing the stream to the handler means that the output is no longer logged to stderr.
--- END ISSUE ---
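To make the reported behavior concrete, a minimal sketch using only the standard library `logging` module; the logger name `"demo"` is arbitrary. `StreamHandler()` with no argument writes to `sys.stderr`, while `StreamHandler(StringIO())` traps every record in an in-memory buffer.

```python
import logging
from io import StringIO

buffer = StringIO()
in_memory = logging.StreamHandler(buffer)  # records end up in the buffer
to_stderr = logging.StreamHandler()        # no stream argument: defaults to sys.stderr

logger = logging.getLogger("demo")
logger.addHandler(in_memory)
logger.warning("where did my log line go?")

# Nothing was printed to stderr; the output sits in memory instead.
print(buffer.getvalue())  # "where did my log line go?\n"
```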
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/logging/standard.py`
Content:
```
1 import atexit
2 from io import StringIO
3 from logging import StreamHandler
4 from logging.handlers import QueueHandler, QueueListener
5 from queue import Queue
6 from typing import Any, List, Optional
7
8 from starlite.logging.utils import resolve_handlers
9
10
11 class QueueListenerHandler(QueueHandler):
12 def __init__(self, handlers: Optional[List[Any]] = None) -> None:
13 """Configures queue listener and handler to support non-blocking
14 logging configuration.
15
16 Args:
17 handlers: Optional 'ConvertingList'
18 """
19 super().__init__(Queue(-1))
20 if handlers:
21 handlers = resolve_handlers(handlers)
22 else:
23 handlers = [StreamHandler(StringIO())]
24 self.listener = QueueListener(self.queue, *handlers)
25 self.listener.start()
26
27 atexit.register(self.listener.stop)
28
```
Path: `starlite/logging/picologging.py`
Content:
```
1 import atexit
2 from io import StringIO
3 from logging import StreamHandler
4 from queue import Queue
5 from typing import Any, List, Optional
6
7 from starlite.exceptions import MissingDependencyException
8 from starlite.logging.utils import resolve_handlers
9
10 try:
11 from picologging.handlers import QueueHandler, QueueListener
12 except ImportError as e:
13 raise MissingDependencyException("picologging is not installed") from e
14
15
16 class QueueListenerHandler(QueueHandler): # type: ignore[misc]
17 def __init__(self, handlers: Optional[List[Any]] = None) -> None:
18 """Configures queue listener and handler to support non-blocking
19 logging configuration.
20
21 Args:
22 handlers: Optional 'ConvertingList'
23
24 Notes:
25 - Requires `picologging` to be installed.
26 """
27 super().__init__(Queue(-1))
28 if handlers:
29 handlers = resolve_handlers(handlers)
30 else:
31 handlers = [StreamHandler(StringIO())]
32 self.listener = QueueListener(self.queue, *handlers)
33 self.listener.start()
34
35 atexit.register(self.listener.stop)
36
```
Path: `starlite/config/logging.py`
Content:
```
1 from abc import ABC, abstractmethod
2 from importlib.util import find_spec
3 from logging import INFO
4 from typing import (
5 TYPE_CHECKING,
6 Any,
7 Callable,
8 Dict,
9 Iterable,
10 List,
11 Optional,
12 Type,
13 Union,
14 cast,
15 )
16
17 from orjson import dumps
18 from pydantic import BaseModel, Field, validator
19 from typing_extensions import Literal
20
21 from starlite.exceptions import (
22 ImproperlyConfiguredException,
23 MissingDependencyException,
24 )
25
26 if TYPE_CHECKING:
27 from starlite.types import Logger
28 from starlite.types.callable_types import GetLogger
29
30 try:
31 from structlog.types import BindableLogger, Processor, WrappedLogger
32 except ImportError:
33 BindableLogger = Any # type: ignore
34 Processor = Any # type: ignore
35 WrappedLogger = Any # type: ignore
36
37
38 default_handlers: Dict[str, Dict[str, Any]] = {
39 "console": {
40 "class": "logging.StreamHandler",
41 "level": "DEBUG",
42 "formatter": "standard",
43 },
44 "queue_listener": {
45 "class": "starlite.logging.standard.QueueListenerHandler",
46 "level": "DEBUG",
47 "formatter": "standard",
48 },
49 }
50
51 default_picologging_handlers: Dict[str, Dict[str, Any]] = {
52 "console": {
53 "class": "picologging.StreamHandler",
54 "level": "DEBUG",
55 "formatter": "standard",
56 },
57 "queue_listener": {
58 "class": "starlite.logging.picologging.QueueListenerHandler",
59 "level": "DEBUG",
60 "formatter": "standard",
61 },
62 }
63
64
65 def get_default_handlers() -> Dict[str, Dict[str, Any]]:
66 """
67
68 Returns:
69 The default handlers for the config.
70 """
71 if find_spec("picologging"):
72 return default_picologging_handlers
73 return default_handlers
74
75
76 def get_logger_placeholder(_: str) -> Any: # pragma: no cover
77 """
78 Raises:
79 ImproperlyConfiguredException
80 """
81 raise ImproperlyConfiguredException(
82 "To use 'app.get_logger', 'request.get_logger' or 'socket.get_logger' pass 'logging_config' to the Starlite constructor"
83 )
84
85
86 class BaseLoggingConfig(ABC): # pragma: no cover
87 """Abstract class that should be extended by logging configs."""
88
89 __slots__ = ()
90
91 @abstractmethod
92 def configure(self) -> "GetLogger":
93 """Configured logger with the given configuration.
94
95 Returns:
96 A 'logging.getLogger' like function.
97 """
98 raise NotImplementedError("abstract method")
99
100
101 class LoggingConfig(BaseLoggingConfig, BaseModel):
102 """Configuration class for standard logging.
103
104 Notes:
105 - If 'picologging' is installed it will be used by default.
106 """
107
108 version: Literal[1] = 1
109 """The only valid value at present is 1."""
110 incremental: bool = False
111 """Whether the configuration is to be interpreted as incremental to the existing configuration.
112
113 Notes:
114 - This option is ignored for 'picologging'
115 """
116 disable_existing_loggers: bool = False
117 """Whether any existing non-root loggers are to be disabled."""
118 filters: Optional[Dict[str, Dict[str, Any]]] = None
119 """A dict in which each key is a filter id and each value is a dict describing how to configure the corresponding Filter instance."""
120 propagate: bool = True
121 """If messages must propagate to handlers higher up the logger hierarchy from this logger."""
122 formatters: Dict[str, Dict[str, Any]] = {
123 "standard": {"format": "%(levelname)s - %(asctime)s - %(name)s - %(module)s - %(message)s"}
124 }
125 handlers: Dict[str, Dict[str, Any]] = Field(default_factory=get_default_handlers)
126 """A dict in which each key is a handler id and each value is a dict describing how to configure the corresponding Handler instance."""
127 loggers: Dict[str, Dict[str, Any]] = {
128 "starlite": {
129 "level": "INFO",
130 "handlers": ["queue_listener"],
131 },
132 }
133 """A dict in which each key is a logger name and each value is a dict describing how to configure the corresponding Logger instance."""
134 root: Dict[str, Union[Dict[str, Any], List[Any], str]] = {
135 "handlers": ["queue_listener", "console"],
136 "level": "INFO",
137 }
138 """This will be the configuration for the root logger. Processing of the configuration will be as for any logger,
139 except that the propagate setting will not be applicable."""
140
141 @validator("handlers", always=True)
142 def validate_handlers( # pylint: disable=no-self-argument
143 cls, value: Dict[str, Dict[str, Any]]
144 ) -> Dict[str, Dict[str, Any]]:
145 """
146 Ensures that 'queue_listener' is always set
147 Args:
148 value: A dict of route handlers.
149
150 Returns:
151 A dict of route handlers.
152 """
153 if "queue_listener" not in value:
154 value["queue_listener"] = get_default_handlers()["queue_listener"]
155 return value
156
157 @validator("loggers", always=True)
158 def validate_loggers( # pylint: disable=no-self-argument
159 cls, value: Dict[str, Dict[str, Any]]
160 ) -> Dict[str, Dict[str, Any]]:
161 """Ensures that the 'starlite' logger is always set.
162
163 Args:
164 value: A dict of loggers.
165
166 Returns:
167 A dict of loggers.
168 """
169
170 if "starlite" not in value:
171 value["starlite"] = {
172 "level": "INFO",
173 "handlers": ["queue_listener"],
174 }
175 return value
176
177 def configure(self) -> "GetLogger":
178 """Configured logger with the given configuration.
179
180 Returns:
181 A 'logging.getLogger' like function.
182 """
183 try:
184 if "picologging" in str(dumps(self.handlers)):
185
186 from picologging import ( # pylint: disable=import-outside-toplevel
187 config,
188 getLogger,
189 )
190
191 values = self.dict(exclude_none=True, exclude={"incremental"})
192 else:
193 from logging import ( # type: ignore[no-redef] # pylint: disable=import-outside-toplevel
194 config,
195 getLogger,
196 )
197
198 values = self.dict(exclude_none=True)
199 config.dictConfig(values)
200 return cast("Callable[[str], Logger]", getLogger)
201 except ImportError as e: # pragma: no cover
202 raise MissingDependencyException("picologging is not installed") from e
203
204
205 def default_structlog_processors() -> Optional[Iterable[Processor]]: # pyright: ignore
206 """Sets the default processors for structlog.
207
208 Returns:
209 An optional list of processors.
210 """
211 try:
212 import structlog # pylint: disable=import-outside-toplevel
213
214 return [
215 structlog.contextvars.merge_contextvars,
216 structlog.processors.add_log_level,
217 structlog.processors.format_exc_info,
218 structlog.processors.TimeStamper(fmt="iso", utc=True),
219 structlog.processors.JSONRenderer(serializer=dumps),
220 ]
221 except ImportError: # pragma: no cover
222 return None
223
224
225 def default_wrapper_class() -> Optional[Type[BindableLogger]]: # pyright: ignore
226 """Sets the default wrapper class for structlog.
227
228 Returns:
229 An optional wrapper class.
230 """
231
232 try:
233 import structlog # pylint: disable=import-outside-toplevel
234
235 return structlog.make_filtering_bound_logger(INFO)
236 except ImportError: # pragma: no cover
237 return None
238
239
240 def default_logger_factory() -> Optional[Callable[..., WrappedLogger]]:
241 """Sets the default logger factory for structlog.
242
243 Returns:
244 An optional logger factory.
245 """
246 try:
247 import structlog # pylint: disable=import-outside-toplevel
248
249 return structlog.BytesLoggerFactory()
250 except ImportError: # pragma: no cover
251 return None
252
253
254 class StructLoggingConfig(BaseLoggingConfig, BaseModel):
255 """Configuration class for structlog.
256
257 Notes:
258 - requires 'structlog' to be installed.
259 """
260
261 processors: Optional[Iterable[Processor]] = Field(default_factory=default_structlog_processors) # pyright: ignore
262 """Iterable of structlog logging processors."""
263 wrapper_class: Optional[Type[BindableLogger]] = Field(default_factory=default_wrapper_class) # pyright: ignore
264 """Structlog bindable logger."""
265 context_class: Optional[Dict[str, Any]] = None
266 """Context class (a 'contextvar' context) for the logger"""
267 logger_factory: Optional[Callable[..., WrappedLogger]] = Field(default_factory=default_logger_factory)
268 """Logger factory to use."""
269 cache_logger_on_first_use: bool = True
270 """Whether to cache the logger configuration and reuse. """
271
272 def configure(self) -> "GetLogger":
273 """Configured logger with the given configuration.
274
275 Returns:
276 A 'logging.getLogger' like function.
277 """
278 try:
279 from structlog import ( # pylint: disable=import-outside-toplevel
280 configure,
281 get_logger,
282 )
283
284 # we now configure structlog
285 configure(**self.dict(exclude={"standard_lib_logging_config"}))
286 return get_logger
287 except ImportError as e: # pragma: no cover
288 raise MissingDependencyException("structlog is not installed") from e
289
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlite/config/logging.py b/starlite/config/logging.py
--- a/starlite/config/logging.py
+++ b/starlite/config/logging.py
@@ -132,7 +132,7 @@
}
"""A dict in which each key is a logger name and each value is a dict describing how to configure the corresponding Logger instance."""
root: Dict[str, Union[Dict[str, Any], List[Any], str]] = {
- "handlers": ["queue_listener", "console"],
+ "handlers": ["queue_listener"],
"level": "INFO",
}
"""This will be the configuration for the root logger. Processing of the configuration will be as for any logger,
diff --git a/starlite/logging/picologging.py b/starlite/logging/picologging.py
--- a/starlite/logging/picologging.py
+++ b/starlite/logging/picologging.py
@@ -1,5 +1,4 @@
import atexit
-from io import StringIO
from logging import StreamHandler
from queue import Queue
from typing import Any, List, Optional
@@ -28,7 +27,7 @@
if handlers:
handlers = resolve_handlers(handlers)
else:
- handlers = [StreamHandler(StringIO())]
+ handlers = [StreamHandler()]
self.listener = QueueListener(self.queue, *handlers)
self.listener.start()
diff --git a/starlite/logging/standard.py b/starlite/logging/standard.py
--- a/starlite/logging/standard.py
+++ b/starlite/logging/standard.py
@@ -1,5 +1,4 @@
import atexit
-from io import StringIO
from logging import StreamHandler
from logging.handlers import QueueHandler, QueueListener
from queue import Queue
@@ -20,7 +19,7 @@
if handlers:
handlers = resolve_handlers(handlers)
else:
- handlers = [StreamHandler(StringIO())]
+ handlers = [StreamHandler()]
self.listener = QueueListener(self.queue, *handlers)
self.listener.start()
|
{"golden_diff": "diff --git a/starlite/config/logging.py b/starlite/config/logging.py\n--- a/starlite/config/logging.py\n+++ b/starlite/config/logging.py\n@@ -132,7 +132,7 @@\n }\n \"\"\"A dict in which each key is a logger name and each value is a dict describing how to configure the corresponding Logger instance.\"\"\"\n root: Dict[str, Union[Dict[str, Any], List[Any], str]] = {\n- \"handlers\": [\"queue_listener\", \"console\"],\n+ \"handlers\": [\"queue_listener\"],\n \"level\": \"INFO\",\n }\n \"\"\"This will be the configuration for the root logger. Processing of the configuration will be as for any logger,\ndiff --git a/starlite/logging/picologging.py b/starlite/logging/picologging.py\n--- a/starlite/logging/picologging.py\n+++ b/starlite/logging/picologging.py\n@@ -1,5 +1,4 @@\n import atexit\n-from io import StringIO\n from logging import StreamHandler\n from queue import Queue\n from typing import Any, List, Optional\n@@ -28,7 +27,7 @@\n if handlers:\n handlers = resolve_handlers(handlers)\n else:\n- handlers = [StreamHandler(StringIO())]\n+ handlers = [StreamHandler()]\n self.listener = QueueListener(self.queue, *handlers)\n self.listener.start()\n \ndiff --git a/starlite/logging/standard.py b/starlite/logging/standard.py\n--- a/starlite/logging/standard.py\n+++ b/starlite/logging/standard.py\n@@ -1,5 +1,4 @@\n import atexit\n-from io import StringIO\n from logging import StreamHandler\n from logging.handlers import QueueHandler, QueueListener\n from queue import Queue\n@@ -20,7 +19,7 @@\n if handlers:\n handlers = resolve_handlers(handlers)\n else:\n- handlers = [StreamHandler(StringIO())]\n+ handlers = [StreamHandler()]\n self.listener = QueueListener(self.queue, *handlers)\n self.listener.start()\n", "issue": "Bug: logging queue listener writing to in-memory stream\nThe handler for the `queue_listener` handler is created as `StreamHandler(StringIO())`. 
Explicitly passing the stream to the the handler means that the output is no longer logged to stderr.\r\n\n", "before_files": [{"content": "import atexit\nfrom io import StringIO\nfrom logging import StreamHandler\nfrom logging.handlers import QueueHandler, QueueListener\nfrom queue import Queue\nfrom typing import Any, List, Optional\n\nfrom starlite.logging.utils import resolve_handlers\n\n\nclass QueueListenerHandler(QueueHandler):\n def __init__(self, handlers: Optional[List[Any]] = None) -> None:\n \"\"\"Configures queue listener and handler to support non-blocking\n logging configuration.\n\n Args:\n handlers: Optional 'ConvertingList'\n \"\"\"\n super().__init__(Queue(-1))\n if handlers:\n handlers = resolve_handlers(handlers)\n else:\n handlers = [StreamHandler(StringIO())]\n self.listener = QueueListener(self.queue, *handlers)\n self.listener.start()\n\n atexit.register(self.listener.stop)\n", "path": "starlite/logging/standard.py"}, {"content": "import atexit\nfrom io import StringIO\nfrom logging import StreamHandler\nfrom queue import Queue\nfrom typing import Any, List, Optional\n\nfrom starlite.exceptions import MissingDependencyException\nfrom starlite.logging.utils import resolve_handlers\n\ntry:\n from picologging.handlers import QueueHandler, QueueListener\nexcept ImportError as e:\n raise MissingDependencyException(\"picologging is not installed\") from e\n\n\nclass QueueListenerHandler(QueueHandler): # type: ignore[misc]\n def __init__(self, handlers: Optional[List[Any]] = None) -> None:\n \"\"\"Configures queue listener and handler to support non-blocking\n logging configuration.\n\n Args:\n handlers: Optional 'ConvertingList'\n\n Notes:\n - Requires `picologging` to be installed.\n \"\"\"\n super().__init__(Queue(-1))\n if handlers:\n handlers = resolve_handlers(handlers)\n else:\n handlers = [StreamHandler(StringIO())]\n self.listener = QueueListener(self.queue, *handlers)\n self.listener.start()\n\n atexit.register(self.listener.stop)\n", "path": "starlite/logging/picologging.py"}, {"content": "from abc import ABC, abstractmethod\nfrom importlib.util import find_spec\nfrom logging import INFO\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n Optional,\n Type,\n Union,\n cast,\n)\n\nfrom orjson import dumps\nfrom pydantic import BaseModel, Field, validator\nfrom typing_extensions import Literal\n\nfrom starlite.exceptions import (\n ImproperlyConfiguredException,\n MissingDependencyException,\n)\n\nif TYPE_CHECKING:\n from starlite.types import Logger\n from starlite.types.callable_types import GetLogger\n\ntry:\n from structlog.types import BindableLogger, Processor, WrappedLogger\nexcept ImportError:\n BindableLogger = Any # type: ignore\n Processor = Any # type: ignore\n WrappedLogger = Any # type: ignore\n\n\ndefault_handlers: Dict[str, Dict[str, Any]] = {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n \"queue_listener\": {\n \"class\": \"starlite.logging.standard.QueueListenerHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n}\n\ndefault_picologging_handlers: Dict[str, Dict[str, Any]] = {\n \"console\": {\n \"class\": \"picologging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n \"queue_listener\": {\n \"class\": \"starlite.logging.picologging.QueueListenerHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n}\n\n\ndef get_default_handlers() -> Dict[str, Dict[str, Any]]:\n \"\"\"\n\n 
Returns:\n The default handlers for the config.\n \"\"\"\n if find_spec(\"picologging\"):\n return default_picologging_handlers\n return default_handlers\n\n\ndef get_logger_placeholder(_: str) -> Any: # pragma: no cover\n \"\"\"\n Raises:\n ImproperlyConfiguredException\n \"\"\"\n raise ImproperlyConfiguredException(\n \"To use 'app.get_logger', 'request.get_logger' or 'socket.get_logger' pass 'logging_config' to the Starlite constructor\"\n )\n\n\nclass BaseLoggingConfig(ABC): # pragma: no cover\n \"\"\"Abstract class that should be extended by logging configs.\"\"\"\n\n __slots__ = ()\n\n @abstractmethod\n def configure(self) -> \"GetLogger\":\n \"\"\"Configured logger with the given configuration.\n\n Returns:\n A 'logging.getLogger' like function.\n \"\"\"\n raise NotImplementedError(\"abstract method\")\n\n\nclass LoggingConfig(BaseLoggingConfig, BaseModel):\n \"\"\"Configuration class for standard logging.\n\n Notes:\n - If 'picologging' is installed it will be used by default.\n \"\"\"\n\n version: Literal[1] = 1\n \"\"\"The only valid value at present is 1.\"\"\"\n incremental: bool = False\n \"\"\"Whether the configuration is to be interpreted as incremental to the existing configuration.\n\n Notes:\n - This option is ignored for 'picologging'\n \"\"\"\n disable_existing_loggers: bool = False\n \"\"\"Whether any existing non-root loggers are to be disabled.\"\"\"\n filters: Optional[Dict[str, Dict[str, Any]]] = None\n \"\"\"A dict in which each key is a filter id and each value is a dict describing how to configure the corresponding Filter instance.\"\"\"\n propagate: bool = True\n \"\"\"If messages must propagate to handlers higher up the logger hierarchy from this logger.\"\"\"\n formatters: Dict[str, Dict[str, Any]] = {\n \"standard\": {\"format\": \"%(levelname)s - %(asctime)s - %(name)s - %(module)s - %(message)s\"}\n }\n handlers: Dict[str, Dict[str, Any]] = Field(default_factory=get_default_handlers)\n \"\"\"A dict in which each key is a handler id and each value is a dict describing how to configure the corresponding Handler instance.\"\"\"\n loggers: Dict[str, Dict[str, Any]] = {\n \"starlite\": {\n \"level\": \"INFO\",\n \"handlers\": [\"queue_listener\"],\n },\n }\n \"\"\"A dict in which each key is a logger name and each value is a dict describing how to configure the corresponding Logger instance.\"\"\"\n root: Dict[str, Union[Dict[str, Any], List[Any], str]] = {\n \"handlers\": [\"queue_listener\", \"console\"],\n \"level\": \"INFO\",\n }\n \"\"\"This will be the configuration for the root logger. 
Processing of the configuration will be as for any logger,\n except that the propagate setting will not be applicable.\"\"\"\n\n @validator(\"handlers\", always=True)\n def validate_handlers( # pylint: disable=no-self-argument\n cls, value: Dict[str, Dict[str, Any]]\n ) -> Dict[str, Dict[str, Any]]:\n \"\"\"\n Ensures that 'queue_listener' is always set\n Args:\n value: A dict of route handlers.\n\n Returns:\n A dict of route handlers.\n \"\"\"\n if \"queue_listener\" not in value:\n value[\"queue_listener\"] = get_default_handlers()[\"queue_listener\"]\n return value\n\n @validator(\"loggers\", always=True)\n def validate_loggers( # pylint: disable=no-self-argument\n cls, value: Dict[str, Dict[str, Any]]\n ) -> Dict[str, Dict[str, Any]]:\n \"\"\"Ensures that the 'starlite' logger is always set.\n\n Args:\n value: A dict of loggers.\n\n Returns:\n A dict of loggers.\n \"\"\"\n\n if \"starlite\" not in value:\n value[\"starlite\"] = {\n \"level\": \"INFO\",\n \"handlers\": [\"queue_listener\"],\n }\n return value\n\n def configure(self) -> \"GetLogger\":\n \"\"\"Configured logger with the given configuration.\n\n Returns:\n A 'logging.getLogger' like function.\n \"\"\"\n try:\n if \"picologging\" in str(dumps(self.handlers)):\n\n from picologging import ( # pylint: disable=import-outside-toplevel\n config,\n getLogger,\n )\n\n values = self.dict(exclude_none=True, exclude={\"incremental\"})\n else:\n from logging import ( # type: ignore[no-redef] # pylint: disable=import-outside-toplevel\n config,\n getLogger,\n )\n\n values = self.dict(exclude_none=True)\n config.dictConfig(values)\n return cast(\"Callable[[str], Logger]\", getLogger)\n except ImportError as e: # pragma: no cover\n raise MissingDependencyException(\"picologging is not installed\") from e\n\n\ndef default_structlog_processors() -> Optional[Iterable[Processor]]: # pyright: ignore\n \"\"\"Sets the default processors for structlog.\n\n Returns:\n An optional list of processors.\n \"\"\"\n try:\n import structlog # pylint: disable=import-outside-toplevel\n\n return [\n structlog.contextvars.merge_contextvars,\n structlog.processors.add_log_level,\n structlog.processors.format_exc_info,\n structlog.processors.TimeStamper(fmt=\"iso\", utc=True),\n structlog.processors.JSONRenderer(serializer=dumps),\n ]\n except ImportError: # pragma: no cover\n return None\n\n\ndef default_wrapper_class() -> Optional[Type[BindableLogger]]: # pyright: ignore\n \"\"\"Sets the default wrapper class for structlog.\n\n Returns:\n An optional wrapper class.\n \"\"\"\n\n try:\n import structlog # pylint: disable=import-outside-toplevel\n\n return structlog.make_filtering_bound_logger(INFO)\n except ImportError: # pragma: no cover\n return None\n\n\ndef default_logger_factory() -> Optional[Callable[..., WrappedLogger]]:\n \"\"\"Sets the default logger factory for structlog.\n\n Returns:\n An optional logger factory.\n \"\"\"\n try:\n import structlog # pylint: disable=import-outside-toplevel\n\n return structlog.BytesLoggerFactory()\n except ImportError: # pragma: no cover\n return None\n\n\nclass StructLoggingConfig(BaseLoggingConfig, BaseModel):\n \"\"\"Configuration class for structlog.\n\n Notes:\n - requires 'structlog' to be installed.\n \"\"\"\n\n processors: Optional[Iterable[Processor]] = Field(default_factory=default_structlog_processors) # pyright: ignore\n \"\"\"Iterable of structlog logging processors.\"\"\"\n wrapper_class: Optional[Type[BindableLogger]] = Field(default_factory=default_wrapper_class) # pyright: ignore\n \"\"\"Structlog 
bindable logger.\"\"\"\n context_class: Optional[Dict[str, Any]] = None\n \"\"\"Context class (a 'contextvar' context) for the logger\"\"\"\n logger_factory: Optional[Callable[..., WrappedLogger]] = Field(default_factory=default_logger_factory)\n \"\"\"Logger factory to use.\"\"\"\n cache_logger_on_first_use: bool = True\n \"\"\"Whether to cache the logger configuration and reuse. \"\"\"\n\n def configure(self) -> \"GetLogger\":\n \"\"\"Configured logger with the given configuration.\n\n Returns:\n A 'logging.getLogger' like function.\n \"\"\"\n try:\n from structlog import ( # pylint: disable=import-outside-toplevel\n configure,\n get_logger,\n )\n\n # we now configure structlog\n configure(**self.dict(exclude={\"standard_lib_logging_config\"}))\n return get_logger\n except ImportError as e: # pragma: no cover\n raise MissingDependencyException(\"structlog is not installed\") from e\n", "path": "starlite/config/logging.py"}], "after_files": [{"content": "import atexit\nfrom logging import StreamHandler\nfrom logging.handlers import QueueHandler, QueueListener\nfrom queue import Queue\nfrom typing import Any, List, Optional\n\nfrom starlite.logging.utils import resolve_handlers\n\n\nclass QueueListenerHandler(QueueHandler):\n def __init__(self, handlers: Optional[List[Any]] = None) -> None:\n \"\"\"Configures queue listener and handler to support non-blocking\n logging configuration.\n\n Args:\n handlers: Optional 'ConvertingList'\n \"\"\"\n super().__init__(Queue(-1))\n if handlers:\n handlers = resolve_handlers(handlers)\n else:\n handlers = [StreamHandler()]\n self.listener = QueueListener(self.queue, *handlers)\n self.listener.start()\n\n atexit.register(self.listener.stop)\n", "path": "starlite/logging/standard.py"}, {"content": "import atexit\nfrom logging import StreamHandler\nfrom queue import Queue\nfrom typing import Any, List, Optional\n\nfrom starlite.exceptions import MissingDependencyException\nfrom starlite.logging.utils import resolve_handlers\n\ntry:\n from picologging.handlers import QueueHandler, QueueListener\nexcept ImportError as e:\n raise MissingDependencyException(\"picologging is not installed\") from e\n\n\nclass QueueListenerHandler(QueueHandler): # type: ignore[misc]\n def __init__(self, handlers: Optional[List[Any]] = None) -> None:\n \"\"\"Configures queue listener and handler to support non-blocking\n logging configuration.\n\n Args:\n handlers: Optional 'ConvertingList'\n\n Notes:\n - Requires `picologging` to be installed.\n \"\"\"\n super().__init__(Queue(-1))\n if handlers:\n handlers = resolve_handlers(handlers)\n else:\n handlers = [StreamHandler()]\n self.listener = QueueListener(self.queue, *handlers)\n self.listener.start()\n\n atexit.register(self.listener.stop)\n", "path": "starlite/logging/picologging.py"}, {"content": "from abc import ABC, abstractmethod\nfrom importlib.util import find_spec\nfrom logging import INFO\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n Optional,\n Type,\n Union,\n cast,\n)\n\nfrom orjson import dumps\nfrom pydantic import BaseModel, Field, validator\nfrom typing_extensions import Literal\n\nfrom starlite.exceptions import (\n ImproperlyConfiguredException,\n MissingDependencyException,\n)\n\nif TYPE_CHECKING:\n from starlite.types import Logger\n from starlite.types.callable_types import GetLogger\n\ntry:\n from structlog.types import BindableLogger, Processor, WrappedLogger\nexcept ImportError:\n BindableLogger = Any # type: ignore\n Processor = Any # type: ignore\n 
WrappedLogger = Any # type: ignore\n\n\ndefault_handlers: Dict[str, Dict[str, Any]] = {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n \"queue_listener\": {\n \"class\": \"starlite.logging.standard.QueueListenerHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n}\n\ndefault_picologging_handlers: Dict[str, Dict[str, Any]] = {\n \"console\": {\n \"class\": \"picologging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n \"queue_listener\": {\n \"class\": \"starlite.logging.picologging.QueueListenerHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"standard\",\n },\n}\n\n\ndef get_default_handlers() -> Dict[str, Dict[str, Any]]:\n \"\"\"\n\n Returns:\n The default handlers for the config.\n \"\"\"\n if find_spec(\"picologging\"):\n return default_picologging_handlers\n return default_handlers\n\n\ndef get_logger_placeholder(_: str) -> Any: # pragma: no cover\n \"\"\"\n Raises:\n ImproperlyConfiguredException\n \"\"\"\n raise ImproperlyConfiguredException(\n \"To use 'app.get_logger', 'request.get_logger' or 'socket.get_logger' pass 'logging_config' to the Starlite constructor\"\n )\n\n\nclass BaseLoggingConfig(ABC): # pragma: no cover\n \"\"\"Abstract class that should be extended by logging configs.\"\"\"\n\n __slots__ = ()\n\n @abstractmethod\n def configure(self) -> \"GetLogger\":\n \"\"\"Configured logger with the given configuration.\n\n Returns:\n A 'logging.getLogger' like function.\n \"\"\"\n raise NotImplementedError(\"abstract method\")\n\n\nclass LoggingConfig(BaseLoggingConfig, BaseModel):\n \"\"\"Configuration class for standard logging.\n\n Notes:\n - If 'picologging' is installed it will be used by default.\n \"\"\"\n\n version: Literal[1] = 1\n \"\"\"The only valid value at present is 1.\"\"\"\n incremental: bool = False\n \"\"\"Whether the configuration is to be interpreted as incremental to the existing configuration.\n\n Notes:\n - This option is ignored for 'picologging'\n \"\"\"\n disable_existing_loggers: bool = False\n \"\"\"Whether any existing non-root loggers are to be disabled.\"\"\"\n filters: Optional[Dict[str, Dict[str, Any]]] = None\n \"\"\"A dict in which each key is a filter id and each value is a dict describing how to configure the corresponding Filter instance.\"\"\"\n propagate: bool = True\n \"\"\"If messages must propagate to handlers higher up the logger hierarchy from this logger.\"\"\"\n formatters: Dict[str, Dict[str, Any]] = {\n \"standard\": {\"format\": \"%(levelname)s - %(asctime)s - %(name)s - %(module)s - %(message)s\"}\n }\n handlers: Dict[str, Dict[str, Any]] = Field(default_factory=get_default_handlers)\n \"\"\"A dict in which each key is a handler id and each value is a dict describing how to configure the corresponding Handler instance.\"\"\"\n loggers: Dict[str, Dict[str, Any]] = {\n \"starlite\": {\n \"level\": \"INFO\",\n \"handlers\": [\"queue_listener\"],\n },\n }\n \"\"\"A dict in which each key is a logger name and each value is a dict describing how to configure the corresponding Logger instance.\"\"\"\n root: Dict[str, Union[Dict[str, Any], List[Any], str]] = {\n \"handlers\": [\"queue_listener\"],\n \"level\": \"INFO\",\n }\n \"\"\"This will be the configuration for the root logger. 
Processing of the configuration will be as for any logger,\n except that the propagate setting will not be applicable.\"\"\"\n\n @validator(\"handlers\", always=True)\n def validate_handlers( # pylint: disable=no-self-argument\n cls, value: Dict[str, Dict[str, Any]]\n ) -> Dict[str, Dict[str, Any]]:\n \"\"\"\n Ensures that 'queue_listener' is always set\n Args:\n value: A dict of route handlers.\n\n Returns:\n A dict of route handlers.\n \"\"\"\n if \"queue_listener\" not in value:\n value[\"queue_listener\"] = get_default_handlers()[\"queue_listener\"]\n return value\n\n @validator(\"loggers\", always=True)\n def validate_loggers( # pylint: disable=no-self-argument\n cls, value: Dict[str, Dict[str, Any]]\n ) -> Dict[str, Dict[str, Any]]:\n \"\"\"Ensures that the 'starlite' logger is always set.\n\n Args:\n value: A dict of loggers.\n\n Returns:\n A dict of loggers.\n \"\"\"\n\n if \"starlite\" not in value:\n value[\"starlite\"] = {\n \"level\": \"INFO\",\n \"handlers\": [\"queue_listener\"],\n }\n return value\n\n def configure(self) -> \"GetLogger\":\n \"\"\"Configured logger with the given configuration.\n\n Returns:\n A 'logging.getLogger' like function.\n \"\"\"\n try:\n if \"picologging\" in str(dumps(self.handlers)):\n\n from picologging import ( # pylint: disable=import-outside-toplevel\n config,\n getLogger,\n )\n\n values = self.dict(exclude_none=True, exclude={\"incremental\"})\n else:\n from logging import ( # type: ignore[no-redef] # pylint: disable=import-outside-toplevel\n config,\n getLogger,\n )\n\n values = self.dict(exclude_none=True)\n config.dictConfig(values)\n return cast(\"Callable[[str], Logger]\", getLogger)\n except ImportError as e: # pragma: no cover\n raise MissingDependencyException(\"picologging is not installed\") from e\n\n\ndef default_structlog_processors() -> Optional[Iterable[Processor]]: # pyright: ignore\n \"\"\"Sets the default processors for structlog.\n\n Returns:\n An optional list of processors.\n \"\"\"\n try:\n import structlog # pylint: disable=import-outside-toplevel\n\n return [\n structlog.contextvars.merge_contextvars,\n structlog.processors.add_log_level,\n structlog.processors.format_exc_info,\n structlog.processors.TimeStamper(fmt=\"iso\", utc=True),\n structlog.processors.JSONRenderer(serializer=dumps),\n ]\n except ImportError: # pragma: no cover\n return None\n\n\ndef default_wrapper_class() -> Optional[Type[BindableLogger]]: # pyright: ignore\n \"\"\"Sets the default wrapper class for structlog.\n\n Returns:\n An optional wrapper class.\n \"\"\"\n\n try:\n import structlog # pylint: disable=import-outside-toplevel\n\n return structlog.make_filtering_bound_logger(INFO)\n except ImportError: # pragma: no cover\n return None\n\n\ndef default_logger_factory() -> Optional[Callable[..., WrappedLogger]]:\n \"\"\"Sets the default logger factory for structlog.\n\n Returns:\n An optional logger factory.\n \"\"\"\n try:\n import structlog # pylint: disable=import-outside-toplevel\n\n return structlog.BytesLoggerFactory()\n except ImportError: # pragma: no cover\n return None\n\n\nclass StructLoggingConfig(BaseLoggingConfig, BaseModel):\n \"\"\"Configuration class for structlog.\n\n Notes:\n - requires 'structlog' to be installed.\n \"\"\"\n\n processors: Optional[Iterable[Processor]] = Field(default_factory=default_structlog_processors) # pyright: ignore\n \"\"\"Iterable of structlog logging processors.\"\"\"\n wrapper_class: Optional[Type[BindableLogger]] = Field(default_factory=default_wrapper_class) # pyright: ignore\n \"\"\"Structlog 
bindable logger.\"\"\"\n context_class: Optional[Dict[str, Any]] = None\n \"\"\"Context class (a 'contextvar' context) for the logger\"\"\"\n logger_factory: Optional[Callable[..., WrappedLogger]] = Field(default_factory=default_logger_factory)\n \"\"\"Logger factory to use.\"\"\"\n cache_logger_on_first_use: bool = True\n \"\"\"Whether to cache the logger configuration and reuse. \"\"\"\n\n def configure(self) -> \"GetLogger\":\n \"\"\"Configured logger with the given configuration.\n\n Returns:\n A 'logging.getLogger' like function.\n \"\"\"\n try:\n from structlog import ( # pylint: disable=import-outside-toplevel\n configure,\n get_logger,\n )\n\n # we now configure structlog\n configure(**self.dict(exclude={\"standard_lib_logging_config\"}))\n return get_logger\n except ImportError as e: # pragma: no cover\n raise MissingDependencyException(\"structlog is not installed\") from e\n", "path": "starlite/config/logging.py"}]}
| 3,670 | 434 |
gh_patches_debug_15669
|
rasdani/github-patches
|
git_diff
|
keras-team__autokeras-965
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tf.data.Dataset not supported! [bug]
<!---
**If you are reporting a bug:**
* Verify that your issue is not being currently addressed by other issues or pull requests.
* Please note that Auto-Keras is only compatible with **Python 3.6**.
* Tag the issue with the `bug report` tag.
-->
### Bug Description
<!---
A clear and concise description of what the bug is.
-->
Running with the tensorflow_datasets `mnist` dataset produces the error message:
AttributeError: 'TakeDataset' object has no attribute 'shape'
### Reproducing Steps
```
# run codes:
import tensorflow_datasets as tfds
mnist_train = tfds.load('mnist', split="train", as_supervised=True)
import autokeras as ak
ak0 = ak.ImageClassifier(num_classes=10, max_trials=10)
ak0.fit(mnist_train, epochs=10)
```
### Expected Behavior
<!--
A clear and concise description of what you expected to happen.
-->
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-a90e48684d55> in <module>
----> 1 ak0.fit(mnist_train, epochs=10)
~/anaconda3/lib/python3.7/site-packages/autokeras/tasks/image.py in fit(self, x, y, epochs, callbacks, validation_split, validation_data, **kwargs)
119 validation_split=validation_split,
120 validation_data=validation_data,
--> 121 **kwargs)
122
123
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in fit(self, x, y, batch_size, epochs, callbacks, validation_split, validation_data, **kwargs)
229 y=y,
230 validation_data=validation_data,
--> 231 validation_split=validation_split)
232
233 # Process the args.
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _prepare_data(self, x, y, validation_data, validation_split)
303 # TODO: Handle other types of input, zip dataset, tensor, dict.
304 # Prepare the dataset.
--> 305 dataset = self._process_xy(x, y, True)
306 if validation_data:
307 self._split_dataset = False
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _process_xy(self, x, y, fit)
291
292 x = self._process_x(x, fit)
--> 293 y = self._process_y(y, fit)
294
295 return tf.data.Dataset.zip((x, y))
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _process_y(self, y, fit)
267 for data, head, adapter in zip(y, self._heads, self._output_adapters):
268 if fit:
--> 269 data = adapter.fit_transform(data)
270 else:
271 data = adapter.transform(data)
~/anaconda3/lib/python3.7/site-packages/autokeras/engine/adapter.py in fit_transform(self, dataset)
66 def fit_transform(self, dataset):
67 self.check(dataset)
---> 68 self.fit_before_convert(dataset)
69 dataset = self.convert_to_dataset(dataset)
70 self.fit(dataset)
~/anaconda3/lib/python3.7/site-packages/autokeras/adapters/output_adapter.py in fit_before_convert(self, dataset)
65 if isinstance(dataset, tf.data.Dataset):
66 if not self.num_classes:
---> 67 shape = dataset.take(1).shape[1]
68 if shape == 1:
69 self.num_classes = 2
AttributeError: 'TakeDataset' object has no attribute 'shape'
```
### Setup Details
Include the details about the versions of:
- OS type and version:
- Python:
- autokeras: 1.0.1
- scikit-learn:
- numpy:
- keras:
- scipy:
- tensorflow: 2.1.0
- pytorch:
### Additional context
<!---
Add any other context about the problem here.
-->
TakeDataset doesn't have a 'shape' attribute, so the code may be wrong. Note also that I set num_classes=10 and the error still occurs, so there appear to be several bugs, not just one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autokeras/adapters/output_adapter.py`
Content:
```
1 import numpy as np
2 import pandas as pd
3 import tensorflow as tf
4
5 from autokeras import encoders
6 from autokeras.engine import adapter as adapter_module
7
8
9 class HeadAdapter(adapter_module.Adapter):
10
11 def __init__(self, name, **kwargs):
12 super().__init__(**kwargs)
13 self.name = name
14
15 def check(self, dataset):
16 supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series)
17 if not isinstance(dataset, supported_types):
18 raise TypeError('Expect the target data of {name} to be tf.data.Dataset,'
19 ' np.ndarray, pd.DataFrame or pd.Series, but got {type}.'
20 .format(name=self.name, type=type(dataset)))
21
22 def convert_to_dataset(self, dataset):
23 if isinstance(dataset, np.ndarray):
24 if len(dataset.shape) == 1:
25 dataset = dataset.reshape(-1, 1)
26 if isinstance(dataset, pd.DataFrame):
27 dataset = dataset.values
28 if isinstance(dataset, pd.Series):
29 dataset = dataset.values.reshape(-1, 1)
30 return super().convert_to_dataset(dataset)
31
32 def postprocess(self, y):
33 """Postprocess the output of the Keras Model."""
34 return y
35
36 def get_config(self):
37 config = super().get_config()
38 config.update({
39 'name': self.name,
40 })
41 return config
42
43
44 class ClassificationHeadAdapter(HeadAdapter):
45
46 def __init__(self, num_classes=None, **kwargs):
47 super().__init__(**kwargs)
48 self.num_classes = num_classes
49 self.label_encoder = None
50
51 def get_config(self):
52 config = super().get_config()
53 config.update({
54 'encoder': encoders.serialize(self.label_encoder),
55 })
56 return config
57
58 @classmethod
59 def from_config(cls, config):
60 obj = super().from_config(config)
61 obj.label_encoder = encoders.deserialize(config['encoder'])
62
63 def fit_before_convert(self, dataset):
64 # If in tf.data.Dataset, must be encoded already.
65 if isinstance(dataset, tf.data.Dataset):
66 if not self.num_classes:
67 shape = dataset.take(1).shape[1]
68 if shape == 1:
69 self.num_classes = 2
70 else:
71 self.num_classes = shape
72 return
73 if isinstance(dataset, pd.DataFrame):
74 dataset = dataset.values
75 if isinstance(dataset, pd.Series):
76 dataset = dataset.values.reshape(-1, 1)
77 # Not label.
78 if len(dataset.flatten()) != len(dataset):
79 self.num_classes = dataset.shape[1]
80 return
81 labels = set(dataset.flatten())
82 if self.num_classes is None:
83 self.num_classes = len(labels)
84 if self.num_classes == 2:
85 self.label_encoder = encoders.LabelEncoder()
86 elif self.num_classes > 2:
87 self.label_encoder = encoders.OneHotEncoder()
88 elif self.num_classes < 2:
89 raise ValueError('Expect the target data for {name} to have '
90 'at least 2 classes, but got {num_classes}.'
91 .format(name=self.name, num_classes=self.num_classes))
92 self.label_encoder.fit(dataset)
93
94 def convert_to_dataset(self, dataset):
95 if self.label_encoder:
96 dataset = self.label_encoder.encode(dataset)
97 return super().convert_to_dataset(dataset)
98
99 def postprocess(self, y):
100 if self.label_encoder:
101 y = self.label_encoder.decode(y)
102 return y
103
104
105 class RegressionHeadAdapter(HeadAdapter):
106 pass
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/autokeras/adapters/output_adapter.py b/autokeras/adapters/output_adapter.py
--- a/autokeras/adapters/output_adapter.py
+++ b/autokeras/adapters/output_adapter.py
@@ -3,6 +3,7 @@
import tensorflow as tf
from autokeras import encoders
+from autokeras import utils
from autokeras.engine import adapter as adapter_module
@@ -64,7 +65,8 @@
# If in tf.data.Dataset, must be encoded already.
if isinstance(dataset, tf.data.Dataset):
if not self.num_classes:
- shape = dataset.take(1).shape[1]
+ shape = utils.dataset_shape(dataset)[0]
+ # Single column with 0s and 1s.
if shape == 1:
self.num_classes = 2
else:
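The patch above swaps the invalid `dataset.take(1).shape` access for a `utils.dataset_shape` helper. That helper's implementation is not shown in this record; the sketch below only illustrates the underlying idea of reading the shape from the dataset's element spec rather than from the dataset object itself:

```python
# Illustration only: the real autokeras `utils.dataset_shape` may differ.
import tensorflow as tf

def dataset_shape(dataset: tf.data.Dataset) -> tf.TensorShape:
    """Shape of one element of `dataset`, read from its spec (no data is materialized)."""
    return tf.nest.flatten(dataset.element_spec)[0].shape

# Mirrors the patched call site: shape = utils.dataset_shape(dataset)[0]
```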
|
{"golden_diff": "diff --git a/autokeras/adapters/output_adapter.py b/autokeras/adapters/output_adapter.py\n--- a/autokeras/adapters/output_adapter.py\n+++ b/autokeras/adapters/output_adapter.py\n@@ -3,6 +3,7 @@\n import tensorflow as tf\n \n from autokeras import encoders\n+from autokeras import utils\n from autokeras.engine import adapter as adapter_module\n \n \n@@ -64,7 +65,8 @@\n # If in tf.data.Dataset, must be encoded already.\n if isinstance(dataset, tf.data.Dataset):\n if not self.num_classes:\n- shape = dataset.take(1).shape[1]\n+ shape = utils.dataset_shape(dataset)[0]\n+ # Single column with 0s and 1s.\n if shape == 1:\n self.num_classes = 2\n else:\n", "issue": "tf.data.Dataset not support! [bug]\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\n<!---\r\nA clear and concise description of what the bug is.\r\n-->\r\nrun with tensorflow_datasets 's mnist dataset, but show error message:\r\nAttributeError: 'TakeDataset' object has no attribute 'shape'\r\n\r\n### Reproducing Steps\r\n```\r\n# run codes:\r\nimport tensorflow_datasets as tfds\r\nmnist_train = tfds.load('mnist', split=\"train\", as_supervised=True)\r\nimport autokeras as ak\r\nak0 = ak.ImageClassifier(num_classes=10, max_trials=10)\r\nak0.fit(mnist_train, epochs=10)\r\n```\r\n### Expected Behavior\r\n<!--\r\nA clear and concise description of what you expected to happen.\r\n-->\r\n```\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-4-a90e48684d55> in <module>\r\n----> 1 ak0.fit(mnist_train, epochs=10)\r\n\r\n~/anaconda3/lib/python3.7/site-packages/autokeras/tasks/image.py in fit(self, x, y, epochs, callbacks, validation_split, validation_data, **kwargs)\r\n 119 validation_split=validation_split,\r\n 120 validation_data=validation_data,\r\n--> 121 **kwargs)\r\n 122 \r\n 123 \r\n\r\n~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in fit(self, x, y, batch_size, epochs, callbacks, validation_split, validation_data, **kwargs)\r\n 229 y=y,\r\n 230 validation_data=validation_data,\r\n--> 231 validation_split=validation_split)\r\n 232 \r\n 233 # Process the args.\r\n\r\n~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _prepare_data(self, x, y, validation_data, validation_split)\r\n 303 # TODO: Handle other types of input, zip dataset, tensor, dict.\r\n 304 # Prepare the dataset.\r\n--> 305 dataset = self._process_xy(x, y, True)\r\n 306 if validation_data:\r\n 307 self._split_dataset = False\r\n\r\n~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _process_xy(self, x, y, fit)\r\n 291 \r\n 292 x = self._process_x(x, fit)\r\n--> 293 y = self._process_y(y, fit)\r\n 294 \r\n 295 return tf.data.Dataset.zip((x, y))\r\n\r\n~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _process_y(self, y, fit)\r\n 267 for data, head, adapter in zip(y, self._heads, self._output_adapters):\r\n 268 if fit:\r\n--> 269 data = adapter.fit_transform(data)\r\n 270 else:\r\n 271 data = adapter.transform(data)\r\n\r\n~/anaconda3/lib/python3.7/site-packages/autokeras/engine/adapter.py in fit_transform(self, dataset)\r\n 66 def fit_transform(self, dataset):\r\n 67 self.check(dataset)\r\n---> 68 self.fit_before_convert(dataset)\r\n 69 dataset = 
self.convert_to_dataset(dataset)\r\n 70 self.fit(dataset)\r\n\r\n~/anaconda3/lib/python3.7/site-packages/autokeras/adapters/output_adapter.py in fit_before_convert(self, dataset)\r\n 65 if isinstance(dataset, tf.data.Dataset):\r\n 66 if not self.num_classes:\r\n---> 67 shape = dataset.take(1).shape[1]\r\n 68 if shape == 1:\r\n 69 self.num_classes = 2\r\n\r\nAttributeError: 'TakeDataset' object has no attribute 'shape'\r\n```\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version:\r\n - Python: \r\n - autokeras: 1.0.1\r\n - scikit-learn:\r\n - numpy:\r\n - keras:\r\n - scipy:\r\n - tensorflow: 2.1.0\r\n - pytorch:\r\n\r\n### Additional context\r\n<!---\r\nAdd any other context about the problem here.\r\n-->\r\nTakeDataset doesn't have attribute 'shape', so the code may be wrong. You can also find I have set num_classes=10, so there are several bugs, not just one.\r\n\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom autokeras import encoders\nfrom autokeras.engine import adapter as adapter_module\n\n\nclass HeadAdapter(adapter_module.Adapter):\n\n def __init__(self, name, **kwargs):\n super().__init__(**kwargs)\n self.name = name\n\n def check(self, dataset):\n supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series)\n if not isinstance(dataset, supported_types):\n raise TypeError('Expect the target data of {name} to be tf.data.Dataset,'\n ' np.ndarray, pd.DataFrame or pd.Series, but got {type}.'\n .format(name=self.name, type=type(dataset)))\n\n def convert_to_dataset(self, dataset):\n if isinstance(dataset, np.ndarray):\n if len(dataset.shape) == 1:\n dataset = dataset.reshape(-1, 1)\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n return super().convert_to_dataset(dataset)\n\n def postprocess(self, y):\n \"\"\"Postprocess the output of the Keras Model.\"\"\"\n return y\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'name': self.name,\n })\n return config\n\n\nclass ClassificationHeadAdapter(HeadAdapter):\n\n def __init__(self, num_classes=None, **kwargs):\n super().__init__(**kwargs)\n self.num_classes = num_classes\n self.label_encoder = None\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'encoder': encoders.serialize(self.label_encoder),\n })\n return config\n\n @classmethod\n def from_config(cls, config):\n obj = super().from_config(config)\n obj.label_encoder = encoders.deserialize(config['encoder'])\n\n def fit_before_convert(self, dataset):\n # If in tf.data.Dataset, must be encoded already.\n if isinstance(dataset, tf.data.Dataset):\n if not self.num_classes:\n shape = dataset.take(1).shape[1]\n if shape == 1:\n self.num_classes = 2\n else:\n self.num_classes = shape\n return\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n # Not label.\n if len(dataset.flatten()) != len(dataset):\n self.num_classes = dataset.shape[1]\n return\n labels = set(dataset.flatten())\n if self.num_classes is None:\n self.num_classes = len(labels)\n if self.num_classes == 2:\n self.label_encoder = encoders.LabelEncoder()\n elif self.num_classes > 2:\n self.label_encoder = encoders.OneHotEncoder()\n elif self.num_classes < 2:\n raise ValueError('Expect the target data for {name} to have '\n 'at least 2 classes, but got {num_classes}.'\n 
.format(name=self.name, num_classes=self.num_classes))\n self.label_encoder.fit(dataset)\n\n def convert_to_dataset(self, dataset):\n if self.label_encoder:\n dataset = self.label_encoder.encode(dataset)\n return super().convert_to_dataset(dataset)\n\n def postprocess(self, y):\n if self.label_encoder:\n y = self.label_encoder.decode(y)\n return y\n\n\nclass RegressionHeadAdapter(HeadAdapter):\n pass\n", "path": "autokeras/adapters/output_adapter.py"}], "after_files": [{"content": "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom autokeras import encoders\nfrom autokeras import utils\nfrom autokeras.engine import adapter as adapter_module\n\n\nclass HeadAdapter(adapter_module.Adapter):\n\n def __init__(self, name, **kwargs):\n super().__init__(**kwargs)\n self.name = name\n\n def check(self, dataset):\n supported_types = (tf.data.Dataset, np.ndarray, pd.DataFrame, pd.Series)\n if not isinstance(dataset, supported_types):\n raise TypeError('Expect the target data of {name} to be tf.data.Dataset,'\n ' np.ndarray, pd.DataFrame or pd.Series, but got {type}.'\n .format(name=self.name, type=type(dataset)))\n\n def convert_to_dataset(self, dataset):\n if isinstance(dataset, np.ndarray):\n if len(dataset.shape) == 1:\n dataset = dataset.reshape(-1, 1)\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n return super().convert_to_dataset(dataset)\n\n def postprocess(self, y):\n \"\"\"Postprocess the output of the Keras Model.\"\"\"\n return y\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'name': self.name,\n })\n return config\n\n\nclass ClassificationHeadAdapter(HeadAdapter):\n\n def __init__(self, num_classes=None, **kwargs):\n super().__init__(**kwargs)\n self.num_classes = num_classes\n self.label_encoder = None\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'encoder': encoders.serialize(self.label_encoder),\n })\n return config\n\n @classmethod\n def from_config(cls, config):\n obj = super().from_config(config)\n obj.label_encoder = encoders.deserialize(config['encoder'])\n\n def fit_before_convert(self, dataset):\n # If in tf.data.Dataset, must be encoded already.\n if isinstance(dataset, tf.data.Dataset):\n if not self.num_classes:\n shape = utils.dataset_shape(dataset)[0]\n # Single column with 0s and 1s.\n if shape == 1:\n self.num_classes = 2\n else:\n self.num_classes = shape\n return\n if isinstance(dataset, pd.DataFrame):\n dataset = dataset.values\n if isinstance(dataset, pd.Series):\n dataset = dataset.values.reshape(-1, 1)\n # Not label.\n if len(dataset.flatten()) != len(dataset):\n self.num_classes = dataset.shape[1]\n return\n labels = set(dataset.flatten())\n if self.num_classes is None:\n self.num_classes = len(labels)\n if self.num_classes == 2:\n self.label_encoder = encoders.LabelEncoder()\n elif self.num_classes > 2:\n self.label_encoder = encoders.OneHotEncoder()\n elif self.num_classes < 2:\n raise ValueError('Expect the target data for {name} to have '\n 'at least 2 classes, but got {num_classes}.'\n .format(name=self.name, num_classes=self.num_classes))\n self.label_encoder.fit(dataset)\n\n def convert_to_dataset(self, dataset):\n if self.label_encoder:\n dataset = self.label_encoder.encode(dataset)\n return super().convert_to_dataset(dataset)\n\n def postprocess(self, y):\n if self.label_encoder:\n y = self.label_encoder.decode(y)\n return y\n\n\nclass RegressionHeadAdapter(HeadAdapter):\n 
pass\n", "path": "autokeras/adapters/output_adapter.py"}]}
| 2,271 | 188 |
gh_patches_debug_3773
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-860
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Python 3.8 to CI
# Description
The branch [`ci/add-Python-3.8-to-CI`](https://github.com/diana-hep/pyhf/compare/ci/add-Python-3.8-to-CI) adds Python 3.8 to the CI. However, as [PyTorch won't have a Python 3.8 wheel until the next release](https://github.com/pytorch/pytorch/issues/21741#issuecomment-541242504), this won't be able to happen until around December 2019.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=3.6",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'jsonschema>=3.2.0', # for utils
100 'jsonpatch',
101 'pyyaml', # for parsing CLI equal-delimited options
102 ],
103 extras_require=extras_require,
104 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
105 dependency_links=[],
106 use_scm_version=lambda: {'local_scheme': lambda version: ''},
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -87,6 +87,7 @@
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
],
package_dir={'': 'src'},
packages=find_packages(where='src'),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -87,6 +87,7 @@\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n", "issue": "Add Python 3.8 to CI\n# Description\r\n\r\nThe branch [`ci/add-Python-3.8-to-CI`](https://github.com/diana-hep/pyhf/compare/ci/add-Python-3.8-to-CI) adds Python 3.8 to the CI. However, as [PyTorch won't have a Python 3.8 wheel until the next release](https://github.com/pytorch/pytorch/issues/21741#issuecomment-541242504) this won't be able to happen until around December 2019.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: 
{'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,446 | 97 |
gh_patches_debug_3061
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-1567
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warning on OpenTelemetry when some attributes are not set
#### Environment details
- OS type and version: Windows 10 21H2
- Python version: `3.9.4`
- pip version: `22.2.2`
- `google-cloud-bigquery` version: `3.3.5`
#### Steps to reproduce
1. Set up a query job with opentelemetry enabled
2. See warning in the console: `Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types`
#### Code example
```python
import logging
from google.cloud import bigquery
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
SimpleSpanProcessor,
ConsoleSpanExporter,
)
provider = TracerProvider()
simple_processor = SimpleSpanProcessor(ConsoleSpanExporter())
provider.add_span_processor(simple_processor)
trace.set_tracer_provider(provider)
logging.basicConfig(level=10)
# Construct a BigQuery client object.
client = bigquery.Client()
query = "SELECT 1;"
query_job = client.query(query)
```
#### Stack trace
```
DEBUG:google.auth._default:Checking None for explicit credentials as part of auth process...
DEBUG:google.auth._default:Checking Cloud SDK credentials as part of auth process...
DEBUG:google.auth._default:Checking None for explicit credentials as part of auth process...
DEBUG:google.auth._default:Checking Cloud SDK credentials as part of auth process...
WARNING:opentelemetry.attributes:Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types
WARNING:opentelemetry.attributes:Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types
DEBUG:urllib3.util.retry:Converted retries value: 3 -> Retry(total=3, connect=None, read=None, redirect=None, status=None)
DEBUG:google.auth.transport.requests:Making request: POST https://oauth2.googleapis.com/token
DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): oauth2.googleapis.com:443
DEBUG:urllib3.connectionpool:https://oauth2.googleapis.com:443 "POST /token HTTP/1.1" 200 None
DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): bigquery.googleapis.com:443
DEBUG:urllib3.connectionpool:https://bigquery.googleapis.com:443 "POST /bigquery/v2/projects/my-project/jobs?prettyPrint=false HTTP/1.1" 200 None
{
"name": "BigQuery.job.begin",
"context": {
"trace_id": "0x192a0e4ec554c63f68525922208fed88",
"span_id": "0xfa29f0363122c4c4",
"trace_state": "[]"
},
"kind": "SpanKind.INTERNAL",
"parent_id": null,
"start_time": "2022-10-12T09:41:57.259114Z",
"end_time": "2022-10-12T09:41:57.934410Z",
"status": {
"status_code": "UNSET"
},
"attributes": {
"db.system": "BigQuery",
"db.name": "my-project",
"job_id": "fc1581e3-708b-4b51-9a05-e3ad52c68dec",
"hasErrors": false,
"num_child_jobs": 0,
"path": "/projects/my-project/jobs"
},
"events": [],
"links": [],
"resource": {
"attributes": {
"telemetry.sdk.language": "python",
"telemetry.sdk.name": "opentelemetry",
"telemetry.sdk.version": "1.13.0",
"service.name": "unknown_service"
},
"schema_url": ""
}
}
```
#### Analysis
Warnings appear when `location` and job `state` attributes are not set.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/opentelemetry_tracing.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 from contextlib import contextmanager
17 from google.api_core.exceptions import GoogleAPICallError # type: ignore
18
19 logger = logging.getLogger(__name__)
20 try:
21 from opentelemetry import trace # type: ignore
22 from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore
23 from opentelemetry.trace.status import Status # type: ignore
24
25 HAS_OPENTELEMETRY = True
26 _warned_telemetry = True
27
28 except ImportError:
29 HAS_OPENTELEMETRY = False
30 _warned_telemetry = False
31
32 _default_attributes = {
33 "db.system": "BigQuery"
34 } # static, default values assigned to all spans
35
36
37 @contextmanager
38 def create_span(name, attributes=None, client=None, job_ref=None):
39 """Creates a ContextManager for a Span to be exported to the configured exporter.
40 If no configuration exists yields None.
41
42 Args:
43 name (str): Name that will be set for the span being created
44 attributes (Optional[dict]):
45 Additional attributes that pertain to
46 the specific API call (i.e. not a default attribute)
47 client (Optional[google.cloud.bigquery.client.Client]):
48 Pass in a Client object to extract any attributes that may be
49 relevant to it and add them to the created spans.
50 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
51 Pass in a _AsyncJob object to extract any attributes that may be
52 relevant to it and add them to the created spans.
53
54 Yields:
55 opentelemetry.trace.Span: Yields the newly created Span.
56
57 Raises:
58 google.api_core.exceptions.GoogleAPICallError:
59 Raised if a span could not be yielded or issue with call to
60 OpenTelemetry.
61 """
62 global _warned_telemetry
63 final_attributes = _get_final_span_attributes(attributes, client, job_ref)
64 if not HAS_OPENTELEMETRY:
65 if not _warned_telemetry:
66 logger.debug(
67 "This service is instrumented using OpenTelemetry. "
68 "OpenTelemetry or one of its components could not be imported; "
69 "please add compatible versions of opentelemetry-api and "
70 "opentelemetry-instrumentation packages in order to get BigQuery "
71 "Tracing data."
72 )
73 _warned_telemetry = True
74
75 yield None
76 return
77 tracer = trace.get_tracer(__name__)
78
79 # yield new span value
80 with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
81 try:
82 yield span
83 except GoogleAPICallError as error:
84 if error.code is not None:
85 span.set_status(Status(http_status_to_status_code(error.code)))
86 raise
87
88
89 def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
90 final_attributes = {}
91 final_attributes.update(_default_attributes.copy())
92 if client:
93 client_attributes = _set_client_attributes(client)
94 final_attributes.update(client_attributes)
95 if job_ref:
96 job_attributes = _set_job_attributes(job_ref)
97 final_attributes.update(job_attributes)
98 if attributes:
99 final_attributes.update(attributes)
100 return final_attributes
101
102
103 def _set_client_attributes(client):
104 return {"db.name": client.project, "location": client.location}
105
106
107 def _set_job_attributes(job_ref):
108 job_attributes = {
109 "db.name": job_ref.project,
110 "job_id": job_ref.job_id,
111 "state": job_ref.state,
112 }
113
114 job_attributes["hasErrors"] = job_ref.error_result is not None
115
116 if job_ref.created is not None:
117 job_attributes["timeCreated"] = job_ref.created.isoformat()
118
119 if job_ref.started is not None:
120 job_attributes["timeStarted"] = job_ref.started.isoformat()
121
122 if job_ref.ended is not None:
123 job_attributes["timeEnded"] = job_ref.ended.isoformat()
124
125 if job_ref.location is not None:
126 job_attributes["location"] = job_ref.location
127
128 if job_ref.parent_job_id is not None:
129 job_attributes["parent_job_id"] = job_ref.parent_job_id
130
131 if job_ref.num_child_jobs is not None:
132 job_attributes["num_child_jobs"] = job_ref.num_child_jobs
133
134 return job_attributes
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py
--- a/google/cloud/bigquery/opentelemetry_tracing.py
+++ b/google/cloud/bigquery/opentelemetry_tracing.py
@@ -97,6 +97,11 @@
final_attributes.update(job_attributes)
if attributes:
final_attributes.update(attributes)
+
+ filtered = {k: v for k, v in final_attributes.items() if v is not None}
+ final_attributes.clear()
+ final_attributes.update(filtered)
+
return final_attributes
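The fix above simply drops any `None`-valued attributes before they are handed to OpenTelemetry, which is what triggers the `Invalid type NoneType` warnings in the issue. A standalone illustration of the same filtering step (the attribute values here are made up):

```python
# Standalone illustration of the filtering added by the patch; values are examples only.
attributes = {
    "db.system": "BigQuery",
    "db.name": "my-project",
    "location": None,   # unset -> would otherwise trigger the NoneType warning
    "state": None,      # unset -> would otherwise trigger the NoneType warning
}

span_attributes = {k: v for k, v in attributes.items() if v is not None}
assert "location" not in span_attributes and "state" not in span_attributes
```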
|
{"golden_diff": "diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py\n--- a/google/cloud/bigquery/opentelemetry_tracing.py\n+++ b/google/cloud/bigquery/opentelemetry_tracing.py\n@@ -97,6 +97,11 @@\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n+\n+ filtered = {k: v for k, v in final_attributes.items() if v is not None}\n+ final_attributes.clear()\n+ final_attributes.update(filtered)\n+\n return final_attributes\n", "issue": "Warning on OpenTelemetry when some attributes are not set\n#### Environment details\r\n\r\n - OS type and version: Windows 10 21H2\r\n - Python version: `3.9.4`\r\n - pip version: `22.2.2`\r\n - `google-cloud-bigquery` version: `3.3.5`\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Set up a query job with opentelemetry enabled\r\n 2. See warning in the console: `Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types`\r\n\r\n#### Code example\r\n\r\n```python\r\nimport logging\r\nfrom google.cloud import bigquery\r\nfrom opentelemetry import trace\r\nfrom opentelemetry.sdk.trace import TracerProvider\r\nfrom opentelemetry.sdk.trace.export import (\r\n SimpleSpanProcessor,\r\n ConsoleSpanExporter,\r\n)\r\n\r\nprovider = TracerProvider()\r\nsimple_processor = SimpleSpanProcessor(ConsoleSpanExporter())\r\nprovider.add_span_processor(simple_processor)\r\ntrace.set_tracer_provider(provider)\r\n\r\nlogging.basicConfig(level=10)\r\n\r\n# Construct a BigQuery client object.\r\nclient = bigquery.Client()\r\n\r\nquery = \"SELECT 1;\"\r\n\r\nquery_job = client.query(query)\r\n```\r\n#### Stack trace\r\n```\r\nDEBUG:google.auth._default:Checking None for explicit credentials as part of auth process...\r\nDEBUG:google.auth._default:Checking Cloud SDK credentials as part of auth process...\r\nDEBUG:google.auth._default:Checking None for explicit credentials as part of auth process...\r\nDEBUG:google.auth._default:Checking Cloud SDK credentials as part of auth process...\r\nWARNING:opentelemetry.attributes:Invalid type NoneType for attribute value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types\r\nWARNING:opentelemetry.attributes:Invalid type NoneType for attribute value. 
Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types\r\nDEBUG:urllib3.util.retry:Converted retries value: 3 -> Retry(total=3, connect=None, read=None, redirect=None, status=None)\r\nDEBUG:google.auth.transport.requests:Making request: POST https://oauth2.googleapis.com/token\r\nDEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): oauth2.googleapis.com:443\r\nDEBUG:urllib3.connectionpool:https://oauth2.googleapis.com:443 \"POST /token HTTP/1.1\" 200 None\r\nDEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): bigquery.googleapis.com:443\r\nDEBUG:urllib3.connectionpool:https://bigquery.googleapis.com:443 \"POST /bigquery/v2/projects/my-project/jobs?prettyPrint=false HTTP/1.1\" 200 None\r\n{\r\n \"name\": \"BigQuery.job.begin\",\r\n \"context\": {\r\n \"trace_id\": \"0x192a0e4ec554c63f68525922208fed88\",\r\n \"span_id\": \"0xfa29f0363122c4c4\",\r\n \"trace_state\": \"[]\"\r\n },\r\n \"kind\": \"SpanKind.INTERNAL\",\r\n \"parent_id\": null,\r\n \"start_time\": \"2022-10-12T09:41:57.259114Z\",\r\n \"end_time\": \"2022-10-12T09:41:57.934410Z\",\r\n \"status\": {\r\n \"status_code\": \"UNSET\"\r\n },\r\n \"attributes\": {\r\n \"db.system\": \"BigQuery\",\r\n \"db.name\": \"my-project\",\r\n \"job_id\": \"fc1581e3-708b-4b51-9a05-e3ad52c68dec\",\r\n \"hasErrors\": false,\r\n \"num_child_jobs\": 0,\r\n \"path\": \"/projects/my-project/jobs\"\r\n },\r\n \"events\": [],\r\n \"links\": [],\r\n \"resource\": {\r\n \"attributes\": {\r\n \"telemetry.sdk.language\": \"python\",\r\n \"telemetry.sdk.name\": \"opentelemetry\",\r\n \"telemetry.sdk.version\": \"1.13.0\",\r\n \"service.name\": \"unknown_service\"\r\n },\r\n \"schema_url\": \"\"\r\n }\r\n}\r\n```\r\n#### Analysis\r\n\r\nWarnings appear when `location` and job `state` attributes are not set.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. 
not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. \"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import 
contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n\n filtered = {k: v for k, v in final_attributes.items() if v is not None}\n final_attributes.clear()\n final_attributes.update(filtered)\n\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}]}
| 2,599 | 125 |
gh_patches_debug_34144
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-3017
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Doesn't catch invalid `rate(1 hours)`
*cfn-lint version: (`cfn-lint --version`)*
0.44.7
*Description of issue.*
cfn-lint doesn't recognize that this ScheduleExpression is invalid (it should be `rate(1 hour)`):
```yaml
ExampleRule:
Type: AWS::Events::Rule
Properties:
Description: desc
Name: name
ScheduleExpression: rate(1 hours)
State: ENABLED
```
But when building the CloudFormation stack, I get the following error:
```
Parameter ScheduleExpression is not valid. (Service: AmazonCloudWatchEvents; Status Code: 400; Error Code: ValidationException; Request ID: ...; Proxy: null)
```
I saw #816, but since this is a `rate` issue, not a `cron` issue, I thought I should open a new ticket
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/events/RuleScheduleExpression.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule, RuleMatch
6
7
8 class RuleScheduleExpression(CloudFormationLintRule):
9 """Validate AWS Events Schedule expression format"""
10
11 id = "E3027"
12 shortdesc = "Validate AWS Event ScheduleExpression format"
13 description = "Validate the formation of the AWS::Event ScheduleExpression"
14 source_url = "https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html"
15 tags = ["resources", "events"]
16
17 def initialize(self, cfn):
18 """Initialize the rule"""
19 self.resource_property_types = ["AWS::Events::Rule"]
20
21 def check_rate(self, value, path):
22 """Check Rate configuration"""
23 matches = []
24 # Extract the expression from rate(XXX)
25 rate_expression = value[value.find("(") + 1 : value.find(")")]
26
27 if not rate_expression:
28 matches.append(
29 RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")
30 )
31 else:
32 # Rate format: rate(Value Unit)
33 items = rate_expression.split(" ")
34
35 if len(items) != 2:
36 message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
37 matches.append(RuleMatch(path, message.format(len(items))))
38 else:
39 # Check the Value
40 if not items[0].isdigit():
41 message = "Rate Value ({}) should be of type Integer."
42 extra_args = {
43 "actual_type": type(items[0]).__name__,
44 "expected_type": int.__name__,
45 }
46 matches.append(
47 RuleMatch(path, message.format(items[0]), **extra_args)
48 )
49
50 return matches
51
52 def check_cron(self, value, path):
53 """Check Cron configuration"""
54 matches = []
55 # Extract the expression from cron(XXX)
56 cron_expression = value[value.find("(") + 1 : value.find(")")]
57
58 if not cron_expression:
59 matches.append(
60 RuleMatch(path, "Cron value of ScheduleExpression cannot be empty")
61 )
62 else:
63 # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)
64 items = cron_expression.split(" ")
65
66 if len(items) != 6:
67 message = "Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements"
68 matches.append(RuleMatch(path, message.format(len(items))))
69 return matches
70
71 _, _, day_of_month, _, day_of_week, _ = cron_expression.split(" ")
72 if day_of_month != "?" and day_of_week != "?":
73 matches.append(
74 RuleMatch(
75 path,
76 "Don't specify the Day-of-month and Day-of-week fields in the same cron expression",
77 )
78 )
79
80 return matches
81
82 def check_value(self, value, path):
83 """Count ScheduledExpression value"""
84 matches = []
85
86 # Value is either "cron()" or "rate()"
87 if value.startswith("rate(") and value.endswith(")"):
88 matches.extend(self.check_rate(value, path))
89 elif value.startswith("cron(") and value.endswith(")"):
90 matches.extend(self.check_cron(value, path))
91 else:
92 message = "Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()"
93 matches.append(RuleMatch(path, message.format(value)))
94
95 return matches
96
97 def match_resource_properties(self, properties, _, path, cfn):
98 """Check CloudFormation Properties"""
99 matches = []
100
101 matches.extend(
102 cfn.check_value(
103 obj=properties,
104 key="ScheduleExpression",
105 path=path[:],
106 check_value=self.check_value,
107 )
108 )
109
110 return matches
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
@@ -25,29 +25,43 @@
rate_expression = value[value.find("(") + 1 : value.find(")")]
if not rate_expression:
- matches.append(
- RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")
- )
- else:
- # Rate format: rate(Value Unit)
- items = rate_expression.split(" ")
-
- if len(items) != 2:
- message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
- matches.append(RuleMatch(path, message.format(len(items))))
- else:
- # Check the Value
- if not items[0].isdigit():
- message = "Rate Value ({}) should be of type Integer."
- extra_args = {
- "actual_type": type(items[0]).__name__,
- "expected_type": int.__name__,
- }
- matches.append(
- RuleMatch(path, message.format(items[0]), **extra_args)
- )
+ return [RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")]
+
+ # Rate format: rate(Value Unit)
+ items = rate_expression.split(" ")
+
+ if len(items) != 2:
+ message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
+ matches.append(RuleMatch(path, message.format(len(items))))
+ return [RuleMatch(path, message.format(len(items)))]
+
+ # Check the Value
+ if not items[0].isdigit():
+ message = "Rate Value ({}) should be of type Integer."
+ extra_args = {
+ "actual_type": type(items[0]).__name__,
+ "expected_type": int.__name__,
+ }
+ return [RuleMatch(path, message.format(items[0]), **extra_args)]
+
+ if float(items[0]) <= 0:
+ return [
+ RuleMatch(path, f"Rate Value {items[0]!r} should be greater than 0.")
+ ]
+
+ if float(items[0]) <= 1:
+ valid_periods = ["minute", "hour", "day"]
+ elif float(items[0]) > 1:
+ valid_periods = ["minutes", "hours", "days"]
+ # Check the Unit
+ if items[1] not in valid_periods:
+ return [
+ RuleMatch(
+ path, f"Rate Unit {items[1]!r} should be one of {valid_periods!r}."
+ )
+ ]
- return matches
+ return []
def check_cron(self, value, path):
"""Check Cron configuration"""
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n@@ -25,29 +25,43 @@\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n \n if not rate_expression:\n- matches.append(\n- RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")\n- )\n- else:\n- # Rate format: rate(Value Unit)\n- items = rate_expression.split(\" \")\n-\n- if len(items) != 2:\n- message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n- matches.append(RuleMatch(path, message.format(len(items))))\n- else:\n- # Check the Value\n- if not items[0].isdigit():\n- message = \"Rate Value ({}) should be of type Integer.\"\n- extra_args = {\n- \"actual_type\": type(items[0]).__name__,\n- \"expected_type\": int.__name__,\n- }\n- matches.append(\n- RuleMatch(path, message.format(items[0]), **extra_args)\n- )\n+ return [RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")]\n+\n+ # Rate format: rate(Value Unit)\n+ items = rate_expression.split(\" \")\n+\n+ if len(items) != 2:\n+ message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n+ matches.append(RuleMatch(path, message.format(len(items))))\n+ return [RuleMatch(path, message.format(len(items)))]\n+\n+ # Check the Value\n+ if not items[0].isdigit():\n+ message = \"Rate Value ({}) should be of type Integer.\"\n+ extra_args = {\n+ \"actual_type\": type(items[0]).__name__,\n+ \"expected_type\": int.__name__,\n+ }\n+ return [RuleMatch(path, message.format(items[0]), **extra_args)]\n+\n+ if float(items[0]) <= 0:\n+ return [\n+ RuleMatch(path, f\"Rate Value {items[0]!r} should be greater than 0.\")\n+ ]\n+\n+ if float(items[0]) <= 1:\n+ valid_periods = [\"minute\", \"hour\", \"day\"]\n+ elif float(items[0]) > 1:\n+ valid_periods = [\"minutes\", \"hours\", \"days\"]\n+ # Check the Unit\n+ if items[1] not in valid_periods:\n+ return [\n+ RuleMatch(\n+ path, f\"Rate Unit {items[1]!r} should be one of {valid_periods!r}.\"\n+ )\n+ ]\n \n- return matches\n+ return []\n \n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n", "issue": "Doesn't catch invalid `rate(1 hours)`\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\n0.44.7\r\n\r\n*Description of issue.*\r\n\r\ncfn-lint doesn't recognize that this ScheduledExpression is invalid (should be `rate(1 hour)`)\r\n```yaml\r\n ExampleRule:\r\n Type: AWS::Events::Rule\r\n Properties:\r\n Description: desc\r\n Name: name\r\n ScheduleExpression: rate(1 hours)\r\n State: ENABLED\r\n```\r\n\r\nBut when building the cloudformation, I get the following error:\r\n\r\n```\r\nParameter ScheduleExpression is not valid. (Service: AmazonCloudWatchEvents; Status Code: 400; Error Code: ValidationException; Request ID: ...; Proxy: null)\r\n```\r\n\r\nI saw #816, but since this is a `rate` issue, not a `cron` issue, I thought I should open a new ticket\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RuleScheduleExpression(CloudFormationLintRule):\n \"\"\"Validate AWS Events Schedule expression format\"\"\"\n\n id = \"E3027\"\n shortdesc = \"Validate AWS Event ScheduleExpression format\"\n description = \"Validate the formation of the AWS::Event ScheduleExpression\"\n source_url = \"https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html\"\n tags = [\"resources\", \"events\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n self.resource_property_types = [\"AWS::Events::Rule\"]\n\n def check_rate(self, value, path):\n \"\"\"Check Rate configuration\"\"\"\n matches = []\n # Extract the expression from rate(XXX)\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not rate_expression:\n matches.append(\n RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: rate(Value Unit)\n items = rate_expression.split(\" \")\n\n if len(items) != 2:\n message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n else:\n # Check the Value\n if not items[0].isdigit():\n message = \"Rate Value ({}) should be of type Integer.\"\n extra_args = {\n \"actual_type\": type(items[0]).__name__,\n \"expected_type\": int.__name__,\n }\n matches.append(\n RuleMatch(path, message.format(items[0]), **extra_args)\n )\n\n return matches\n\n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n matches = []\n # Extract the expression from cron(XXX)\n cron_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not cron_expression:\n matches.append(\n RuleMatch(path, \"Cron value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)\n items = cron_expression.split(\" \")\n\n if len(items) != 6:\n message = \"Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n return matches\n\n _, _, day_of_month, _, day_of_week, _ = cron_expression.split(\" \")\n if day_of_month != \"?\" and day_of_week != \"?\":\n matches.append(\n RuleMatch(\n path,\n \"Don't specify the Day-of-month and Day-of-week fields in the same cron expression\",\n )\n )\n\n return matches\n\n def check_value(self, value, path):\n \"\"\"Count ScheduledExpression value\"\"\"\n matches = []\n\n # Value is either \"cron()\" or \"rate()\"\n if value.startswith(\"rate(\") and value.endswith(\")\"):\n matches.extend(self.check_rate(value, path))\n elif value.startswith(\"cron(\") and value.endswith(\")\"):\n matches.extend(self.check_cron(value, path))\n else:\n message = \"Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()\"\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"ScheduleExpression\",\n path=path[:],\n check_value=self.check_value,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleScheduleExpression.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RuleScheduleExpression(CloudFormationLintRule):\n \"\"\"Validate AWS Events Schedule expression format\"\"\"\n\n id = \"E3027\"\n shortdesc = \"Validate AWS Event ScheduleExpression format\"\n description = \"Validate the formation of the AWS::Event ScheduleExpression\"\n source_url = \"https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html\"\n tags = [\"resources\", \"events\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n self.resource_property_types = [\"AWS::Events::Rule\"]\n\n def check_rate(self, value, path):\n \"\"\"Check Rate configuration\"\"\"\n matches = []\n # Extract the expression from rate(XXX)\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not rate_expression:\n return [RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")]\n\n # Rate format: rate(Value Unit)\n items = rate_expression.split(\" \")\n\n if len(items) != 2:\n message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n return [RuleMatch(path, message.format(len(items)))]\n\n # Check the Value\n if not items[0].isdigit():\n message = \"Rate Value ({}) should be of type Integer.\"\n extra_args = {\n \"actual_type\": type(items[0]).__name__,\n \"expected_type\": int.__name__,\n }\n return [RuleMatch(path, message.format(items[0]), **extra_args)]\n\n if float(items[0]) <= 0:\n return [\n RuleMatch(path, f\"Rate Value {items[0]!r} should be greater than 0.\")\n ]\n\n if float(items[0]) <= 1:\n valid_periods = [\"minute\", \"hour\", \"day\"]\n elif float(items[0]) > 1:\n valid_periods = [\"minutes\", \"hours\", \"days\"]\n # Check the Unit\n if items[1] not in valid_periods:\n return [\n RuleMatch(\n path, f\"Rate Unit {items[1]!r} should be one of {valid_periods!r}.\"\n )\n ]\n\n return []\n\n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n matches = []\n # Extract the expression from cron(XXX)\n cron_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not cron_expression:\n matches.append(\n RuleMatch(path, \"Cron value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)\n items = cron_expression.split(\" \")\n\n if len(items) != 6:\n message = \"Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n return matches\n\n _, _, day_of_month, _, day_of_week, _ = cron_expression.split(\" \")\n if day_of_month != \"?\" and day_of_week != \"?\":\n matches.append(\n RuleMatch(\n path,\n \"Don't specify the Day-of-month and Day-of-week fields in the same cron expression\",\n )\n )\n\n return matches\n\n def check_value(self, value, path):\n \"\"\"Count ScheduledExpression value\"\"\"\n matches = []\n\n # Value is either \"cron()\" or \"rate()\"\n if value.startswith(\"rate(\") and value.endswith(\")\"):\n matches.extend(self.check_rate(value, path))\n elif value.startswith(\"cron(\") and value.endswith(\")\"):\n matches.extend(self.check_cron(value, path))\n else:\n message = \"Invalid ScheduledExpression specified ({}). 
Value has to be either cron() or rate()\"\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"ScheduleExpression\",\n path=path[:],\n check_value=self.check_value,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleScheduleExpression.py"}]}
| 1,505 | 654 |
gh_patches_debug_10525
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-easyblocks-1924
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pylibdir of versionindependentpythonpackage.py is overwritten by pythonpackage.py
In 599869d `set_pylibdirs` of [pythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/pythonpackage.py) was changed to always set `self.pylibdir` (not only if its value is `UNKNOWN`). This seems to break the `prepare_step` of [versionindependentpythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/versionindependentpythonpackage.py) because pylibdir is now overwritten. This forces the `install_step` to crash afterwards due to missing subdirs within the `os.mkdir(full_pylibdir)` command.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/generic/versionindependentpythonpackage.py`
Content:
```
1 ##
2 # Copyright 2013-2020 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.
27
28 Python installs libraries by defailt in site-packages/python-xxx/
29 But packages that are not dependend on the python version can be installed in a different prefix, e.g. lib
30 as long as we add this folder to the pythonpath.
31
32 @author: Kenneth Hoste, Jens Timmerman (Ghent University)
33 """
34 import os
35 import re
36
37 import easybuild.tools.environment as env
38 from easybuild.easyblocks.generic.pythonpackage import PythonPackage
39 from easybuild.tools.build_log import EasyBuildError
40 from easybuild.tools.run import run_cmd
41
42
43 class VersionIndependentPythonPackage(PythonPackage):
44 """Support for building/installing python packages without requiring a specific python package."""
45
46 def build_step(self):
47 """No build procedure."""
48 pass
49
50 def prepare_step(self, *args, **kwargs):
51 """Set pylibdir"""
52 self.pylibdir = 'lib'
53 super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)
54
55 def install_step(self):
56 """Custom install procedure to skip selection of python package versions."""
57 full_pylibdir = os.path.join(self.installdir, self.pylibdir)
58
59 env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))
60
61 try:
62 os.mkdir(full_pylibdir)
63 except OSError as err:
64 # this will raise an error and not return
65 raise EasyBuildError("Failed to install: %s", err)
66
67 if self.use_setup_py:
68 extra_installopts = [
69 '--install-lib=%s' % full_pylibdir,
70 '--single-version-externally-managed',
71 '--record %s' % os.path.join(self.builddir, 'record'),
72 '--no-compile',
73 ]
74 self.cfg.update('installopts', ' '.join(extra_installopts))
75 else:
76 # using easy_install or pip always results in installation that is specific to Python version
77 eb_name = self.__class__.__name__
78 raise EasyBuildError("%s easyblock is not compatible with using easy_install or pip", eb_name)
79
80 cmd = self.compose_install_command(self.installdir)
81 run_cmd(cmd, log_all=True, simple=True, log_output=True)
82
83 # setuptools stubbornly replaces the shebang line in scripts with
84 # the full path to the Python interpreter used to install;
85 # we change it (back) to '#!/usr/bin/env python' here
86 shebang_re = re.compile("^#!/.*python")
87 bindir = os.path.join(self.installdir, 'bin')
88 if os.path.exists(bindir):
89 for script in os.listdir(bindir):
90 script = os.path.join(bindir, script)
91 if os.path.isfile(script):
92 try:
93 txt = open(script, 'r').read()
94 if shebang_re.search(txt):
95 new_shebang = "#!/usr/bin/env python"
96 self.log.debug("Patching shebang header line in %s to '%s'" % (script, new_shebang))
97 txt = shebang_re.sub(new_shebang, txt)
98 open(script, 'w').write(txt)
99 except IOError as err:
100 raise EasyBuildError("Failed to patch shebang header line in %s: %s", script, err)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/easybuild/easyblocks/generic/versionindependentpythonpackage.py b/easybuild/easyblocks/generic/versionindependentpythonpackage.py
--- a/easybuild/easyblocks/generic/versionindependentpythonpackage.py
+++ b/easybuild/easyblocks/generic/versionindependentpythonpackage.py
@@ -47,10 +47,11 @@
"""No build procedure."""
pass
- def prepare_step(self, *args, **kwargs):
- """Set pylibdir"""
+ def set_pylibdirs(self):
+ """Set pylibdir."""
+ super(VersionIndependentPythonPackage, self).set_pylibdirs()
self.pylibdir = 'lib'
- super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)
+ self.all_pylibdirs = ['lib']
def install_step(self):
"""Custom install procedure to skip selection of python package versions."""
|
{"golden_diff": "diff --git a/easybuild/easyblocks/generic/versionindependentpythonpackage.py b/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n--- a/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n+++ b/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n@@ -47,10 +47,11 @@\n \"\"\"No build procedure.\"\"\"\n pass\n \n- def prepare_step(self, *args, **kwargs):\n- \"\"\"Set pylibdir\"\"\"\n+ def set_pylibdirs(self):\n+ \"\"\"Set pylibdir.\"\"\"\n+ super(VersionIndependentPythonPackage, self).set_pylibdirs()\n self.pylibdir = 'lib'\n- super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)\n+ self.all_pylibdirs = ['lib']\n \n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n", "issue": "pylibdir of versionindependentpythonpackage.py is overwritten by pythonpackage.py\nIn 599869d `set_pylibdirs` of [pythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/pythonpackage.py) was changed to always set `self.pylibdir` (not only if its value is `UNKNOWN`). This seems to break the `prepare_step` of [versionindependentpythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/versionindependentpythonpackage.py) because pylibdir is now overwritten. This forces the `install_step` to crash afterwards due to missing subdirs within the `os.mkdir(full_pylibdir)` command.\n", "before_files": [{"content": "##\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.\n\nPython installs libraries by defailt in site-packages/python-xxx/\nBut packages that are not dependend on the python version can be installed in a different prefix, e.g. 
lib\nas long as we add this folder to the pythonpath.\n\n@author: Kenneth Hoste, Jens Timmerman (Ghent University)\n\"\"\"\nimport os\nimport re\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.run import run_cmd\n\n\nclass VersionIndependentPythonPackage(PythonPackage):\n \"\"\"Support for building/installing python packages without requiring a specific python package.\"\"\"\n\n def build_step(self):\n \"\"\"No build procedure.\"\"\"\n pass\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"Set pylibdir\"\"\"\n self.pylibdir = 'lib'\n super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)\n\n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n full_pylibdir = os.path.join(self.installdir, self.pylibdir)\n\n env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))\n\n try:\n os.mkdir(full_pylibdir)\n except OSError as err:\n # this will raise an error and not return\n raise EasyBuildError(\"Failed to install: %s\", err)\n\n if self.use_setup_py:\n extra_installopts = [\n '--install-lib=%s' % full_pylibdir,\n '--single-version-externally-managed',\n '--record %s' % os.path.join(self.builddir, 'record'),\n '--no-compile',\n ]\n self.cfg.update('installopts', ' '.join(extra_installopts))\n else:\n # using easy_install or pip always results in installation that is specific to Python version\n eb_name = self.__class__.__name__\n raise EasyBuildError(\"%s easyblock is not compatible with using easy_install or pip\", eb_name)\n\n cmd = self.compose_install_command(self.installdir)\n run_cmd(cmd, log_all=True, simple=True, log_output=True)\n\n # setuptools stubbornly replaces the shebang line in scripts with\n # the full path to the Python interpreter used to install;\n # we change it (back) to '#!/usr/bin/env python' here\n shebang_re = re.compile(\"^#!/.*python\")\n bindir = os.path.join(self.installdir, 'bin')\n if os.path.exists(bindir):\n for script in os.listdir(bindir):\n script = os.path.join(bindir, script)\n if os.path.isfile(script):\n try:\n txt = open(script, 'r').read()\n if shebang_re.search(txt):\n new_shebang = \"#!/usr/bin/env python\"\n self.log.debug(\"Patching shebang header line in %s to '%s'\" % (script, new_shebang))\n txt = shebang_re.sub(new_shebang, txt)\n open(script, 'w').write(txt)\n except IOError as err:\n raise EasyBuildError(\"Failed to patch shebang header line in %s: %s\", script, err)\n", "path": "easybuild/easyblocks/generic/versionindependentpythonpackage.py"}], "after_files": [{"content": "##\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR 
A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.\n\nPython installs libraries by defailt in site-packages/python-xxx/\nBut packages that are not dependend on the python version can be installed in a different prefix, e.g. lib\nas long as we add this folder to the pythonpath.\n\n@author: Kenneth Hoste, Jens Timmerman (Ghent University)\n\"\"\"\nimport os\nimport re\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.run import run_cmd\n\n\nclass VersionIndependentPythonPackage(PythonPackage):\n \"\"\"Support for building/installing python packages without requiring a specific python package.\"\"\"\n\n def build_step(self):\n \"\"\"No build procedure.\"\"\"\n pass\n\n def set_pylibdirs(self):\n \"\"\"Set pylibdir.\"\"\"\n super(VersionIndependentPythonPackage, self).set_pylibdirs()\n self.pylibdir = 'lib'\n self.all_pylibdirs = ['lib']\n\n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n full_pylibdir = os.path.join(self.installdir, self.pylibdir)\n\n env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))\n\n try:\n os.mkdir(full_pylibdir)\n except OSError as err:\n # this will raise an error and not return\n raise EasyBuildError(\"Failed to install: %s\", err)\n\n if self.use_setup_py:\n extra_installopts = [\n '--install-lib=%s' % full_pylibdir,\n '--single-version-externally-managed',\n '--record %s' % os.path.join(self.builddir, 'record'),\n '--no-compile',\n ]\n self.cfg.update('installopts', ' '.join(extra_installopts))\n else:\n # using easy_install or pip always results in installation that is specific to Python version\n eb_name = self.__class__.__name__\n raise EasyBuildError(\"%s easyblock is not compatible with using easy_install or pip\", eb_name)\n\n cmd = self.compose_install_command(self.installdir)\n run_cmd(cmd, log_all=True, simple=True, log_output=True)\n\n # setuptools stubbornly replaces the shebang line in scripts with\n # the full path to the Python interpreter used to install;\n # we change it (back) to '#!/usr/bin/env python' here\n shebang_re = re.compile(\"^#!/.*python\")\n bindir = os.path.join(self.installdir, 'bin')\n if os.path.exists(bindir):\n for script in os.listdir(bindir):\n script = os.path.join(bindir, script)\n if os.path.isfile(script):\n try:\n txt = open(script, 'r').read()\n if shebang_re.search(txt):\n new_shebang = \"#!/usr/bin/env python\"\n self.log.debug(\"Patching shebang header line in %s to '%s'\" % (script, new_shebang))\n txt = shebang_re.sub(new_shebang, txt)\n open(script, 'w').write(txt)\n except IOError as err:\n raise EasyBuildError(\"Failed to patch shebang header line in %s: %s\", script, err)\n", "path": "easybuild/easyblocks/generic/versionindependentpythonpackage.py"}]}
| 1,631 | 204 |
gh_patches_debug_22476
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-714
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tokenize person lookup to work with "name last_name"
`lookups.PersonLookup` won't show 'Piotr Banaszkiewicz' for 'Piotr Ban' input – because it only looks up `personal` or `family` or `email`…
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `workshops/lookups.py`
Content:
```
1 from django.contrib.auth.models import Group
2 from django.db.models import Q
3
4 from selectable.base import ModelLookup
5 from selectable.registry import registry
6 from selectable.decorators import login_required
7
8 from workshops import models
9
10
11 @login_required
12 class EventLookup(ModelLookup):
13 model = models.Event
14 search_fields = ('slug__icontains', )
15
16
17 @login_required
18 class HostLookup(ModelLookup):
19 model = models.Host
20 search_fields = (
21 'domain__icontains',
22 'fullname__icontains'
23 )
24
25
26 @login_required
27 class PersonLookup(ModelLookup):
28 model = models.Person
29 search_fields = (
30 'personal__icontains',
31 'family__icontains',
32 'email__icontains',
33 'username__icontains'
34 )
35
36
37 @login_required
38 class AdminLookup(ModelLookup):
39 """The same as PersonLookup, but allows only to select administrators.
40
41 Administrator is anyone with superuser power or in "administrators" group.
42 """
43 model = models.Person
44 search_fields = (
45 'personal__icontains',
46 'family__icontains',
47 'email__icontains',
48 'username__icontains'
49 )
50
51 def get_query(self, request, term):
52 results = super().get_query(request, term)
53 admin_group = Group.objects.get(name='administrators')
54 results = results.filter(
55 Q(is_superuser=True) | Q(groups__in=[admin_group])
56 )
57 return results
58
59
60 @login_required
61 class AirportLookup(ModelLookup):
62 model = models.Airport
63 search_fields = (
64 'iata__icontains',
65 'fullname__icontains'
66 )
67
68
69 registry.register(EventLookup)
70 registry.register(HostLookup)
71 registry.register(PersonLookup)
72 registry.register(AdminLookup)
73 registry.register(AirportLookup)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/workshops/lookups.py b/workshops/lookups.py
--- a/workshops/lookups.py
+++ b/workshops/lookups.py
@@ -1,3 +1,7 @@
+from functools import reduce
+import operator
+import re
+
from django.contrib.auth.models import Group
from django.db.models import Q
@@ -33,6 +37,32 @@
'username__icontains'
)
+ def get_query(self, request, term):
+ """Override this method to allow for additional lookup method: """
+ # original code from selectable.base.ModelLookup.get_query:
+ qs = self.get_queryset()
+ if term:
+ search_filters = []
+ if self.search_fields:
+ for field in self.search_fields:
+ search_filters.append(Q(**{field: term}))
+
+ # tokenizing part
+ tokens = re.split('\s+', term)
+ if len(tokens) == 2:
+ name1, name2 = tokens
+ complex_q = (
+ Q(personal__icontains=name1) & Q(family__icontains=name2)
+ ) | (
+ Q(personal__icontains=name2) & Q(family__icontains=name1)
+ )
+ search_filters.append(complex_q)
+
+ # this is brilliant: it applies OR to all search filters
+ qs = qs.filter(reduce(operator.or_, search_filters))
+
+ return qs
+
@login_required
class AdminLookup(ModelLookup):
|
{"golden_diff": "diff --git a/workshops/lookups.py b/workshops/lookups.py\n--- a/workshops/lookups.py\n+++ b/workshops/lookups.py\n@@ -1,3 +1,7 @@\n+from functools import reduce\n+import operator\n+import re\n+\n from django.contrib.auth.models import Group\n from django.db.models import Q\n \n@@ -33,6 +37,32 @@\n 'username__icontains'\n )\n \n+ def get_query(self, request, term):\n+ \"\"\"Override this method to allow for additional lookup method: \"\"\"\n+ # original code from selectable.base.ModelLookup.get_query:\n+ qs = self.get_queryset()\n+ if term:\n+ search_filters = []\n+ if self.search_fields:\n+ for field in self.search_fields:\n+ search_filters.append(Q(**{field: term}))\n+\n+ # tokenizing part\n+ tokens = re.split('\\s+', term)\n+ if len(tokens) == 2:\n+ name1, name2 = tokens\n+ complex_q = (\n+ Q(personal__icontains=name1) & Q(family__icontains=name2)\n+ ) | (\n+ Q(personal__icontains=name2) & Q(family__icontains=name1)\n+ )\n+ search_filters.append(complex_q)\n+\n+ # this is brilliant: it applies OR to all search filters\n+ qs = qs.filter(reduce(operator.or_, search_filters))\n+\n+ return qs\n+\n \n @login_required\n class AdminLookup(ModelLookup):\n", "issue": "Tokenize person lookup to work with \"name last_name\"\n`lookups.PersonLookup` won't show 'Piotr Banaszkiewicz' for 'Piotr Ban' input \u2013 because it only looks up `personal` or `family` or `email`\u2026\n\n", "before_files": [{"content": "from django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom selectable.base import ModelLookup\nfrom selectable.registry import registry\nfrom selectable.decorators import login_required\n\nfrom workshops import models\n\n\n@login_required\nclass EventLookup(ModelLookup):\n model = models.Event\n search_fields = ('slug__icontains', )\n\n\n@login_required\nclass HostLookup(ModelLookup):\n model = models.Host\n search_fields = (\n 'domain__icontains',\n 'fullname__icontains'\n )\n\n\n@login_required\nclass PersonLookup(ModelLookup):\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n\n@login_required\nclass AdminLookup(ModelLookup):\n \"\"\"The same as PersonLookup, but allows only to select administrators.\n\n Administrator is anyone with superuser power or in \"administrators\" group.\n \"\"\"\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n results = super().get_query(request, term)\n admin_group = Group.objects.get(name='administrators')\n results = results.filter(\n Q(is_superuser=True) | Q(groups__in=[admin_group])\n )\n return results\n\n\n@login_required\nclass AirportLookup(ModelLookup):\n model = models.Airport\n search_fields = (\n 'iata__icontains',\n 'fullname__icontains'\n )\n\n\nregistry.register(EventLookup)\nregistry.register(HostLookup)\nregistry.register(PersonLookup)\nregistry.register(AdminLookup)\nregistry.register(AirportLookup)\n", "path": "workshops/lookups.py"}], "after_files": [{"content": "from functools import reduce\nimport operator\nimport re\n\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom selectable.base import ModelLookup\nfrom selectable.registry import registry\nfrom selectable.decorators import login_required\n\nfrom workshops import models\n\n\n@login_required\nclass EventLookup(ModelLookup):\n model = models.Event\n search_fields = ('slug__icontains', 
)\n\n\n@login_required\nclass HostLookup(ModelLookup):\n model = models.Host\n search_fields = (\n 'domain__icontains',\n 'fullname__icontains'\n )\n\n\n@login_required\nclass PersonLookup(ModelLookup):\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n \"\"\"Override this method to allow for additional lookup method: \"\"\"\n # original code from selectable.base.ModelLookup.get_query:\n qs = self.get_queryset()\n if term:\n search_filters = []\n if self.search_fields:\n for field in self.search_fields:\n search_filters.append(Q(**{field: term}))\n\n # tokenizing part\n tokens = re.split('\\s+', term)\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n Q(personal__icontains=name1) & Q(family__icontains=name2)\n ) | (\n Q(personal__icontains=name2) & Q(family__icontains=name1)\n )\n search_filters.append(complex_q)\n\n # this is brilliant: it applies OR to all search filters\n qs = qs.filter(reduce(operator.or_, search_filters))\n\n return qs\n\n\n@login_required\nclass AdminLookup(ModelLookup):\n \"\"\"The same as PersonLookup, but allows only to select administrators.\n\n Administrator is anyone with superuser power or in \"administrators\" group.\n \"\"\"\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n results = super().get_query(request, term)\n admin_group = Group.objects.get(name='administrators')\n results = results.filter(\n Q(is_superuser=True) | Q(groups__in=[admin_group])\n )\n return results\n\n\n@login_required\nclass AirportLookup(ModelLookup):\n model = models.Airport\n search_fields = (\n 'iata__icontains',\n 'fullname__icontains'\n )\n\n\nregistry.register(EventLookup)\nregistry.register(HostLookup)\nregistry.register(PersonLookup)\nregistry.register(AdminLookup)\nregistry.register(AirportLookup)\n", "path": "workshops/lookups.py"}]}
| 827 | 331 |
gh_patches_debug_8564
|
rasdani/github-patches
|
git_diff
|
comfyanonymous__ComfyUI-2859
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Perp-Neg implementation is wrong, parallel component not ignored
https://github.com/comfyanonymous/ComfyUI/blob/18c151b3e3f6838fab4028e7a8ba526e30e610d3/comfy_extras/nodes_perpneg.py#L38-L40
The Perp-Neg node does not match the [paper](https://arxiv.org/pdf/2304.04968.pdf) (pytorch code in Appendix A.1).
When positive and negative prompt are the same, the result should be the same as an empty negative prompt because the prompts are completely parallel (i.e. there is no perpendicular component).
Positive: "forest"
Negative: ""

Positive: "forest"
Negative: "forest"

I'll submit a PR in a bit.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `comfy_extras/nodes_perpneg.py`
Content:
```
1 import torch
2 import comfy.model_management
3 import comfy.sample
4 import comfy.samplers
5 import comfy.utils
6
7
8 class PerpNeg:
9 @classmethod
10 def INPUT_TYPES(s):
11 return {"required": {"model": ("MODEL", ),
12 "empty_conditioning": ("CONDITIONING", ),
13 "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
14 }}
15 RETURN_TYPES = ("MODEL",)
16 FUNCTION = "patch"
17
18 CATEGORY = "_for_testing"
19
20 def patch(self, model, empty_conditioning, neg_scale):
21 m = model.clone()
22 nocond = comfy.sample.convert_cond(empty_conditioning)
23
24 def cfg_function(args):
25 model = args["model"]
26 noise_pred_pos = args["cond_denoised"]
27 noise_pred_neg = args["uncond_denoised"]
28 cond_scale = args["cond_scale"]
29 x = args["input"]
30 sigma = args["sigma"]
31 model_options = args["model_options"]
32 nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, "negative")
33
34 (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)
35
36 pos = noise_pred_pos - noise_pred_nocond
37 neg = noise_pred_neg - noise_pred_nocond
38 perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
39 perp_neg = perp * neg_scale
40 cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
41 cfg_result = x - cfg_result
42 return cfg_result
43
44 m.set_model_sampler_cfg_function(cfg_function)
45
46 return (m, )
47
48
49 NODE_CLASS_MAPPINGS = {
50 "PerpNeg": PerpNeg,
51 }
52
53 NODE_DISPLAY_NAME_MAPPINGS = {
54 "PerpNeg": "Perp-Neg",
55 }
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py
--- a/comfy_extras/nodes_perpneg.py
+++ b/comfy_extras/nodes_perpneg.py
@@ -35,7 +35,7 @@
pos = noise_pred_pos - noise_pred_nocond
neg = noise_pred_neg - noise_pred_nocond
- perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
+ perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos
perp_neg = perp * neg_scale
cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
cfg_result = x - cfg_result
|
{"golden_diff": "diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py\n--- a/comfy_extras/nodes_perpneg.py\n+++ b/comfy_extras/nodes_perpneg.py\n@@ -35,7 +35,7 @@\n \n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n- perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg\n+ perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n", "issue": "Perp-Neg implementation is wrong, parallel component not ignored\nhttps://github.com/comfyanonymous/ComfyUI/blob/18c151b3e3f6838fab4028e7a8ba526e30e610d3/comfy_extras/nodes_perpneg.py#L38-L40\r\n\r\nThe Perp-Neg node does not match the [paper](https://arxiv.org/pdf/2304.04968.pdf) (pytorch code in Appendix A.1).\r\nWhen positive and negative prompt are the same, the result should be the same as an empty negative prompt because the prompts are completely parallel (i.e. there is no perpendicular component).\r\n\r\nPositive: \"forest\"\r\nNegative: \"\"\r\n\r\n\r\nPositive: \"forest\"\r\nNegative: \"forest\"\r\n\r\n\r\nI'll submit a PR in a bit.\n", "before_files": [{"content": "import torch\nimport comfy.model_management\nimport comfy.sample\nimport comfy.samplers\nimport comfy.utils\n\n\nclass PerpNeg:\n @classmethod\n def INPUT_TYPES(s):\n return {\"required\": {\"model\": (\"MODEL\", ),\n \"empty_conditioning\": (\"CONDITIONING\", ),\n \"neg_scale\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 100.0}),\n }}\n RETURN_TYPES = (\"MODEL\",)\n FUNCTION = \"patch\"\n\n CATEGORY = \"_for_testing\"\n\n def patch(self, model, empty_conditioning, neg_scale):\n m = model.clone()\n nocond = comfy.sample.convert_cond(empty_conditioning)\n\n def cfg_function(args):\n model = args[\"model\"]\n noise_pred_pos = args[\"cond_denoised\"]\n noise_pred_neg = args[\"uncond_denoised\"]\n cond_scale = args[\"cond_scale\"]\n x = args[\"input\"]\n sigma = args[\"sigma\"]\n model_options = args[\"model_options\"]\n nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, \"negative\")\n\n (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)\n\n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n return cfg_result\n\n m.set_model_sampler_cfg_function(cfg_function)\n\n return (m, )\n\n\nNODE_CLASS_MAPPINGS = {\n \"PerpNeg\": PerpNeg,\n}\n\nNODE_DISPLAY_NAME_MAPPINGS = {\n \"PerpNeg\": \"Perp-Neg\",\n}\n", "path": "comfy_extras/nodes_perpneg.py"}], "after_files": [{"content": "import torch\nimport comfy.model_management\nimport comfy.sample\nimport comfy.samplers\nimport comfy.utils\n\n\nclass PerpNeg:\n @classmethod\n def INPUT_TYPES(s):\n return {\"required\": {\"model\": (\"MODEL\", ),\n \"empty_conditioning\": (\"CONDITIONING\", ),\n \"neg_scale\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 100.0}),\n }}\n RETURN_TYPES = (\"MODEL\",)\n FUNCTION = \"patch\"\n\n CATEGORY = \"_for_testing\"\n\n def patch(self, model, empty_conditioning, neg_scale):\n m = model.clone()\n nocond = comfy.sample.convert_cond(empty_conditioning)\n\n def cfg_function(args):\n model = args[\"model\"]\n noise_pred_pos = args[\"cond_denoised\"]\n noise_pred_neg = 
args[\"uncond_denoised\"]\n cond_scale = args[\"cond_scale\"]\n x = args[\"input\"]\n sigma = args[\"sigma\"]\n model_options = args[\"model_options\"]\n nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, \"negative\")\n\n (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)\n\n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n return cfg_result\n\n m.set_model_sampler_cfg_function(cfg_function)\n\n return (m, )\n\n\nNODE_CLASS_MAPPINGS = {\n \"PerpNeg\": PerpNeg,\n}\n\nNODE_DISPLAY_NAME_MAPPINGS = {\n \"PerpNeg\": \"Perp-Neg\",\n}\n", "path": "comfy_extras/nodes_perpneg.py"}]}
| 1,127 | 179 |
gh_patches_debug_34384
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-6877
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
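A minimal sketch of the two styles the issue compares (the `__future__` import is a no-op on Python 3):

```python
# Both approaches avoid truncating integer division; the __future__ import does it once per module.
from __future__ import division

x, y = 7, 2
print(x / y)        # 3.5 once the import is in effect
print(1. * x / y)   # 3.5 as well, but the 1.* coercion must be repeated at every call site
print(x // y)       # 3, floor division stays available explicitly
```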
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py`
Content:
```
1 from typing import TYPE_CHECKING, Optional
2
3 from great_expectations.core import (
4 ExpectationConfiguration,
5 ExpectationValidationResult,
6 )
7 from great_expectations.expectations.expectation import (
8 ColumnMapExpectation,
9 InvalidExpectationConfigurationError,
10 render_evaluation_parameter_string,
11 )
12 from great_expectations.render import LegacyRendererType, RenderedStringTemplateContent
13 from great_expectations.render.renderer.renderer import renderer
14 from great_expectations.render.renderer_configuration import (
15 RendererConfiguration,
16 RendererValueType,
17 )
18 from great_expectations.render.util import (
19 num_to_str,
20 parse_row_condition_string_pandas_engine,
21 substitute_none_for_missing,
22 )
23
24 if TYPE_CHECKING:
25 from great_expectations.render.renderer_configuration import AddParamArgs
26
27
28 class ExpectColumnValuesToNotMatchRegexList(ColumnMapExpectation):
29 """Expect the column entries to be strings that do not match any of a list of regular expressions. Matches can be anywhere in the string.
30
31 expect_column_values_to_not_match_regex_list is a \
32 [Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).
33
34 Args:
35 column (str): \
36 The column name.
37 regex_list (list): \
38 The list of regular expressions which the column entries should not match
39
40 Keyword Args:
41 mostly (None or a float between 0 and 1): \
42 Successful if at least mostly fraction of values match the expectation. \
43 For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
44
45 Other Parameters:
46 result_format (str or None): \
47 Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
48 For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
49 include_config (boolean): \
50 If True, then include the expectation config as part of the result object.
51 catch_exceptions (boolean or None): \
52 If True, then catch exceptions and include them as part of the result object. \
53 For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
54 meta (dict or None): \
55 A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
56 modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
57
58 Returns:
59 An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
60
61 Exact fields vary depending on the values passed to result_format, include_config, catch_exceptions, and meta.
62
63 See Also:
64 [expect_column_values_to_match_regex](https://greatexpectations.io/expectations/expect_column_values_to_match_regex)
65 [expect_column_values_to_match_regex_list](https://greatexpectations.io/expectations/expect_column_values_to_match_regex_list)
66 [expect_column_values_to_not_match_regex](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex)
67 [expect_column_values_to_match_like_pattern](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern)
68 [expect_column_values_to_match_like_pattern_list](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern_list)
69 [expect_column_values_to_not_match_like_pattern](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern)
70 [expect_column_values_to_not_match_like_pattern_list](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern_list)
71 """
72
73 library_metadata = {
74 "maturity": "production",
75 "tags": ["core expectation", "column map expectation"],
76 "contributors": [
77 "@great_expectations",
78 ],
79 "requirements": [],
80 "has_full_test_suite": True,
81 "manually_reviewed_code": True,
82 }
83
84 map_metric = "column_values.not_match_regex_list"
85 success_keys = (
86 "regex_list",
87 "mostly",
88 )
89 default_kwarg_values = {
90 "row_condition": None,
91 "condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
92 "mostly": 1,
93 "result_format": "BASIC",
94 "include_config": True,
95 "catch_exceptions": True,
96 }
97 args_keys = (
98 "column",
99 "regex_list",
100 )
101
102 def validate_configuration(
103 self, configuration: Optional[ExpectationConfiguration] = None
104 ) -> None:
105 """Validates the configuration for the Expectation.
106
107 For `expect_column_values_to_match_regex_list`
108 we require that the `configuraton.kwargs` contain a `regex_list` key that is either
109 a non-empty `list[str]` or a `dict[list]` with `$PARAMETER` key with list of regexes.
110
111 Args:
112 configuration: The ExpectationConfiguration to be validated.
113
114 Raises:
115 InvalidExpectationConfigurationError: The configuraton does not contain the values required by the Expectation
116 """
117 super().validate_configuration(configuration)
118 configuration = configuration or self.configuration
119 try:
120 assert "regex_list" in configuration.kwargs, "regex_list is required"
121 assert isinstance(
122 configuration.kwargs["regex_list"], (list, dict)
123 ), "regex_list must be a list of regexes"
124 if (
125 not isinstance(configuration.kwargs["regex_list"], dict)
126 and len(configuration.kwargs["regex_list"]) > 0
127 ):
128 for i in configuration.kwargs["regex_list"]:
129 assert isinstance(i, str), "regexes in list must be strings"
130 if isinstance(configuration.kwargs["regex_list"], dict):
131 assert (
132 "$PARAMETER" in configuration.kwargs["regex_list"]
133 ), 'Evaluation Parameter dict for regex_list kwarg must have "$PARAMETER" key.'
134 except AssertionError as e:
135 raise InvalidExpectationConfigurationError(str(e))
136
137 @classmethod
138 def _prescriptive_template(
139 cls,
140 renderer_configuration: RendererConfiguration,
141 ) -> RendererConfiguration:
142 add_param_args: AddParamArgs = (
143 ("column", RendererValueType.STRING),
144 ("regex_list", RendererValueType.ARRAY),
145 ("mostly", RendererValueType.NUMBER),
146 )
147 for name, param_type in add_param_args:
148 renderer_configuration.add_param(name=name, param_type=param_type)
149
150 params = renderer_configuration.params
151
152 if not params.regex_list or not params.regex_list.value:
153 values_string = "[ ]"
154 else:
155 array_param_name = "regex_list"
156 param_prefix = "v__"
157 renderer_configuration = cls._add_array_params(
158 array_param_name=array_param_name,
159 param_prefix=param_prefix,
160 renderer_configuration=renderer_configuration,
161 )
162 values_string: str = cls._get_array_string(
163 array_param_name=array_param_name,
164 param_prefix=param_prefix,
165 renderer_configuration=renderer_configuration,
166 )
167
168 template_str = (
169 "values must not match any of the following regular expressions: "
170 + values_string
171 )
172
173 if params.mostly and params.mostly.value < 1.0:
174 renderer_configuration = cls._add_mostly_pct_param(
175 renderer_configuration=renderer_configuration
176 )
177 template_str += ", at least $mostly_pct % of the time."
178 else:
179 template_str += "."
180
181 if renderer_configuration.include_column_name:
182 template_str = f"$column {template_str}"
183
184 renderer_configuration.template_str = template_str
185
186 return renderer_configuration
187
188 @classmethod
189 @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
190 @render_evaluation_parameter_string
191 def _prescriptive_renderer(
192 cls,
193 configuration: Optional[ExpectationConfiguration] = None,
194 result: Optional[ExpectationValidationResult] = None,
195 runtime_configuration: Optional[dict] = None,
196 **kwargs,
197 ):
198 runtime_configuration = runtime_configuration or {}
199 include_column_name = (
200 False if runtime_configuration.get("include_column_name") is False else True
201 )
202 styling = runtime_configuration.get("styling")
203 params = substitute_none_for_missing(
204 configuration.kwargs,
205 ["column", "regex_list", "mostly", "row_condition", "condition_parser"],
206 )
207
208 if not params.get("regex_list") or len(params.get("regex_list")) == 0:
209 values_string = "[ ]"
210 else:
211 for i, v in enumerate(params["regex_list"]):
212 params[f"v__{str(i)}"] = v
213 values_string = " ".join(
214 [f"$v__{str(i)}" for i, v in enumerate(params["regex_list"])]
215 )
216
217 template_str = (
218 "values must not match any of the following regular expressions: "
219 + values_string
220 )
221
222 if params["mostly"] is not None and params["mostly"] < 1.0:
223 params["mostly_pct"] = num_to_str(
224 params["mostly"] * 100, precision=15, no_scientific=True
225 )
226 # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
227 template_str += ", at least $mostly_pct % of the time."
228 else:
229 template_str += "."
230
231 if include_column_name:
232 template_str = f"$column {template_str}"
233
234 if params["row_condition"] is not None:
235 (
236 conditional_template_str,
237 conditional_params,
238 ) = parse_row_condition_string_pandas_engine(params["row_condition"])
239 template_str = f"{conditional_template_str}, then {template_str}"
240 params.update(conditional_params)
241
242 return [
243 RenderedStringTemplateContent(
244 **{
245 "content_block_type": "string_template",
246 "string_template": {
247 "template": template_str,
248 "params": params,
249 "styling": styling,
250 },
251 }
252 )
253 ]
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py b/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py
--- a/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py
+++ b/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py
@@ -102,17 +102,20 @@
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
- """Validates the configuration for the Expectation.
+ """Validates the configuration of an Expectation.
- For `expect_column_values_to_match_regex_list`
- we require that the `configuraton.kwargs` contain a `regex_list` key that is either
- a non-empty `list[str]` or a `dict[list]` with `$PARAMETER` key with list of regexes.
+ For `expect_column_values_to_not_match_regex_list` it is required that:
+ - 'regex_list' kwarg is of type list or dict
+ - if 'regex_list' is list, assert is non-empty and each entry is of type str
+ - if 'regex_list' is dict, assert a key "$PARAMETER" is present
Args:
- configuration: The ExpectationConfiguration to be validated.
+ configuration: An `ExpectationConfiguration` to validate. If no configuration is provided, it will be pulled
+ from the configuration attribute of the Expectation instance.
Raises:
- InvalidExpectationConfigurationError: The configuraton does not contain the values required by the Expectation
+ InvalidExpectationConfigurationError: The configuration does not contain the values required by the
+ Expectation.
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
@@ -120,7 +123,7 @@
assert "regex_list" in configuration.kwargs, "regex_list is required"
assert isinstance(
configuration.kwargs["regex_list"], (list, dict)
- ), "regex_list must be a list of regexes"
+ ), "regex_list must be a list or dict of regexes"
if (
not isinstance(configuration.kwargs["regex_list"], dict)
and len(configuration.kwargs["regex_list"]) > 0
|
{"golden_diff": "diff --git a/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py b/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py\n--- a/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py\n+++ b/great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py\n@@ -102,17 +102,20 @@\n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> None:\n- \"\"\"Validates the configuration for the Expectation.\n+ \"\"\"Validates the configuration of an Expectation.\n \n- For `expect_column_values_to_match_regex_list`\n- we require that the `configuraton.kwargs` contain a `regex_list` key that is either\n- a non-empty `list[str]` or a `dict[list]` with `$PARAMETER` key with list of regexes.\n+ For `expect_column_values_to_not_match_regex_list` it is required that:\n+ - 'regex_list' kwarg is of type list or dict\n+ - if 'regex_list' is list, assert is non-empty and each entry is of type str\n+ - if 'regex_list' is dict, assert a key \"$PARAMETER\" is present\n \n Args:\n- configuration: The ExpectationConfiguration to be validated.\n+ configuration: An `ExpectationConfiguration` to validate. If no configuration is provided, it will be pulled\n+ from the configuration attribute of the Expectation instance.\n \n Raises:\n- InvalidExpectationConfigurationError: The configuraton does not contain the values required by the Expectation\n+ InvalidExpectationConfigurationError: The configuration does not contain the values required by the\n+ Expectation.\n \"\"\"\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n@@ -120,7 +123,7 @@\n assert \"regex_list\" in configuration.kwargs, \"regex_list is required\"\n assert isinstance(\n configuration.kwargs[\"regex_list\"], (list, dict)\n- ), \"regex_list must be a list of regexes\"\n+ ), \"regex_list must be a list or dict of regexes\"\n if (\n not isinstance(configuration.kwargs[\"regex_list\"], dict)\n and len(configuration.kwargs[\"regex_list\"]) > 0\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Optional\n\nfrom great_expectations.core import (\n ExpectationConfiguration,\n ExpectationValidationResult,\n)\nfrom great_expectations.expectations.expectation import (\n ColumnMapExpectation,\n InvalidExpectationConfigurationError,\n render_evaluation_parameter_string,\n)\nfrom great_expectations.render import LegacyRendererType, RenderedStringTemplateContent\nfrom great_expectations.render.renderer.renderer import renderer\nfrom great_expectations.render.renderer_configuration import (\n RendererConfiguration,\n RendererValueType,\n)\nfrom great_expectations.render.util import (\n num_to_str,\n parse_row_condition_string_pandas_engine,\n substitute_none_for_missing,\n)\n\nif TYPE_CHECKING:\n from great_expectations.render.renderer_configuration import AddParamArgs\n\n\nclass ExpectColumnValuesToNotMatchRegexList(ColumnMapExpectation):\n \"\"\"Expect the column entries to be strings that do not match any of a list of regular expressions. 
Matches can be anywhere in the string.\n\n expect_column_values_to_not_match_regex_list is a \\\n [Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).\n\n Args:\n column (str): \\\n The column name.\n regex_list (list): \\\n The list of regular expressions which the column entries should not match\n\n Keyword Args:\n mostly (None or a float between 0 and 1): \\\n Successful if at least mostly fraction of values match the expectation. \\\n For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).\n\n Other Parameters:\n result_format (str or None): \\\n Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \\\n For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).\n include_config (boolean): \\\n If True, then include the expectation config as part of the result object.\n catch_exceptions (boolean or None): \\\n If True, then catch exceptions and include them as part of the result object. \\\n For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).\n meta (dict or None): \\\n A JSON-serializable dictionary (nesting allowed) that will be included in the output without \\\n modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).\n\n Returns:\n An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)\n\n Exact fields vary depending on the values passed to result_format, include_config, catch_exceptions, and meta.\n\n See Also:\n [expect_column_values_to_match_regex](https://greatexpectations.io/expectations/expect_column_values_to_match_regex)\n [expect_column_values_to_match_regex_list](https://greatexpectations.io/expectations/expect_column_values_to_match_regex_list)\n [expect_column_values_to_not_match_regex](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex)\n [expect_column_values_to_match_like_pattern](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern)\n [expect_column_values_to_match_like_pattern_list](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern_list)\n [expect_column_values_to_not_match_like_pattern](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern)\n [expect_column_values_to_not_match_like_pattern_list](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern_list)\n \"\"\"\n\n library_metadata = {\n \"maturity\": \"production\",\n \"tags\": [\"core expectation\", \"column map expectation\"],\n \"contributors\": [\n \"@great_expectations\",\n ],\n \"requirements\": [],\n \"has_full_test_suite\": True,\n \"manually_reviewed_code\": True,\n }\n\n map_metric = \"column_values.not_match_regex_list\"\n success_keys = (\n \"regex_list\",\n \"mostly\",\n )\n default_kwarg_values = {\n \"row_condition\": None,\n \"condition_parser\": None, # we expect this to be explicitly set whenever a row_condition is passed\n \"mostly\": 1,\n \"result_format\": \"BASIC\",\n \"include_config\": True,\n \"catch_exceptions\": True,\n }\n args_keys = (\n \"column\",\n \"regex_list\",\n )\n\n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> 
None:\n \"\"\"Validates the configuration for the Expectation.\n\n For `expect_column_values_to_match_regex_list`\n we require that the `configuraton.kwargs` contain a `regex_list` key that is either\n a non-empty `list[str]` or a `dict[list]` with `$PARAMETER` key with list of regexes.\n\n Args:\n configuration: The ExpectationConfiguration to be validated.\n\n Raises:\n InvalidExpectationConfigurationError: The configuraton does not contain the values required by the Expectation\n \"\"\"\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n try:\n assert \"regex_list\" in configuration.kwargs, \"regex_list is required\"\n assert isinstance(\n configuration.kwargs[\"regex_list\"], (list, dict)\n ), \"regex_list must be a list of regexes\"\n if (\n not isinstance(configuration.kwargs[\"regex_list\"], dict)\n and len(configuration.kwargs[\"regex_list\"]) > 0\n ):\n for i in configuration.kwargs[\"regex_list\"]:\n assert isinstance(i, str), \"regexes in list must be strings\"\n if isinstance(configuration.kwargs[\"regex_list\"], dict):\n assert (\n \"$PARAMETER\" in configuration.kwargs[\"regex_list\"]\n ), 'Evaluation Parameter dict for regex_list kwarg must have \"$PARAMETER\" key.'\n except AssertionError as e:\n raise InvalidExpectationConfigurationError(str(e))\n\n @classmethod\n def _prescriptive_template(\n cls,\n renderer_configuration: RendererConfiguration,\n ) -> RendererConfiguration:\n add_param_args: AddParamArgs = (\n (\"column\", RendererValueType.STRING),\n (\"regex_list\", RendererValueType.ARRAY),\n (\"mostly\", RendererValueType.NUMBER),\n )\n for name, param_type in add_param_args:\n renderer_configuration.add_param(name=name, param_type=param_type)\n\n params = renderer_configuration.params\n\n if not params.regex_list or not params.regex_list.value:\n values_string = \"[ ]\"\n else:\n array_param_name = \"regex_list\"\n param_prefix = \"v__\"\n renderer_configuration = cls._add_array_params(\n array_param_name=array_param_name,\n param_prefix=param_prefix,\n renderer_configuration=renderer_configuration,\n )\n values_string: str = cls._get_array_string(\n array_param_name=array_param_name,\n param_prefix=param_prefix,\n renderer_configuration=renderer_configuration,\n )\n\n template_str = (\n \"values must not match any of the following regular expressions: \"\n + values_string\n )\n\n if params.mostly and params.mostly.value < 1.0:\n renderer_configuration = cls._add_mostly_pct_param(\n renderer_configuration=renderer_configuration\n )\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if renderer_configuration.include_column_name:\n template_str = f\"$column {template_str}\"\n\n renderer_configuration.template_str = template_str\n\n return renderer_configuration\n\n @classmethod\n @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)\n @render_evaluation_parameter_string\n def _prescriptive_renderer(\n cls,\n configuration: Optional[ExpectationConfiguration] = None,\n result: Optional[ExpectationValidationResult] = None,\n runtime_configuration: Optional[dict] = None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = (\n False if runtime_configuration.get(\"include_column_name\") is False else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"regex_list\", \"mostly\", \"row_condition\", \"condition_parser\"],\n )\n\n if not 
params.get(\"regex_list\") or len(params.get(\"regex_list\")) == 0:\n values_string = \"[ ]\"\n else:\n for i, v in enumerate(params[\"regex_list\"]):\n params[f\"v__{str(i)}\"] = v\n values_string = \" \".join(\n [f\"$v__{str(i)}\" for i, v in enumerate(params[\"regex_list\"])]\n )\n\n template_str = (\n \"values must not match any of the following regular expressions: \"\n + values_string\n )\n\n if params[\"mostly\"] is not None and params[\"mostly\"] < 1.0:\n params[\"mostly_pct\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n # params[\"mostly_pct\"] = \"{:.14f}\".format(params[\"mostly\"]*100).rstrip(\"0\").rstrip(\".\")\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = f\"$column {template_str}\"\n\n if params[\"row_condition\"] is not None:\n (\n conditional_template_str,\n conditional_params,\n ) = parse_row_condition_string_pandas_engine(params[\"row_condition\"])\n template_str = f\"{conditional_template_str}, then {template_str}\"\n params.update(conditional_params)\n\n return [\n RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": template_str,\n \"params\": params,\n \"styling\": styling,\n },\n }\n )\n ]\n", "path": "great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Optional\n\nfrom great_expectations.core import (\n ExpectationConfiguration,\n ExpectationValidationResult,\n)\nfrom great_expectations.expectations.expectation import (\n ColumnMapExpectation,\n InvalidExpectationConfigurationError,\n render_evaluation_parameter_string,\n)\nfrom great_expectations.render import LegacyRendererType, RenderedStringTemplateContent\nfrom great_expectations.render.renderer.renderer import renderer\nfrom great_expectations.render.renderer_configuration import (\n RendererConfiguration,\n RendererValueType,\n)\nfrom great_expectations.render.util import (\n num_to_str,\n parse_row_condition_string_pandas_engine,\n substitute_none_for_missing,\n)\n\nif TYPE_CHECKING:\n from great_expectations.render.renderer_configuration import AddParamArgs\n\n\nclass ExpectColumnValuesToNotMatchRegexList(ColumnMapExpectation):\n \"\"\"Expect the column entries to be strings that do not match any of a list of regular expressions. Matches can be anywhere in the string.\n\n expect_column_values_to_not_match_regex_list is a \\\n [Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).\n\n Args:\n column (str): \\\n The column name.\n regex_list (list): \\\n The list of regular expressions which the column entries should not match\n\n Keyword Args:\n mostly (None or a float between 0 and 1): \\\n Successful if at least mostly fraction of values match the expectation. \\\n For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).\n\n Other Parameters:\n result_format (str or None): \\\n Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. 
\\\n For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).\n include_config (boolean): \\\n If True, then include the expectation config as part of the result object.\n catch_exceptions (boolean or None): \\\n If True, then catch exceptions and include them as part of the result object. \\\n For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).\n meta (dict or None): \\\n A JSON-serializable dictionary (nesting allowed) that will be included in the output without \\\n modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).\n\n Returns:\n An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)\n\n Exact fields vary depending on the values passed to result_format, include_config, catch_exceptions, and meta.\n\n See Also:\n [expect_column_values_to_match_regex](https://greatexpectations.io/expectations/expect_column_values_to_match_regex)\n [expect_column_values_to_match_regex_list](https://greatexpectations.io/expectations/expect_column_values_to_match_regex_list)\n [expect_column_values_to_not_match_regex](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex)\n [expect_column_values_to_match_like_pattern](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern)\n [expect_column_values_to_match_like_pattern_list](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern_list)\n [expect_column_values_to_not_match_like_pattern](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern)\n [expect_column_values_to_not_match_like_pattern_list](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern_list)\n \"\"\"\n\n library_metadata = {\n \"maturity\": \"production\",\n \"tags\": [\"core expectation\", \"column map expectation\"],\n \"contributors\": [\n \"@great_expectations\",\n ],\n \"requirements\": [],\n \"has_full_test_suite\": True,\n \"manually_reviewed_code\": True,\n }\n\n map_metric = \"column_values.not_match_regex_list\"\n success_keys = (\n \"regex_list\",\n \"mostly\",\n )\n default_kwarg_values = {\n \"row_condition\": None,\n \"condition_parser\": None, # we expect this to be explicitly set whenever a row_condition is passed\n \"mostly\": 1,\n \"result_format\": \"BASIC\",\n \"include_config\": True,\n \"catch_exceptions\": True,\n }\n args_keys = (\n \"column\",\n \"regex_list\",\n )\n\n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> None:\n \"\"\"Validates the configuration of an Expectation.\n\n For `expect_column_values_to_not_match_regex_list` it is required that:\n - 'regex_list' kwarg is of type list or dict\n - if 'regex_list' is list, assert is non-empty and each entry is of type str\n - if 'regex_list' is dict, assert a key \"$PARAMETER\" is present\n\n Args:\n configuration: An `ExpectationConfiguration` to validate. 
If no configuration is provided, it will be pulled\n from the configuration attribute of the Expectation instance.\n\n Raises:\n InvalidExpectationConfigurationError: The configuration does not contain the values required by the\n Expectation.\n \"\"\"\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n try:\n assert \"regex_list\" in configuration.kwargs, \"regex_list is required\"\n assert isinstance(\n configuration.kwargs[\"regex_list\"], (list, dict)\n ), \"regex_list must be a list or dict of regexes\"\n if (\n not isinstance(configuration.kwargs[\"regex_list\"], dict)\n and len(configuration.kwargs[\"regex_list\"]) > 0\n ):\n for i in configuration.kwargs[\"regex_list\"]:\n assert isinstance(i, str), \"regexes in list must be strings\"\n if isinstance(configuration.kwargs[\"regex_list\"], dict):\n assert (\n \"$PARAMETER\" in configuration.kwargs[\"regex_list\"]\n ), 'Evaluation Parameter dict for regex_list kwarg must have \"$PARAMETER\" key.'\n except AssertionError as e:\n raise InvalidExpectationConfigurationError(str(e))\n\n @classmethod\n def _prescriptive_template(\n cls,\n renderer_configuration: RendererConfiguration,\n ) -> RendererConfiguration:\n add_param_args: AddParamArgs = (\n (\"column\", RendererValueType.STRING),\n (\"regex_list\", RendererValueType.ARRAY),\n (\"mostly\", RendererValueType.NUMBER),\n )\n for name, param_type in add_param_args:\n renderer_configuration.add_param(name=name, param_type=param_type)\n\n params = renderer_configuration.params\n\n if not params.regex_list or not params.regex_list.value:\n values_string = \"[ ]\"\n else:\n array_param_name = \"regex_list\"\n param_prefix = \"v__\"\n renderer_configuration = cls._add_array_params(\n array_param_name=array_param_name,\n param_prefix=param_prefix,\n renderer_configuration=renderer_configuration,\n )\n values_string: str = cls._get_array_string(\n array_param_name=array_param_name,\n param_prefix=param_prefix,\n renderer_configuration=renderer_configuration,\n )\n\n template_str = (\n \"values must not match any of the following regular expressions: \"\n + values_string\n )\n\n if params.mostly and params.mostly.value < 1.0:\n renderer_configuration = cls._add_mostly_pct_param(\n renderer_configuration=renderer_configuration\n )\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if renderer_configuration.include_column_name:\n template_str = f\"$column {template_str}\"\n\n renderer_configuration.template_str = template_str\n\n return renderer_configuration\n\n @classmethod\n @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)\n @render_evaluation_parameter_string\n def _prescriptive_renderer(\n cls,\n configuration: Optional[ExpectationConfiguration] = None,\n result: Optional[ExpectationValidationResult] = None,\n runtime_configuration: Optional[dict] = None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = (\n False if runtime_configuration.get(\"include_column_name\") is False else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"regex_list\", \"mostly\", \"row_condition\", \"condition_parser\"],\n )\n\n if not params.get(\"regex_list\") or len(params.get(\"regex_list\")) == 0:\n values_string = \"[ ]\"\n else:\n for i, v in enumerate(params[\"regex_list\"]):\n params[f\"v__{str(i)}\"] = v\n values_string = \" \".join(\n [f\"$v__{str(i)}\" for i, v in 
enumerate(params[\"regex_list\"])]\n )\n\n template_str = (\n \"values must not match any of the following regular expressions: \"\n + values_string\n )\n\n if params[\"mostly\"] is not None and params[\"mostly\"] < 1.0:\n params[\"mostly_pct\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n # params[\"mostly_pct\"] = \"{:.14f}\".format(params[\"mostly\"]*100).rstrip(\"0\").rstrip(\".\")\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = f\"$column {template_str}\"\n\n if params[\"row_condition\"] is not None:\n (\n conditional_template_str,\n conditional_params,\n ) = parse_row_condition_string_pandas_engine(params[\"row_condition\"])\n template_str = f\"{conditional_template_str}, then {template_str}\"\n params.update(conditional_params)\n\n return [\n RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": template_str,\n \"params\": params,\n \"styling\": styling,\n },\n }\n )\n ]\n", "path": "great_expectations/expectations/core/expect_column_values_to_not_match_regex_list.py"}]}
| 3,110 | 512 |
gh_patches_debug_15326 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-1192 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Page tree broken after searching and going back
### Describe the Bug
When using the back button of the browser after searching, the page tree is expanded. After that, the collapsing function is broken.
Can we tell the browser to reload the full page after using the back button? However, it would be the best solution to actually keep the status of the tree after using the back button.
~~This could be related to #1131~~
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integreat_cms/cms/views/pages/page_tree_view.py`
Content:
```
1 import logging
2
3 from django.contrib import messages
4 from django.shortcuts import render, redirect
5 from django.utils.decorators import method_decorator
6 from django.utils.translation import ugettext as _
7 from django.views.generic import TemplateView
8
9 from ...constants import translation_status
10 from ...decorators import permission_required
11 from ...forms import PageFilterForm
12 from ...models import Language
13 from .page_context_mixin import PageContextMixin
14
15 logger = logging.getLogger(__name__)
16
17
18 @method_decorator(permission_required("cms.view_page"), name="dispatch")
19 class PageTreeView(TemplateView, PageContextMixin):
20 """
21 View for showing the page tree
22 """
23
24 #: Template for list of non-archived pages
25 template = "pages/page_tree.html"
26 #: Template for list of archived pages
27 template_archived = "pages/page_tree_archived.html"
28 #: Whether or not to show archived pages
29 archived = False
30
31 @property
32 def template_name(self):
33 """
34 Select correct HTML template, depending on :attr:`~integreat_cms.cms.views.pages.page_tree_view.PageTreeView.archived` flag
35 (see :class:`~django.views.generic.base.TemplateResponseMixin`)
36
37 :return: Path to HTML template
38 :rtype: str
39 """
40
41 return self.template_archived if self.archived else self.template
42
43 def get(self, request, *args, **kwargs):
44 r"""
45 Render page tree
46
47 :param request: The current request
48 :type request: ~django.http.HttpResponse
49
50 :param \*args: The supplied arguments
51 :type \*args: list
52
53 :param \**kwargs: The supplied keyword arguments
54 :type \**kwargs: dict
55
56 :return: The rendered template response
57 :rtype: ~django.template.response.TemplateResponse
58 """
59
60 # current region
61 region_slug = kwargs.get("region_slug")
62 region = request.region
63
64 # current language
65 language_slug = kwargs.get("language_slug")
66 if language_slug:
67 language = Language.objects.get(slug=language_slug)
68 elif region.default_language:
69 return redirect(
70 "pages",
71 **{
72 "region_slug": region_slug,
73 "language_slug": region.default_language.slug,
74 }
75 )
76 else:
77 messages.error(
78 request,
79 _("Please create at least one language node before creating pages."),
80 )
81 return redirect(
82 "language_tree",
83 **{
84 "region_slug": region_slug,
85 }
86 )
87
88 if not request.user.has_perm("cms.change_page"):
89 messages.warning(
90 request, _("You don't have the permission to edit or create pages.")
91 )
92
93 # Filter pages according to given filters, if any
94 filter_data = kwargs.get("filter_data")
95
96 if filter_data or self.archived:
97 page_queryset = region.pages.all()
98 else:
99 page_queryset = region.pages.filter(lft=1)
100 pages = page_queryset.cache_tree(archived=self.archived)[0]
101
102 if filter_data:
103 # Set data for filter form rendering
104 filter_form = PageFilterForm(data=filter_data)
105 pages = self.filter_pages(pages, language_slug, filter_form)
106 else:
107 filter_form = PageFilterForm()
108 filter_form.changed_data.clear()
109
110 return render(
111 request,
112 self.template_name,
113 {
114 **self.get_context_data(**kwargs),
115 "pages": pages,
116 "language": language,
117 "languages": region.active_languages,
118 "filter_form": filter_form,
119 },
120 )
121
122 def post(self, request, *args, **kwargs):
123 r"""
124 Apply page filters and render page tree
125
126 :param request: The current request
127 :type request: ~django.http.HttpResponse
128
129 :param \*args: The supplied arguments
130 :type \*args: list
131
132 :param \**kwargs: The supplied keyword arguments
133 :type \**kwargs: dict
134
135 :return: The rendered template response
136 :rtype: ~django.template.response.TemplateResponse
137 """
138 return self.get(request, *args, **kwargs, filter_data=request.POST)
139
140 @staticmethod
141 def filter_pages(pages, language_slug, filter_form):
142 """
143 Filter the pages list according to the given filter data
144
145 :param pages: The list of pages
146 :type pages: list
147
148 :param language_slug: The slug of the current language
149 :type language_slug: str
150
151 :param filter_form: The filter form
152 :type filter_form: integreat_cms.cms.forms.pages.page_filter_form.PageFilterForm
153
154 :return: The filtered page list
155 :rtype: list
156 """
157 if filter_form.is_valid():
158 query = filter_form.cleaned_data["query"]
159 if query:
160 # Buffer variable because the pages list should not be modified during iteration
161 filtered_pages = []
162 for page in pages:
163 translation = page.get_translation(language_slug)
164 if translation and (
165 query.lower() in translation.slug
166 or query.lower() in translation.title.lower()
167 ):
168 filtered_pages.append(page)
169 pages = filtered_pages
170
171 selected_status = filter_form.cleaned_data["translation_status"]
172 # Only filter if at least one checkbox but not all are checked
173 if 0 < len(selected_status) < len(translation_status.CHOICES):
174 # Buffer variable because the pages list should not be modified during iteration
175 filtered_pages = []
176 for page in pages:
177 translation_state = page.translation_states.get(language_slug)
178 if translation_state and translation_state[1] in selected_status:
179 filtered_pages.append(page)
180 pages = filtered_pages
181 return pages
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/integreat_cms/cms/views/pages/page_tree_view.py b/integreat_cms/cms/views/pages/page_tree_view.py
--- a/integreat_cms/cms/views/pages/page_tree_view.py
+++ b/integreat_cms/cms/views/pages/page_tree_view.py
@@ -107,7 +107,7 @@
filter_form = PageFilterForm()
filter_form.changed_data.clear()
- return render(
+ response = render(
request,
self.template_name,
{
@@ -118,6 +118,9 @@
"filter_form": filter_form,
},
)
+ # Disable browser cache of page tree to prevent subpages from being expanded after using "back"-button
+ response["Cache-Control"] = "no-store, must-revalidate"
+ return response
def post(self, request, *args, **kwargs):
r"""
|
{"golden_diff": "diff --git a/integreat_cms/cms/views/pages/page_tree_view.py b/integreat_cms/cms/views/pages/page_tree_view.py\n--- a/integreat_cms/cms/views/pages/page_tree_view.py\n+++ b/integreat_cms/cms/views/pages/page_tree_view.py\n@@ -107,7 +107,7 @@\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n \n- return render(\n+ response = render(\n request,\n self.template_name,\n {\n@@ -118,6 +118,9 @@\n \"filter_form\": filter_form,\n },\n )\n+ # Disable browser cache of page tree to prevent subpages from being expanded after using \"back\"-button\n+ response[\"Cache-Control\"] = \"no-store, must-revalidate\"\n+ return response\n \n def post(self, request, *args, **kwargs):\n r\"\"\"\n", "issue": "Page tree broken after searching and going back\n### Describe the Bug\r\nWhen using the back button of the browser after searching, the page tree is expanded. After that, the collapsing function is broken.\r\n\r\nCan we tell the browser to reload the full page after using the back button? However, it would be the best solution to actually keep the status of the tree after using the back button.\r\n\r\n~~This could be related to #1131~~\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import translation_status\nfrom ...decorators import permission_required\nfrom ...forms import PageFilterForm\nfrom ...models import Language\nfrom .page_context_mixin import PageContextMixin\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_page\"), name=\"dispatch\")\nclass PageTreeView(TemplateView, PageContextMixin):\n \"\"\"\n View for showing the page tree\n \"\"\"\n\n #: Template for list of non-archived pages\n template = \"pages/page_tree.html\"\n #: Template for list of archived pages\n template_archived = \"pages/page_tree_archived.html\"\n #: Whether or not to show archived pages\n archived = False\n\n @property\n def template_name(self):\n \"\"\"\n Select correct HTML template, depending on :attr:`~integreat_cms.cms.views.pages.page_tree_view.PageTreeView.archived` flag\n (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n\n :return: Path to HTML template\n :rtype: str\n \"\"\"\n\n return self.template_archived if self.archived else self.template\n\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n # current region\n region_slug = kwargs.get(\"region_slug\")\n region = request.region\n\n # current language\n language_slug = kwargs.get(\"language_slug\")\n if language_slug:\n language = Language.objects.get(slug=language_slug)\n elif region.default_language:\n return redirect(\n \"pages\",\n **{\n \"region_slug\": region_slug,\n \"language_slug\": region.default_language.slug,\n }\n )\n else:\n messages.error(\n request,\n _(\"Please create at least one language node before creating pages.\"),\n )\n return redirect(\n \"language_tree\",\n **{\n \"region_slug\": region_slug,\n }\n )\n\n if not 
request.user.has_perm(\"cms.change_page\"):\n messages.warning(\n request, _(\"You don't have the permission to edit or create pages.\")\n )\n\n # Filter pages according to given filters, if any\n filter_data = kwargs.get(\"filter_data\")\n\n if filter_data or self.archived:\n page_queryset = region.pages.all()\n else:\n page_queryset = region.pages.filter(lft=1)\n pages = page_queryset.cache_tree(archived=self.archived)[0]\n\n if filter_data:\n # Set data for filter form rendering\n filter_form = PageFilterForm(data=filter_data)\n pages = self.filter_pages(pages, language_slug, filter_form)\n else:\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"pages\": pages,\n \"language\": language,\n \"languages\": region.active_languages,\n \"filter_form\": filter_form,\n },\n )\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply page filters and render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n return self.get(request, *args, **kwargs, filter_data=request.POST)\n\n @staticmethod\n def filter_pages(pages, language_slug, filter_form):\n \"\"\"\n Filter the pages list according to the given filter data\n\n :param pages: The list of pages\n :type pages: list\n\n :param language_slug: The slug of the current language\n :type language_slug: str\n\n :param filter_form: The filter form\n :type filter_form: integreat_cms.cms.forms.pages.page_filter_form.PageFilterForm\n\n :return: The filtered page list\n :rtype: list\n \"\"\"\n if filter_form.is_valid():\n query = filter_form.cleaned_data[\"query\"]\n if query:\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation = page.get_translation(language_slug)\n if translation and (\n query.lower() in translation.slug\n or query.lower() in translation.title.lower()\n ):\n filtered_pages.append(page)\n pages = filtered_pages\n\n selected_status = filter_form.cleaned_data[\"translation_status\"]\n # Only filter if at least one checkbox but not all are checked\n if 0 < len(selected_status) < len(translation_status.CHOICES):\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation_state = page.translation_states.get(language_slug)\n if translation_state and translation_state[1] in selected_status:\n filtered_pages.append(page)\n pages = filtered_pages\n return pages\n", "path": "integreat_cms/cms/views/pages/page_tree_view.py"}], "after_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import translation_status\nfrom ...decorators import permission_required\nfrom ...forms import PageFilterForm\nfrom ...models import Language\nfrom .page_context_mixin import PageContextMixin\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_page\"), name=\"dispatch\")\nclass PageTreeView(TemplateView, 
PageContextMixin):\n \"\"\"\n View for showing the page tree\n \"\"\"\n\n #: Template for list of non-archived pages\n template = \"pages/page_tree.html\"\n #: Template for list of archived pages\n template_archived = \"pages/page_tree_archived.html\"\n #: Whether or not to show archived pages\n archived = False\n\n @property\n def template_name(self):\n \"\"\"\n Select correct HTML template, depending on :attr:`~integreat_cms.cms.views.pages.page_tree_view.PageTreeView.archived` flag\n (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n\n :return: Path to HTML template\n :rtype: str\n \"\"\"\n\n return self.template_archived if self.archived else self.template\n\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n # current region\n region_slug = kwargs.get(\"region_slug\")\n region = request.region\n\n # current language\n language_slug = kwargs.get(\"language_slug\")\n if language_slug:\n language = Language.objects.get(slug=language_slug)\n elif region.default_language:\n return redirect(\n \"pages\",\n **{\n \"region_slug\": region_slug,\n \"language_slug\": region.default_language.slug,\n }\n )\n else:\n messages.error(\n request,\n _(\"Please create at least one language node before creating pages.\"),\n )\n return redirect(\n \"language_tree\",\n **{\n \"region_slug\": region_slug,\n }\n )\n\n if not request.user.has_perm(\"cms.change_page\"):\n messages.warning(\n request, _(\"You don't have the permission to edit or create pages.\")\n )\n\n # Filter pages according to given filters, if any\n filter_data = kwargs.get(\"filter_data\")\n\n if filter_data or self.archived:\n page_queryset = region.pages.all()\n else:\n page_queryset = region.pages.filter(lft=1)\n pages = page_queryset.cache_tree(archived=self.archived)[0]\n\n if filter_data:\n # Set data for filter form rendering\n filter_form = PageFilterForm(data=filter_data)\n pages = self.filter_pages(pages, language_slug, filter_form)\n else:\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n\n response = render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"pages\": pages,\n \"language\": language,\n \"languages\": region.active_languages,\n \"filter_form\": filter_form,\n },\n )\n # Disable browser cache of page tree to prevent subpages from being expanded after using \"back\"-button\n response[\"Cache-Control\"] = \"no-store, must-revalidate\"\n return response\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply page filters and render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n return self.get(request, *args, **kwargs, filter_data=request.POST)\n\n @staticmethod\n def filter_pages(pages, language_slug, filter_form):\n \"\"\"\n Filter the pages list according to the given filter data\n\n :param pages: The list of pages\n :type pages: list\n\n :param language_slug: The slug of the current language\n :type 
language_slug: str\n\n :param filter_form: The filter form\n :type filter_form: integreat_cms.cms.forms.pages.page_filter_form.PageFilterForm\n\n :return: The filtered page list\n :rtype: list\n \"\"\"\n if filter_form.is_valid():\n query = filter_form.cleaned_data[\"query\"]\n if query:\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation = page.get_translation(language_slug)\n if translation and (\n query.lower() in translation.slug\n or query.lower() in translation.title.lower()\n ):\n filtered_pages.append(page)\n pages = filtered_pages\n\n selected_status = filter_form.cleaned_data[\"translation_status\"]\n # Only filter if at least one checkbox but not all are checked\n if 0 < len(selected_status) < len(translation_status.CHOICES):\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation_state = page.translation_states.get(language_slug)\n if translation_state and translation_state[1] in selected_status:\n filtered_pages.append(page)\n pages = filtered_pages\n return pages\n", "path": "integreat_cms/cms/views/pages/page_tree_view.py"}]}
| 2,002 | 197 |
gh_patches_debug_17579 | rasdani/github-patches | git_diff | bokeh__bokeh-7697 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
examples/howto/{js_events,events_app} contain bad CSS
* https://github.com/bokeh/bokeh/blob/master/examples/howto/js_events.py#L19
* https://github.com/bokeh/bokeh/blob/master/examples/howto/events_app.py#L21
* the same in user_guide
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/howto/events_app.py`
Content:
```
1 """ Demonstration Bokeh app of how to register event callbacks in both
2 Javascript and Python using an adaptation of the color_scatter example
3 from the bokeh gallery. This example extends the js_events.py example
4 with corresponding Python event callbacks.
5 """
6
7 import numpy as np
8
9 from bokeh.io import curdoc
10 from bokeh.plotting import figure
11 from bokeh import events
12 from bokeh.models import CustomJS, Div, Button
13 from bokeh.layouts import column, row
14
15
16 def display_event(div, attributes=[]):
17 """
18 Function to build a suitable CustomJS to display the current event
19 in the div model.
20 """
21 style = 'float:left;clear:left;font_size=0.5pt'
22 return CustomJS(args=dict(div=div), code="""
23 var attrs = %s;
24 var args = [];
25 for (var i=0; i<attrs.length; i++ ) {
26 val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {
27 return val.toFixed ? Number(val.toFixed(2)) : val;
28 })
29 args.push(attrs[i] + '=' + val)
30 }
31 var line = "<span style=%r><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n";
32 var text = div.text.concat(line);
33 var lines = text.split("\\n")
34 if ( lines.length > 35 ) { lines.shift(); }
35 div.text = lines.join("\\n");
36 """ % (attributes, style))
37
38 def print_event(attributes=[]):
39 """
40 Function that returns a Python callback to pretty print the events.
41 """
42 def python_callback(event):
43 cls_name = event.__class__.__name__
44 attrs = ', '.join(['{attr}={val}'.format(attr=attr, val=event.__dict__[attr])
45 for attr in attributes])
46 print('{cls_name}({attrs})'.format(cls_name=cls_name, attrs=attrs))
47 return python_callback
48
49 # Follows the color_scatter gallery example
50
51 N = 4000
52 x = np.random.random(size=N) * 100
53 y = np.random.random(size=N) * 100
54 radii = np.random.random(size=N) * 1.5
55 colors = [
56 "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
57 ]
58
59 p = figure(tools="pan,wheel_zoom,zoom_in,zoom_out,reset,tap,lasso_select,box_select")
60
61 p.scatter(x, y, radius=radii,
62 fill_color=colors, fill_alpha=0.6,
63 line_color=None)
64
65 # Add a div to display events and a button to trigger button click events
66
67 div = Div(width=1000)
68 button = Button(label="Button", button_type="success")
69 layout = column(button, row(p, div))
70
71
72 point_attributes = ['x','y','sx','sy']
73 pan_attributes = point_attributes + ['delta_x', 'delta_y']
74 pinch_attributes = point_attributes + ['scale']
75 wheel_attributes = point_attributes+['delta']
76
77 ## Register Javascript event callbacks
78
79 # Button event
80 button.js_on_event(events.ButtonClick, display_event(div))
81
82 # LOD events
83 p.js_on_event(events.LODStart, display_event(div))
84 p.js_on_event(events.LODEnd, display_event(div))
85
86 # Point events
87
88 p.js_on_event(events.Tap, display_event(div, attributes=point_attributes))
89 p.js_on_event(events.DoubleTap, display_event(div, attributes=point_attributes))
90 p.js_on_event(events.Press, display_event(div, attributes=point_attributes))
91
92 # Mouse wheel event
93 p.js_on_event(events.MouseWheel, display_event(div,attributes=wheel_attributes))
94
95 # Mouse move, enter and leave
96 p.js_on_event(events.MouseMove, display_event(div, attributes=point_attributes))
97 p.js_on_event(events.MouseEnter, display_event(div, attributes=point_attributes))
98 p.js_on_event(events.MouseLeave, display_event(div, attributes=point_attributes))
99
100 # Pan events
101 p.js_on_event(events.Pan, display_event(div, attributes=pan_attributes))
102 p.js_on_event(events.PanStart, display_event(div, attributes=point_attributes))
103 p.js_on_event(events.PanEnd, display_event(div, attributes=point_attributes))
104
105 # Pinch events
106 p.js_on_event(events.Pinch, display_event(div, attributes=pinch_attributes))
107 p.js_on_event(events.PinchStart, display_event(div, attributes=point_attributes))
108 p.js_on_event(events.PinchEnd, display_event(div, attributes=point_attributes))
109
110 # Selection events
111 p.js_on_event(events.SelectionGeometry, display_event(div, attributes=['geometry', 'final']))
112
113 # Reset events
114 p.js_on_event(events.Reset, display_event(div))
115
116
117 ## Register Python event callbacks
118
119 # Button event
120 button.on_event(events.ButtonClick, print_event())
121
122 # LOD events
123 p.on_event(events.LODStart, print_event())
124 p.on_event(events.LODEnd, print_event())
125
126 # Point events
127
128 p.on_event(events.Tap, print_event(attributes=point_attributes))
129 p.on_event(events.DoubleTap, print_event(attributes=point_attributes))
130 p.on_event(events.Press, print_event(attributes=point_attributes))
131
132 # Mouse wheel event
133 p.on_event(events.MouseWheel, print_event(attributes=wheel_attributes))
134
135 # Mouse move, enter and leave
136 p.on_event(events.MouseMove, print_event(attributes=point_attributes))
137 p.on_event(events.MouseEnter, print_event(attributes=point_attributes))
138 p.on_event(events.MouseLeave, print_event(attributes=point_attributes))
139
140 # Pan events
141 p.on_event(events.Pan, print_event(attributes=pan_attributes))
142 p.on_event(events.PanStart, print_event(attributes=point_attributes))
143 p.on_event(events.PanEnd, print_event(attributes=point_attributes))
144
145 # Pinch events
146 p.on_event(events.Pinch, print_event(attributes=pinch_attributes))
147 p.on_event(events.PinchStart, print_event(attributes=point_attributes))
148 p.on_event(events.PinchEnd, print_event(attributes=point_attributes))
149
150 # Selection events
151 p.on_event(events.SelectionGeometry, print_event(attributes=['geometry', 'final']))
152
153 # Reset events
154 p.on_event(events.Reset, print_event())
155
156 curdoc().add_root(layout)
157
```
Path: `examples/howto/js_events.py`
Content:
```
1 """ Demonstration of how to register event callbacks using an adaptation
2 of the color_scatter example from the bokeh gallery
3 """
4
5 import numpy as np
6
7 from bokeh.io import show, output_file
8 from bokeh.plotting import figure
9 from bokeh import events
10 from bokeh.models import CustomJS, Div, Button
11 from bokeh.layouts import column, row
12
13
14 def display_event(div, attributes=[]):
15 """
16 Function to build a suitable CustomJS to display the current event
17 in the div model.
18 """
19 style = 'float:left;clear:left;font_size=0.5pt'
20 return CustomJS(args=dict(div=div), code="""
21 var attrs = %s;
22 var args = [];
23 for (var i=0; i<attrs.length; i++ ) {
24 val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {
25 return val.toFixed ? Number(val.toFixed(2)) : val;
26 })
27 args.push(attrs[i] + '=' + val)
28 }
29 var line = "<span style=%r><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n";
30 var text = div.text.concat(line);
31 var lines = text.split("\\n")
32 if ( lines.length > 35 ) { lines.shift(); }
33 div.text = lines.join("\\n");
34 """ % (attributes, style))
35
36 # Follows the color_scatter gallery example
37
38 N = 4000
39 x = np.random.random(size=N) * 100
40 y = np.random.random(size=N) * 100
41 radii = np.random.random(size=N) * 1.5
42 colors = [
43 "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
44 ]
45
46 p = figure(tools="pan,wheel_zoom,zoom_in,zoom_out,reset,tap,lasso_select,box_select")
47
48 p.scatter(x, y, radius=radii,
49 fill_color=colors, fill_alpha=0.6,
50 line_color=None)
51
52 # Add a div to display events and a button to trigger button click events
53
54 div = Div(width=1000)
55 button = Button(label="Button", button_type="success")
56 layout = column(button, row(p, div))
57
58
59 ## Register event callbacks
60
61 # Button event
62 button.js_on_event(events.ButtonClick, display_event(div))
63
64 # LOD events
65 p.js_on_event(events.LODStart, display_event(div))
66 p.js_on_event(events.LODEnd, display_event(div))
67
68 # Point events
69 point_attributes = ['x','y','sx','sy']
70 p.js_on_event(events.Tap, display_event(div, attributes=point_attributes))
71 p.js_on_event(events.DoubleTap, display_event(div, attributes=point_attributes))
72 p.js_on_event(events.Press, display_event(div, attributes=point_attributes))
73
74 # Mouse wheel event
75 p.js_on_event(events.MouseWheel, display_event(div,attributes=point_attributes+['delta']))
76
77 # Mouse move, enter and leave
78 p.js_on_event(events.MouseMove, display_event(div, attributes=point_attributes))
79 p.js_on_event(events.MouseEnter, display_event(div, attributes=point_attributes))
80 p.js_on_event(events.MouseLeave, display_event(div, attributes=point_attributes))
81
82 # Pan events
83 pan_attributes = point_attributes + ['delta_x', 'delta_y']
84 p.js_on_event(events.Pan, display_event(div, attributes=pan_attributes))
85 p.js_on_event(events.PanStart, display_event(div, attributes=point_attributes))
86 p.js_on_event(events.PanEnd, display_event(div, attributes=point_attributes))
87
88 # Pinch events
89 pinch_attributes = point_attributes + ['scale']
90 p.js_on_event(events.Pinch, display_event(div, attributes=pinch_attributes))
91 p.js_on_event(events.PinchStart, display_event(div, attributes=point_attributes))
92 p.js_on_event(events.PinchEnd, display_event(div, attributes=point_attributes))
93
94 # Selection events
95 p.js_on_event(events.SelectionGeometry, display_event(div, attributes=['geometry', 'final']))
96
97 output_file("js_events.html", title="JS Events Example")
98 show(layout)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/howto/events_app.py b/examples/howto/events_app.py
--- a/examples/howto/events_app.py
+++ b/examples/howto/events_app.py
@@ -18,7 +18,7 @@
Function to build a suitable CustomJS to display the current event
in the div model.
"""
- style = 'float:left;clear:left;font_size=0.5pt'
+ style = 'float: left; clear: left; font-size: 0.5pt'
return CustomJS(args=dict(div=div), code="""
var attrs = %s;
var args = [];
diff --git a/examples/howto/js_events.py b/examples/howto/js_events.py
--- a/examples/howto/js_events.py
+++ b/examples/howto/js_events.py
@@ -16,7 +16,7 @@
Function to build a suitable CustomJS to display the current event
in the div model.
"""
- style = 'float:left;clear:left;font_size=0.5pt'
+ style = 'float: left; clear: left; font-size: 0.5pt'
return CustomJS(args=dict(div=div), code="""
var attrs = %s;
var args = [];
|
{"golden_diff": "diff --git a/examples/howto/events_app.py b/examples/howto/events_app.py\n--- a/examples/howto/events_app.py\n+++ b/examples/howto/events_app.py\n@@ -18,7 +18,7 @@\n Function to build a suitable CustomJS to display the current event\n in the div model.\n \"\"\"\n- style = 'float:left;clear:left;font_size=0.5pt'\n+ style = 'float: left; clear: left; font-size: 0.5pt'\n return CustomJS(args=dict(div=div), code=\"\"\"\n var attrs = %s;\n var args = [];\ndiff --git a/examples/howto/js_events.py b/examples/howto/js_events.py\n--- a/examples/howto/js_events.py\n+++ b/examples/howto/js_events.py\n@@ -16,7 +16,7 @@\n Function to build a suitable CustomJS to display the current event\n in the div model.\n \"\"\"\n- style = 'float:left;clear:left;font_size=0.5pt'\n+ style = 'float: left; clear: left; font-size: 0.5pt'\n return CustomJS(args=dict(div=div), code=\"\"\"\n var attrs = %s;\n var args = [];\n", "issue": "examples/howto/{js_events,events_app} contain bad CSS\n* https://github.com/bokeh/bokeh/blob/master/examples/howto/js_events.py#L19\r\n* https://github.com/bokeh/bokeh/blob/master/examples/howto/events_app.py#L21\r\n* the same in user_guide\n", "before_files": [{"content": "\"\"\" Demonstration Bokeh app of how to register event callbacks in both\nJavascript and Python using an adaptation of the color_scatter example\nfrom the bokeh gallery. This example extends the js_events.py example\nwith corresponding Python event callbacks.\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.io import curdoc\nfrom bokeh.plotting import figure\nfrom bokeh import events\nfrom bokeh.models import CustomJS, Div, Button\nfrom bokeh.layouts import column, row\n\n\ndef display_event(div, attributes=[]):\n \"\"\"\n Function to build a suitable CustomJS to display the current event\n in the div model.\n \"\"\"\n style = 'float:left;clear:left;font_size=0.5pt'\n return CustomJS(args=dict(div=div), code=\"\"\"\n var attrs = %s;\n var args = [];\n for (var i=0; i<attrs.length; i++ ) {\n val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {\n return val.toFixed ? 
Number(val.toFixed(2)) : val;\n })\n args.push(attrs[i] + '=' + val)\n }\n var line = \"<span style=%r><b>\" + cb_obj.event_name + \"</b>(\" + args.join(\", \") + \")</span>\\\\n\";\n var text = div.text.concat(line);\n var lines = text.split(\"\\\\n\")\n if ( lines.length > 35 ) { lines.shift(); }\n div.text = lines.join(\"\\\\n\");\n \"\"\" % (attributes, style))\n\ndef print_event(attributes=[]):\n \"\"\"\n Function that returns a Python callback to pretty print the events.\n \"\"\"\n def python_callback(event):\n cls_name = event.__class__.__name__\n attrs = ', '.join(['{attr}={val}'.format(attr=attr, val=event.__dict__[attr])\n for attr in attributes])\n print('{cls_name}({attrs})'.format(cls_name=cls_name, attrs=attrs))\n return python_callback\n\n# Follows the color_scatter gallery example\n\nN = 4000\nx = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n]\n\np = figure(tools=\"pan,wheel_zoom,zoom_in,zoom_out,reset,tap,lasso_select,box_select\")\n\np.scatter(x, y, radius=radii,\n fill_color=colors, fill_alpha=0.6,\n line_color=None)\n\n# Add a div to display events and a button to trigger button click events\n\ndiv = Div(width=1000)\nbutton = Button(label=\"Button\", button_type=\"success\")\nlayout = column(button, row(p, div))\n\n\npoint_attributes = ['x','y','sx','sy']\npan_attributes = point_attributes + ['delta_x', 'delta_y']\npinch_attributes = point_attributes + ['scale']\nwheel_attributes = point_attributes+['delta']\n\n## Register Javascript event callbacks\n\n# Button event\nbutton.js_on_event(events.ButtonClick, display_event(div))\n\n# LOD events\np.js_on_event(events.LODStart, display_event(div))\np.js_on_event(events.LODEnd, display_event(div))\n\n# Point events\n\np.js_on_event(events.Tap, display_event(div, attributes=point_attributes))\np.js_on_event(events.DoubleTap, display_event(div, attributes=point_attributes))\np.js_on_event(events.Press, display_event(div, attributes=point_attributes))\n\n# Mouse wheel event\np.js_on_event(events.MouseWheel, display_event(div,attributes=wheel_attributes))\n\n# Mouse move, enter and leave\np.js_on_event(events.MouseMove, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseEnter, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseLeave, display_event(div, attributes=point_attributes))\n\n# Pan events\np.js_on_event(events.Pan, display_event(div, attributes=pan_attributes))\np.js_on_event(events.PanStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PanEnd, display_event(div, attributes=point_attributes))\n\n# Pinch events\np.js_on_event(events.Pinch, display_event(div, attributes=pinch_attributes))\np.js_on_event(events.PinchStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PinchEnd, display_event(div, attributes=point_attributes))\n\n# Selection events\np.js_on_event(events.SelectionGeometry, display_event(div, attributes=['geometry', 'final']))\n\n# Reset events\np.js_on_event(events.Reset, display_event(div))\n\n\n## Register Python event callbacks\n\n# Button event\nbutton.on_event(events.ButtonClick, print_event())\n\n# LOD events\np.on_event(events.LODStart, print_event())\np.on_event(events.LODEnd, print_event())\n\n# Point events\n\np.on_event(events.Tap, print_event(attributes=point_attributes))\np.on_event(events.DoubleTap, 
print_event(attributes=point_attributes))\np.on_event(events.Press, print_event(attributes=point_attributes))\n\n# Mouse wheel event\np.on_event(events.MouseWheel, print_event(attributes=wheel_attributes))\n\n# Mouse move, enter and leave\np.on_event(events.MouseMove, print_event(attributes=point_attributes))\np.on_event(events.MouseEnter, print_event(attributes=point_attributes))\np.on_event(events.MouseLeave, print_event(attributes=point_attributes))\n\n# Pan events\np.on_event(events.Pan, print_event(attributes=pan_attributes))\np.on_event(events.PanStart, print_event(attributes=point_attributes))\np.on_event(events.PanEnd, print_event(attributes=point_attributes))\n\n# Pinch events\np.on_event(events.Pinch, print_event(attributes=pinch_attributes))\np.on_event(events.PinchStart, print_event(attributes=point_attributes))\np.on_event(events.PinchEnd, print_event(attributes=point_attributes))\n\n# Selection events\np.on_event(events.SelectionGeometry, print_event(attributes=['geometry', 'final']))\n\n# Reset events\np.on_event(events.Reset, print_event())\n\ncurdoc().add_root(layout)\n", "path": "examples/howto/events_app.py"}, {"content": "\"\"\" Demonstration of how to register event callbacks using an adaptation\nof the color_scatter example from the bokeh gallery\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.io import show, output_file\nfrom bokeh.plotting import figure\nfrom bokeh import events\nfrom bokeh.models import CustomJS, Div, Button\nfrom bokeh.layouts import column, row\n\n\ndef display_event(div, attributes=[]):\n \"\"\"\n Function to build a suitable CustomJS to display the current event\n in the div model.\n \"\"\"\n style = 'float:left;clear:left;font_size=0.5pt'\n return CustomJS(args=dict(div=div), code=\"\"\"\n var attrs = %s;\n var args = [];\n for (var i=0; i<attrs.length; i++ ) {\n val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {\n return val.toFixed ? 
Number(val.toFixed(2)) : val;\n })\n args.push(attrs[i] + '=' + val)\n }\n var line = \"<span style=%r><b>\" + cb_obj.event_name + \"</b>(\" + args.join(\", \") + \")</span>\\\\n\";\n var text = div.text.concat(line);\n var lines = text.split(\"\\\\n\")\n if ( lines.length > 35 ) { lines.shift(); }\n div.text = lines.join(\"\\\\n\");\n \"\"\" % (attributes, style))\n\n# Follows the color_scatter gallery example\n\nN = 4000\nx = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n]\n\np = figure(tools=\"pan,wheel_zoom,zoom_in,zoom_out,reset,tap,lasso_select,box_select\")\n\np.scatter(x, y, radius=radii,\n fill_color=colors, fill_alpha=0.6,\n line_color=None)\n\n# Add a div to display events and a button to trigger button click events\n\ndiv = Div(width=1000)\nbutton = Button(label=\"Button\", button_type=\"success\")\nlayout = column(button, row(p, div))\n\n\n## Register event callbacks\n\n# Button event\nbutton.js_on_event(events.ButtonClick, display_event(div))\n\n# LOD events\np.js_on_event(events.LODStart, display_event(div))\np.js_on_event(events.LODEnd, display_event(div))\n\n# Point events\npoint_attributes = ['x','y','sx','sy']\np.js_on_event(events.Tap, display_event(div, attributes=point_attributes))\np.js_on_event(events.DoubleTap, display_event(div, attributes=point_attributes))\np.js_on_event(events.Press, display_event(div, attributes=point_attributes))\n\n# Mouse wheel event\np.js_on_event(events.MouseWheel, display_event(div,attributes=point_attributes+['delta']))\n\n# Mouse move, enter and leave\np.js_on_event(events.MouseMove, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseEnter, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseLeave, display_event(div, attributes=point_attributes))\n\n# Pan events\npan_attributes = point_attributes + ['delta_x', 'delta_y']\np.js_on_event(events.Pan, display_event(div, attributes=pan_attributes))\np.js_on_event(events.PanStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PanEnd, display_event(div, attributes=point_attributes))\n\n# Pinch events\npinch_attributes = point_attributes + ['scale']\np.js_on_event(events.Pinch, display_event(div, attributes=pinch_attributes))\np.js_on_event(events.PinchStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PinchEnd, display_event(div, attributes=point_attributes))\n\n# Selection events\np.js_on_event(events.SelectionGeometry, display_event(div, attributes=['geometry', 'final']))\n\noutput_file(\"js_events.html\", title=\"JS Events Example\")\nshow(layout)\n", "path": "examples/howto/js_events.py"}], "after_files": [{"content": "\"\"\" Demonstration Bokeh app of how to register event callbacks in both\nJavascript and Python using an adaptation of the color_scatter example\nfrom the bokeh gallery. 
This example extends the js_events.py example\nwith corresponding Python event callbacks.\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.io import curdoc\nfrom bokeh.plotting import figure\nfrom bokeh import events\nfrom bokeh.models import CustomJS, Div, Button\nfrom bokeh.layouts import column, row\n\n\ndef display_event(div, attributes=[]):\n \"\"\"\n Function to build a suitable CustomJS to display the current event\n in the div model.\n \"\"\"\n style = 'float: left; clear: left; font-size: 0.5pt'\n return CustomJS(args=dict(div=div), code=\"\"\"\n var attrs = %s;\n var args = [];\n for (var i=0; i<attrs.length; i++ ) {\n val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {\n return val.toFixed ? Number(val.toFixed(2)) : val;\n })\n args.push(attrs[i] + '=' + val)\n }\n var line = \"<span style=%r><b>\" + cb_obj.event_name + \"</b>(\" + args.join(\", \") + \")</span>\\\\n\";\n var text = div.text.concat(line);\n var lines = text.split(\"\\\\n\")\n if ( lines.length > 35 ) { lines.shift(); }\n div.text = lines.join(\"\\\\n\");\n \"\"\" % (attributes, style))\n\ndef print_event(attributes=[]):\n \"\"\"\n Function that returns a Python callback to pretty print the events.\n \"\"\"\n def python_callback(event):\n cls_name = event.__class__.__name__\n attrs = ', '.join(['{attr}={val}'.format(attr=attr, val=event.__dict__[attr])\n for attr in attributes])\n print('{cls_name}({attrs})'.format(cls_name=cls_name, attrs=attrs))\n return python_callback\n\n# Follows the color_scatter gallery example\n\nN = 4000\nx = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n]\n\np = figure(tools=\"pan,wheel_zoom,zoom_in,zoom_out,reset,tap,lasso_select,box_select\")\n\np.scatter(x, y, radius=radii,\n fill_color=colors, fill_alpha=0.6,\n line_color=None)\n\n# Add a div to display events and a button to trigger button click events\n\ndiv = Div(width=1000)\nbutton = Button(label=\"Button\", button_type=\"success\")\nlayout = column(button, row(p, div))\n\n\npoint_attributes = ['x','y','sx','sy']\npan_attributes = point_attributes + ['delta_x', 'delta_y']\npinch_attributes = point_attributes + ['scale']\nwheel_attributes = point_attributes+['delta']\n\n## Register Javascript event callbacks\n\n# Button event\nbutton.js_on_event(events.ButtonClick, display_event(div))\n\n# LOD events\np.js_on_event(events.LODStart, display_event(div))\np.js_on_event(events.LODEnd, display_event(div))\n\n# Point events\n\np.js_on_event(events.Tap, display_event(div, attributes=point_attributes))\np.js_on_event(events.DoubleTap, display_event(div, attributes=point_attributes))\np.js_on_event(events.Press, display_event(div, attributes=point_attributes))\n\n# Mouse wheel event\np.js_on_event(events.MouseWheel, display_event(div,attributes=wheel_attributes))\n\n# Mouse move, enter and leave\np.js_on_event(events.MouseMove, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseEnter, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseLeave, display_event(div, attributes=point_attributes))\n\n# Pan events\np.js_on_event(events.Pan, display_event(div, attributes=pan_attributes))\np.js_on_event(events.PanStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PanEnd, display_event(div, attributes=point_attributes))\n\n# Pinch events\np.js_on_event(events.Pinch, display_event(div, 
attributes=pinch_attributes))\np.js_on_event(events.PinchStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PinchEnd, display_event(div, attributes=point_attributes))\n\n# Selection events\np.js_on_event(events.SelectionGeometry, display_event(div, attributes=['geometry', 'final']))\n\n# Reset events\np.js_on_event(events.Reset, display_event(div))\n\n\n## Register Python event callbacks\n\n# Button event\nbutton.on_event(events.ButtonClick, print_event())\n\n# LOD events\np.on_event(events.LODStart, print_event())\np.on_event(events.LODEnd, print_event())\n\n# Point events\n\np.on_event(events.Tap, print_event(attributes=point_attributes))\np.on_event(events.DoubleTap, print_event(attributes=point_attributes))\np.on_event(events.Press, print_event(attributes=point_attributes))\n\n# Mouse wheel event\np.on_event(events.MouseWheel, print_event(attributes=wheel_attributes))\n\n# Mouse move, enter and leave\np.on_event(events.MouseMove, print_event(attributes=point_attributes))\np.on_event(events.MouseEnter, print_event(attributes=point_attributes))\np.on_event(events.MouseLeave, print_event(attributes=point_attributes))\n\n# Pan events\np.on_event(events.Pan, print_event(attributes=pan_attributes))\np.on_event(events.PanStart, print_event(attributes=point_attributes))\np.on_event(events.PanEnd, print_event(attributes=point_attributes))\n\n# Pinch events\np.on_event(events.Pinch, print_event(attributes=pinch_attributes))\np.on_event(events.PinchStart, print_event(attributes=point_attributes))\np.on_event(events.PinchEnd, print_event(attributes=point_attributes))\n\n# Selection events\np.on_event(events.SelectionGeometry, print_event(attributes=['geometry', 'final']))\n\n# Reset events\np.on_event(events.Reset, print_event())\n\ncurdoc().add_root(layout)\n", "path": "examples/howto/events_app.py"}, {"content": "\"\"\" Demonstration of how to register event callbacks using an adaptation\nof the color_scatter example from the bokeh gallery\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.io import show, output_file\nfrom bokeh.plotting import figure\nfrom bokeh import events\nfrom bokeh.models import CustomJS, Div, Button\nfrom bokeh.layouts import column, row\n\n\ndef display_event(div, attributes=[]):\n \"\"\"\n Function to build a suitable CustomJS to display the current event\n in the div model.\n \"\"\"\n style = 'float: left; clear: left; font-size: 0.5pt'\n return CustomJS(args=dict(div=div), code=\"\"\"\n var attrs = %s;\n var args = [];\n for (var i=0; i<attrs.length; i++ ) {\n val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {\n return val.toFixed ? 
Number(val.toFixed(2)) : val;\n })\n args.push(attrs[i] + '=' + val)\n }\n var line = \"<span style=%r><b>\" + cb_obj.event_name + \"</b>(\" + args.join(\", \") + \")</span>\\\\n\";\n var text = div.text.concat(line);\n var lines = text.split(\"\\\\n\")\n if ( lines.length > 35 ) { lines.shift(); }\n div.text = lines.join(\"\\\\n\");\n \"\"\" % (attributes, style))\n\n# Follows the color_scatter gallery example\n\nN = 4000\nx = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n]\n\np = figure(tools=\"pan,wheel_zoom,zoom_in,zoom_out,reset,tap,lasso_select,box_select\")\n\np.scatter(x, y, radius=radii,\n fill_color=colors, fill_alpha=0.6,\n line_color=None)\n\n# Add a div to display events and a button to trigger button click events\n\ndiv = Div(width=1000)\nbutton = Button(label=\"Button\", button_type=\"success\")\nlayout = column(button, row(p, div))\n\n\n## Register event callbacks\n\n# Button event\nbutton.js_on_event(events.ButtonClick, display_event(div))\n\n# LOD events\np.js_on_event(events.LODStart, display_event(div))\np.js_on_event(events.LODEnd, display_event(div))\n\n# Point events\npoint_attributes = ['x','y','sx','sy']\np.js_on_event(events.Tap, display_event(div, attributes=point_attributes))\np.js_on_event(events.DoubleTap, display_event(div, attributes=point_attributes))\np.js_on_event(events.Press, display_event(div, attributes=point_attributes))\n\n# Mouse wheel event\np.js_on_event(events.MouseWheel, display_event(div,attributes=point_attributes+['delta']))\n\n# Mouse move, enter and leave\np.js_on_event(events.MouseMove, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseEnter, display_event(div, attributes=point_attributes))\np.js_on_event(events.MouseLeave, display_event(div, attributes=point_attributes))\n\n# Pan events\npan_attributes = point_attributes + ['delta_x', 'delta_y']\np.js_on_event(events.Pan, display_event(div, attributes=pan_attributes))\np.js_on_event(events.PanStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PanEnd, display_event(div, attributes=point_attributes))\n\n# Pinch events\npinch_attributes = point_attributes + ['scale']\np.js_on_event(events.Pinch, display_event(div, attributes=pinch_attributes))\np.js_on_event(events.PinchStart, display_event(div, attributes=point_attributes))\np.js_on_event(events.PinchEnd, display_event(div, attributes=point_attributes))\n\n# Selection events\np.js_on_event(events.SelectionGeometry, display_event(div, attributes=['geometry', 'final']))\n\noutput_file(\"js_events.html\", title=\"JS Events Example\")\nshow(layout)\n", "path": "examples/howto/js_events.py"}]}
| 3,192 | 270 |
gh_patches_debug_6676
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1513
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fox.com.tr not work with Streamlink
## **Checklist**
- [x] This is a bug report.
- [ ] This is a feature request.
- [ ] ] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
## **Description**
i cant see anything at fox.com.tr
i have test it with this links but i became black screen
## **Reproduction steps / Explicit stream URLs to test**
https://www.fox.com.tr/canli-yayin
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/foxtr.py`
Content:
```
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8
9
10 class FoxTR(Plugin):
11 """
12 Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin
13 """
14 url_re = re.compile(r"https?://www.fox.com.tr/canli-yayin")
15 playervars_re = re.compile(r"desktop\s*:\s*\[\s*\{\s*src\s*:\s*'(.*?)'", re.DOTALL)
16
17 @classmethod
18 def can_handle_url(cls, url):
19 return cls.url_re.match(url) is not None
20
21 def _get_streams(self):
22 res = http.get(self.url)
23 match = self.playervars_re.search(res.text)
24 if match:
25 stream_url = match.group(1)
26 return HLSStream.parse_variant_playlist(self.session, stream_url)
27
28
29 __plugin__ = FoxTR
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/foxtr.py b/src/streamlink/plugins/foxtr.py
--- a/src/streamlink/plugins/foxtr.py
+++ b/src/streamlink/plugins/foxtr.py
@@ -12,7 +12,7 @@
Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin
"""
url_re = re.compile(r"https?://www.fox.com.tr/canli-yayin")
- playervars_re = re.compile(r"desktop\s*:\s*\[\s*\{\s*src\s*:\s*'(.*?)'", re.DOTALL)
+ playervars_re = re.compile(r"source\s*:\s*\[\s*\{\s*videoSrc\s*:\s*'(.*?)'", re.DOTALL)
@classmethod
def can_handle_url(cls, url):
|
{"golden_diff": "diff --git a/src/streamlink/plugins/foxtr.py b/src/streamlink/plugins/foxtr.py\n--- a/src/streamlink/plugins/foxtr.py\n+++ b/src/streamlink/plugins/foxtr.py\n@@ -12,7 +12,7 @@\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n- playervars_re = re.compile(r\"desktop\\s*:\\s*\\[\\s*\\{\\s*src\\s*:\\s*'(.*?)'\", re.DOTALL)\n+ playervars_re = re.compile(r\"source\\s*:\\s*\\[\\s*\\{\\s*videoSrc\\s*:\\s*'(.*?)'\", re.DOTALL)\n \n @classmethod\n def can_handle_url(cls, url):\n", "issue": "Fox.com.tr not work with Streamlink\n## **Checklist**\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n## **Description**\r\n\r\n i cant see anything at fox.com.tr \r\n i have test it with this links but i became black screen \r\n\r\n## **Reproduction steps / Explicit stream URLs to test**\r\n\r\nhttps://www.fox.com.tr/canli-yayin\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass FoxTR(Plugin):\n \"\"\"\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n playervars_re = re.compile(r\"desktop\\s*:\\s*\\[\\s*\\{\\s*src\\s*:\\s*'(.*?)'\", re.DOTALL)\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n match = self.playervars_re.search(res.text)\n if match:\n stream_url = match.group(1)\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = FoxTR\n", "path": "src/streamlink/plugins/foxtr.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass FoxTR(Plugin):\n \"\"\"\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n playervars_re = re.compile(r\"source\\s*:\\s*\\[\\s*\\{\\s*videoSrc\\s*:\\s*'(.*?)'\", re.DOTALL)\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n match = self.playervars_re.search(res.text)\n if match:\n stream_url = match.group(1)\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = FoxTR\n", "path": "src/streamlink/plugins/foxtr.py"}]}
| 651 | 191 |
gh_patches_debug_66910
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-20200
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
randint_like
Add randint_like function to pytorch frontend
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/torch/random_sampling.py`
Content:
```
1 import ivy
2 from ivy.func_wrapper import with_supported_dtypes
3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
4
5
6 # ToDo: will need to create a Generator class to be able to fully test these functions
7
8
9 def seed() -> int:
10 """Return a 64 bit number used to seed the RNG."""
11 return int(ivy.randint(-(2**63), 2**63 - 1))
12
13
14 @to_ivy_arrays_and_back
15 def manual_seed(seed: int):
16 ivy.seed(seed_value=seed)
17 return None
18
19
20 @with_supported_dtypes(
21 {
22 "2.0.1 and below": (
23 "float32",
24 "float64",
25 )
26 },
27 "torch",
28 )
29 @to_ivy_arrays_and_back
30 def multinomial(input, num_samples, replacement=False, *, generator=None, out=None):
31 seed = generator.initial_seed() if generator is not None else None
32 return ivy.multinomial(
33 num_samples + 1, # doesn't matter because `probs` is provided, but should be
34 # greater than the number of samples
35 num_samples,
36 probs=input,
37 replace=replacement,
38 seed=seed,
39 out=out,
40 )
41
42
43 @with_supported_dtypes(
44 {
45 "2.0.1 and below": (
46 "float32",
47 "float64",
48 )
49 },
50 "torch",
51 )
52 @to_ivy_arrays_and_back
53 def poisson(input, generator=None):
54 seed = generator.initial_seed() if generator is not None else None
55 return ivy.poisson(input, seed=seed, shape=None)
56
57
58 @to_ivy_arrays_and_back
59 def randint(
60 low,
61 high,
62 size,
63 *,
64 generator=None,
65 out=None,
66 dtype=None,
67 layout=None,
68 device=None,
69 requires_grad=False,
70 ):
71 seed = generator.initial_seed() if generator is not None else None
72 return ivy.randint(
73 low,
74 high,
75 shape=size,
76 seed=seed,
77 out=out,
78 dtype=dtype,
79 device=device,
80 )
81
82
83 @to_ivy_arrays_and_back
84 def rand(
85 *size,
86 generator=None,
87 out=None,
88 dtype=None,
89 layout=None,
90 device=None,
91 requires_grad=False,
92 pin_memory=False,
93 **kwargs,
94 ):
95 if not size and "size" not in kwargs:
96 raise ValueError("Missing 1 required positional/keyword argument: size")
97 size = kwargs["size"] if not size else size
98 if (
99 isinstance(size, (list, tuple))
100 and len(size) == 1
101 and isinstance(size[0], (list, tuple))
102 ):
103 size = size[0]
104 seed = generator.initial_seed() if generator is not None else None
105 return ivy.random_uniform(
106 shape=size,
107 seed=seed,
108 out=out,
109 dtype=dtype,
110 device=device,
111 )
112
113
114 @with_supported_dtypes(
115 {
116 "2.0.1 and below": (
117 "float32",
118 "float64",
119 )
120 },
121 "torch",
122 )
123 @to_ivy_arrays_and_back
124 def normal(mean, std, *, generator=None, out=None):
125 seed = generator.initial_seed() if generator is not None else None
126 return ivy.random_normal(mean=mean, std=std, seed=seed, out=out)
127
128
129 @to_ivy_arrays_and_back
130 def rand_like(
131 input,
132 *,
133 dtype=None,
134 layout=None,
135 device=None,
136 requires_grad=False,
137 memory_format=False,
138 ):
139 shape = input.shape
140 if not dtype:
141 dtype = input.dtype
142
143 return ivy.random_uniform(
144 shape=shape,
145 dtype=dtype,
146 device=device,
147 )
148
149
150 @to_ivy_arrays_and_back
151 def randn(
152 *size,
153 generator=None,
154 out=None,
155 dtype=None,
156 layout=None,
157 device=None,
158 requires_grad=False,
159 pin_memory=False,
160 **kwargs,
161 ):
162 if not size and "size" not in kwargs:
163 raise ValueError("Missing 1 required positional/keyword argument: size")
164 size = kwargs["size"] if not size else size
165 if (
166 isinstance(size, (list, tuple))
167 and len(size) == 1
168 and isinstance(size[0], (list, tuple))
169 ):
170 size = size[0]
171 seed = generator.initial_seed() if generator is not None else None
172 return ivy.random_normal(
173 shape=size,
174 seed=seed,
175 out=out,
176 dtype=dtype,
177 device=device,
178 )
179
180
181 @to_ivy_arrays_and_back
182 def randn_like(
183 input,
184 *,
185 dtype=None,
186 layout=None,
187 device=None,
188 requires_grad=False,
189 memory_format=None,
190 ):
191 shape = input.shape
192 if not dtype:
193 dtype = input.dtype
194
195 return ivy.random_normal(
196 shape=shape,
197 dtype=dtype,
198 device=device,
199 )
200
201
202 @with_supported_dtypes(
203 {
204 "2.0.1 and below": (
205 "float32",
206 "float64",
207 )
208 },
209 "torch",
210 )
211 @to_ivy_arrays_and_back
212 def bernoulli(input, *, generator=None, out=None):
213 seed = generator.initial_seed() if generator is not None else None
214 return ivy.bernoulli(input, seed=seed, out=out)
215
216
217 @to_ivy_arrays_and_back
218 def randperm(
219 n,
220 *,
221 generator=None,
222 out=None,
223 dtype=ivy.int64,
224 layout=None,
225 device=None,
226 requires_grad=False,
227 pin_memory=False,
228 ):
229 seed = generator.initial_seed() if generator is not None else None
230 arr = ivy.arange(n, device=device, dtype=dtype)
231 ret = ivy.shuffle(arr, seed=seed, out=out)
232 return ret
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py
--- a/ivy/functional/frontends/torch/random_sampling.py
+++ b/ivy/functional/frontends/torch/random_sampling.py
@@ -230,3 +230,26 @@
arr = ivy.arange(n, device=device, dtype=dtype)
ret = ivy.shuffle(arr, seed=seed, out=out)
return ret
+
+
+@to_ivy_arrays_and_back
+def randint_like(
+ input,
+ low,
+ high,
+ *,
+ dtype=None,
+ layout=None,
+ device=None,
+ requires_grad=False,
+ memory_format=None,
+):
+ shape = input.shape
+
+ return ivy.randint(
+ low,
+ high,
+ shape=shape,
+ device=device,
+ dtype=dtype,
+ )
|
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py\n--- a/ivy/functional/frontends/torch/random_sampling.py\n+++ b/ivy/functional/frontends/torch/random_sampling.py\n@@ -230,3 +230,26 @@\n arr = ivy.arange(n, device=device, dtype=dtype)\n ret = ivy.shuffle(arr, seed=seed, out=out)\n return ret\n+\n+\n+@to_ivy_arrays_and_back\n+def randint_like(\n+ input,\n+ low,\n+ high,\n+ *,\n+ dtype=None,\n+ layout=None,\n+ device=None,\n+ requires_grad=False,\n+ memory_format=None,\n+):\n+ shape = input.shape\n+\n+ return ivy.randint(\n+ low,\n+ high,\n+ shape=shape,\n+ device=device,\n+ dtype=dtype,\n+ )\n", "issue": "randint_like\nAdd randint_like function to pytorch frontend\n", "before_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n# ToDo: will need to create a Generator class to be able to fully test these functions\n\n\ndef seed() -> int:\n \"\"\"Return a 64 bit number used to seed the RNG.\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return None\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n seed=seed,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.poisson(input, seed=seed, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef randint(\n low,\n high,\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.randint(\n low,\n high,\n shape=size,\n seed=seed,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef rand(\n *size,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n **kwargs,\n):\n if not size and \"size\" not in kwargs:\n raise ValueError(\"Missing 1 required positional/keyword argument: size\")\n size = kwargs[\"size\"] if not size else size\n if (\n isinstance(size, (list, tuple))\n and len(size) == 1\n and isinstance(size[0], (list, tuple))\n ):\n size = size[0]\n seed = generator.initial_seed() if generator is not None else None\n return ivy.random_uniform(\n shape=size,\n seed=seed,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef normal(mean, std, *, generator=None, out=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.random_normal(mean=mean, std=std, seed=seed, out=out)\n\n\n@to_ivy_arrays_and_back\ndef rand_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=False,\n):\n shape = 
input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_uniform(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n *size,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n **kwargs,\n):\n if not size and \"size\" not in kwargs:\n raise ValueError(\"Missing 1 required positional/keyword argument: size\")\n size = kwargs[\"size\"] if not size else size\n if (\n isinstance(size, (list, tuple))\n and len(size) == 1\n and isinstance(size[0], (list, tuple))\n ):\n size = size[0]\n seed = generator.initial_seed() if generator is not None else None\n return ivy.random_normal(\n shape=size,\n seed=seed,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=None,\n):\n shape = input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_normal(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef bernoulli(input, *, generator=None, out=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.bernoulli(input, seed=seed, out=out)\n\n\n@to_ivy_arrays_and_back\ndef randperm(\n n,\n *,\n generator=None,\n out=None,\n dtype=ivy.int64,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n):\n seed = generator.initial_seed() if generator is not None else None\n arr = ivy.arange(n, device=device, dtype=dtype)\n ret = ivy.shuffle(arr, seed=seed, out=out)\n return ret\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}], "after_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n# ToDo: will need to create a Generator class to be able to fully test these functions\n\n\ndef seed() -> int:\n \"\"\"Return a 64 bit number used to seed the RNG.\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return None\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n seed=seed,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.poisson(input, seed=seed, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef randint(\n low,\n high,\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.randint(\n low,\n high,\n shape=size,\n seed=seed,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef rand(\n *size,\n generator=None,\n out=None,\n 
dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n **kwargs,\n):\n if not size and \"size\" not in kwargs:\n raise ValueError(\"Missing 1 required positional/keyword argument: size\")\n size = kwargs[\"size\"] if not size else size\n if (\n isinstance(size, (list, tuple))\n and len(size) == 1\n and isinstance(size[0], (list, tuple))\n ):\n size = size[0]\n seed = generator.initial_seed() if generator is not None else None\n return ivy.random_uniform(\n shape=size,\n seed=seed,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef normal(mean, std, *, generator=None, out=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.random_normal(mean=mean, std=std, seed=seed, out=out)\n\n\n@to_ivy_arrays_and_back\ndef rand_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=False,\n):\n shape = input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_uniform(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n *size,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n **kwargs,\n):\n if not size and \"size\" not in kwargs:\n raise ValueError(\"Missing 1 required positional/keyword argument: size\")\n size = kwargs[\"size\"] if not size else size\n if (\n isinstance(size, (list, tuple))\n and len(size) == 1\n and isinstance(size[0], (list, tuple))\n ):\n size = size[0]\n seed = generator.initial_seed() if generator is not None else None\n return ivy.random_normal(\n shape=size,\n seed=seed,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=None,\n):\n shape = input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_normal(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@with_supported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef bernoulli(input, *, generator=None, out=None):\n seed = generator.initial_seed() if generator is not None else None\n return ivy.bernoulli(input, seed=seed, out=out)\n\n\n@to_ivy_arrays_and_back\ndef randperm(\n n,\n *,\n generator=None,\n out=None,\n dtype=ivy.int64,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n):\n seed = generator.initial_seed() if generator is not None else None\n arr = ivy.arange(n, device=device, dtype=dtype)\n ret = ivy.shuffle(arr, seed=seed, out=out)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef randint_like(\n input,\n low,\n high,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=None,\n):\n shape = input.shape\n\n return ivy.randint(\n low,\n high,\n shape=shape,\n device=device,\n dtype=dtype,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}]}
| 2,159 | 208 |