problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64 271-2.05k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_6542 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-3071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update to use new version of Hologram
As an extension of https://github.com/fishtown-analytics/hologram/issues/40 -- support Mashumaro in Hologram -- makes changes to pull in version 0.0.13 of Hologram.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 def read(fname):
23 return open(os.path.join(os.path.dirname(__file__), fname)).read()
24
25
26 package_name = "dbt-core"
27 package_version = "0.19.0"
28 description = """dbt (data build tool) is a command line tool that helps \
29 analysts and engineers transform data in their warehouse more effectively"""
30
31
32 setup(
33 name=package_name,
34 version=package_version,
35 description=description,
36 long_description=description,
37 author="Fishtown Analytics",
38 author_email="[email protected]",
39 url="https://github.com/fishtown-analytics/dbt",
40 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
41 package_data={
42 'dbt': [
43 'include/index.html',
44 'include/global_project/dbt_project.yml',
45 'include/global_project/docs/*.md',
46 'include/global_project/macros/*.sql',
47 'include/global_project/macros/**/*.sql',
48 'include/global_project/macros/**/**/*.sql',
49 'py.typed',
50 ]
51 },
52 test_suite='test',
53 entry_points={
54 'console_scripts': [
55 'dbt = dbt.main:main',
56 ],
57 },
58 scripts=[
59 'scripts/dbt',
60 ],
61 install_requires=[
62 'Jinja2==2.11.2',
63 'PyYAML>=3.11',
64 'sqlparse>=0.2.3,<0.4',
65 'networkx>=2.3,<3',
66 'minimal-snowplow-tracker==0.0.2',
67 'colorama>=0.3.9,<0.4.4',
68 'agate>=1.6,<2',
69 'isodate>=0.6,<0.7',
70 'json-rpc>=1.12,<2',
71 'werkzeug>=0.15,<2.0',
72 'dataclasses==0.6;python_version<"3.7"',
73 # 'hologram==0.0.12', # must be updated prior to release
74 'logbook>=1.5,<1.6',
75 'typing-extensions>=3.7.4,<3.8',
76 # the following are all to match snowflake-connector-python
77 'requests>=2.18.0,<2.24.0',
78 'idna<2.10',
79 'cffi>=1.9,<1.15',
80 ],
81 zip_safe=False,
82 classifiers=[
83 'Development Status :: 5 - Production/Stable',
84
85 'License :: OSI Approved :: Apache Software License',
86
87 'Operating System :: Microsoft :: Windows',
88 'Operating System :: MacOS :: MacOS X',
89 'Operating System :: POSIX :: Linux',
90
91 'Programming Language :: Python :: 3.6',
92 'Programming Language :: Python :: 3.7',
93 'Programming Language :: Python :: 3.8',
94 'Programming Language :: Python :: 3.9',
95 ],
96 python_requires=">=3.6.3",
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -70,7 +70,7 @@
'json-rpc>=1.12,<2',
'werkzeug>=0.15,<2.0',
'dataclasses==0.6;python_version<"3.7"',
- # 'hologram==0.0.12', # must be updated prior to release
+ 'hologram==0.0.13',
'logbook>=1.5,<1.6',
'typing-extensions>=3.7.4,<3.8',
# the following are all to match snowflake-connector-python
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -70,7 +70,7 @@\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses==0.6;python_version<\"3.7\"',\n- # 'hologram==0.0.12', # must be updated prior to release\n+ 'hologram==0.0.13',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n", "issue": "Update to use new version of Hologram\nAs an extension of https://github.com/fishtown-analytics/hologram/issues/40 -- support Mashumaro in Hologram -- makes changes to pull in version 0.0.13 of Hologram. \n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.19.0\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.4.4',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses==0.6;python_version<\"3.7\"',\n # 'hologram==0.0.12', # must be updated prior to release\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.24.0',\n 'idna<2.10',\n 'cffi>=1.9,<1.15',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n 
sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.19.0\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.4.4',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses==0.6;python_version<\"3.7\"',\n 'hologram==0.0.13',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.24.0',\n 'idna<2.10',\n 'cffi>=1.9,<1.15',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}]} | 1,296 | 159 |
gh_patches_debug_25528 | rasdani/github-patches | git_diff | scrapy__scrapy-2464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
String value for order of Scrapy component
If Scrapy component order is defined as a string, it leads to undefined behaviour on Python 2 and to the following errors on Python 3:
```
File "/usr/local/lib/python3.5/site-packages/scrapy/middleware.py", line 58, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python3.5/site-packages/scrapy/middleware.py", line 29, in from_settings
mwlist = cls._get_mwlist_from_settings(settings)
File "/usr/local/lib/python3.5/site-packages/scrapy/core/spidermw.py", line 21, in _get_mwlist_from_settings
return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))
File "/usr/local/lib/python3.5/site-packages/scrapy/utils/conf.py", line 47, in build_component_list
return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
builtins.TypeError: unorderable types: str() < int()
```
My guess that 1) order of a Scrapy component should be stated as of integer type (or `None`) and there should be a check somewhere, 2) or the sorting logic should be fixed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/conf.py`
Content:
```
1 import os
2 import sys
3 from operator import itemgetter
4
5 import six
6 from six.moves.configparser import SafeConfigParser
7
8 from scrapy.settings import BaseSettings
9 from scrapy.utils.deprecate import update_classpath
10 from scrapy.utils.python import without_none_values
11
12
13 def build_component_list(compdict, custom=None, convert=update_classpath):
14 """Compose a component list from a { class: order } dictionary."""
15
16 def _check_components(complist):
17 if len({convert(c) for c in complist}) != len(complist):
18 raise ValueError('Some paths in {!r} convert to the same object, '
19 'please update your settings'.format(complist))
20
21 def _map_keys(compdict):
22 if isinstance(compdict, BaseSettings):
23 compbs = BaseSettings()
24 for k, v in six.iteritems(compdict):
25 prio = compdict.getpriority(k)
26 if compbs.getpriority(convert(k)) == prio:
27 raise ValueError('Some paths in {!r} convert to the same '
28 'object, please update your settings'
29 ''.format(list(compdict.keys())))
30 else:
31 compbs.set(convert(k), v, priority=prio)
32 return compbs
33 else:
34 _check_components(compdict)
35 return {convert(k): v for k, v in six.iteritems(compdict)}
36
37 # BEGIN Backwards compatibility for old (base, custom) call signature
38 if isinstance(custom, (list, tuple)):
39 _check_components(custom)
40 return type(custom)(convert(c) for c in custom)
41
42 if custom is not None:
43 compdict.update(custom)
44 # END Backwards compatibility
45
46 compdict = without_none_values(_map_keys(compdict))
47 return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
48
49
50 def arglist_to_dict(arglist):
51 """Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a
52 dict
53 """
54 return dict(x.split('=', 1) for x in arglist)
55
56
57 def closest_scrapy_cfg(path='.', prevpath=None):
58 """Return the path to the closest scrapy.cfg file by traversing the current
59 directory and its parents
60 """
61 if path == prevpath:
62 return ''
63 path = os.path.abspath(path)
64 cfgfile = os.path.join(path, 'scrapy.cfg')
65 if os.path.exists(cfgfile):
66 return cfgfile
67 return closest_scrapy_cfg(os.path.dirname(path), path)
68
69
70 def init_env(project='default', set_syspath=True):
71 """Initialize environment to use command-line tool from inside a project
72 dir. This sets the Scrapy settings module and modifies the Python path to
73 be able to locate the project module.
74 """
75 cfg = get_config()
76 if cfg.has_option('settings', project):
77 os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)
78 closest = closest_scrapy_cfg()
79 if closest:
80 projdir = os.path.dirname(closest)
81 if set_syspath and projdir not in sys.path:
82 sys.path.append(projdir)
83
84
85 def get_config(use_closest=True):
86 """Get Scrapy config file as a SafeConfigParser"""
87 sources = get_sources(use_closest)
88 cfg = SafeConfigParser()
89 cfg.read(sources)
90 return cfg
91
92
93 def get_sources(use_closest=True):
94 xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
95 os.path.expanduser('~/.config')
96 sources = ['/etc/scrapy.cfg', r'c:\scrapy\scrapy.cfg',
97 xdg_config_home + '/scrapy.cfg',
98 os.path.expanduser('~/.scrapy.cfg')]
99 if use_closest:
100 sources.append(closest_scrapy_cfg())
101 return sources
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/utils/conf.py b/scrapy/utils/conf.py
--- a/scrapy/utils/conf.py
+++ b/scrapy/utils/conf.py
@@ -1,5 +1,6 @@
import os
import sys
+import numbers
from operator import itemgetter
import six
@@ -34,6 +35,13 @@
_check_components(compdict)
return {convert(k): v for k, v in six.iteritems(compdict)}
+ def _validate_values(compdict):
+ """Fail if a value in the components dict is not a real number or None."""
+ for name, value in six.iteritems(compdict):
+ if value is not None and not isinstance(value, numbers.Real):
+ raise ValueError('Invalid value {} for component {}, please provide ' \
+ 'a real number or None instead'.format(value, name))
+
# BEGIN Backwards compatibility for old (base, custom) call signature
if isinstance(custom, (list, tuple)):
_check_components(custom)
@@ -43,6 +51,7 @@
compdict.update(custom)
# END Backwards compatibility
+ _validate_values(compdict)
compdict = without_none_values(_map_keys(compdict))
return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
| {"golden_diff": "diff --git a/scrapy/utils/conf.py b/scrapy/utils/conf.py\n--- a/scrapy/utils/conf.py\n+++ b/scrapy/utils/conf.py\n@@ -1,5 +1,6 @@\n import os\n import sys\n+import numbers\n from operator import itemgetter\n \n import six\n@@ -34,6 +35,13 @@\n _check_components(compdict)\n return {convert(k): v for k, v in six.iteritems(compdict)}\n \n+ def _validate_values(compdict):\n+ \"\"\"Fail if a value in the components dict is not a real number or None.\"\"\"\n+ for name, value in six.iteritems(compdict):\n+ if value is not None and not isinstance(value, numbers.Real):\n+ raise ValueError('Invalid value {} for component {}, please provide ' \\\n+ 'a real number or None instead'.format(value, name))\n+\n # BEGIN Backwards compatibility for old (base, custom) call signature\n if isinstance(custom, (list, tuple)):\n _check_components(custom)\n@@ -43,6 +51,7 @@\n compdict.update(custom)\n # END Backwards compatibility\n \n+ _validate_values(compdict)\n compdict = without_none_values(_map_keys(compdict))\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\n", "issue": "String value for order of Scrapy component\nIf Scrapy component order is defined as a string, it leads to undefined behaviour on Python 2 and to the following errors on Python 3:\r\n```\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/middleware.py\", line 58, in from_crawler\r\n return cls.from_settings(crawler.settings, crawler)\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/middleware.py\", line 29, in from_settings\r\n mwlist = cls._get_mwlist_from_settings(settings)\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/core/spidermw.py\", line 21, in _get_mwlist_from_settings\r\n return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/utils/conf.py\", line 47, in build_component_list\r\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\r\nbuiltins.TypeError: unorderable types: str() < int()\r\n```\r\n\r\nMy guess that 1) order of a Scrapy component should be stated as of integer type (or `None`) and there should be a check somewhere, 2) or the sorting logic should be fixed.\n", "before_files": [{"content": "import os\nimport sys\nfrom operator import itemgetter\n\nimport six\nfrom six.moves.configparser import SafeConfigParser\n\nfrom scrapy.settings import BaseSettings\nfrom scrapy.utils.deprecate import update_classpath\nfrom scrapy.utils.python import without_none_values\n\n\ndef build_component_list(compdict, custom=None, convert=update_classpath):\n \"\"\"Compose a component list from a { class: order } dictionary.\"\"\"\n\n def _check_components(complist):\n if len({convert(c) for c in complist}) != len(complist):\n raise ValueError('Some paths in {!r} convert to the same object, '\n 'please update your settings'.format(complist))\n\n def _map_keys(compdict):\n if isinstance(compdict, BaseSettings):\n compbs = BaseSettings()\n for k, v in six.iteritems(compdict):\n prio = compdict.getpriority(k)\n if compbs.getpriority(convert(k)) == prio:\n raise ValueError('Some paths in {!r} convert to the same '\n 'object, please update your settings'\n ''.format(list(compdict.keys())))\n else:\n compbs.set(convert(k), v, priority=prio)\n return compbs\n else:\n _check_components(compdict)\n return {convert(k): v for k, v in six.iteritems(compdict)}\n\n # BEGIN Backwards compatibility for old (base, custom) call signature\n if isinstance(custom, (list, tuple)):\n 
_check_components(custom)\n return type(custom)(convert(c) for c in custom)\n\n if custom is not None:\n compdict.update(custom)\n # END Backwards compatibility\n\n compdict = without_none_values(_map_keys(compdict))\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\n\n\ndef arglist_to_dict(arglist):\n \"\"\"Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a\n dict\n \"\"\"\n return dict(x.split('=', 1) for x in arglist)\n\n\ndef closest_scrapy_cfg(path='.', prevpath=None):\n \"\"\"Return the path to the closest scrapy.cfg file by traversing the current\n directory and its parents\n \"\"\"\n if path == prevpath:\n return ''\n path = os.path.abspath(path)\n cfgfile = os.path.join(path, 'scrapy.cfg')\n if os.path.exists(cfgfile):\n return cfgfile\n return closest_scrapy_cfg(os.path.dirname(path), path)\n\n\ndef init_env(project='default', set_syspath=True):\n \"\"\"Initialize environment to use command-line tool from inside a project\n dir. This sets the Scrapy settings module and modifies the Python path to\n be able to locate the project module.\n \"\"\"\n cfg = get_config()\n if cfg.has_option('settings', project):\n os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)\n closest = closest_scrapy_cfg()\n if closest:\n projdir = os.path.dirname(closest)\n if set_syspath and projdir not in sys.path:\n sys.path.append(projdir)\n\n\ndef get_config(use_closest=True):\n \"\"\"Get Scrapy config file as a SafeConfigParser\"\"\"\n sources = get_sources(use_closest)\n cfg = SafeConfigParser()\n cfg.read(sources)\n return cfg\n\n\ndef get_sources(use_closest=True):\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \\\n os.path.expanduser('~/.config')\n sources = ['/etc/scrapy.cfg', r'c:\\scrapy\\scrapy.cfg',\n xdg_config_home + '/scrapy.cfg',\n os.path.expanduser('~/.scrapy.cfg')]\n if use_closest:\n sources.append(closest_scrapy_cfg())\n return sources\n", "path": "scrapy/utils/conf.py"}], "after_files": [{"content": "import os\nimport sys\nimport numbers\nfrom operator import itemgetter\n\nimport six\nfrom six.moves.configparser import SafeConfigParser\n\nfrom scrapy.settings import BaseSettings\nfrom scrapy.utils.deprecate import update_classpath\nfrom scrapy.utils.python import without_none_values\n\n\ndef build_component_list(compdict, custom=None, convert=update_classpath):\n \"\"\"Compose a component list from a { class: order } dictionary.\"\"\"\n\n def _check_components(complist):\n if len({convert(c) for c in complist}) != len(complist):\n raise ValueError('Some paths in {!r} convert to the same object, '\n 'please update your settings'.format(complist))\n\n def _map_keys(compdict):\n if isinstance(compdict, BaseSettings):\n compbs = BaseSettings()\n for k, v in six.iteritems(compdict):\n prio = compdict.getpriority(k)\n if compbs.getpriority(convert(k)) == prio:\n raise ValueError('Some paths in {!r} convert to the same '\n 'object, please update your settings'\n ''.format(list(compdict.keys())))\n else:\n compbs.set(convert(k), v, priority=prio)\n return compbs\n else:\n _check_components(compdict)\n return {convert(k): v for k, v in six.iteritems(compdict)}\n\n def _validate_values(compdict):\n \"\"\"Fail if a value in the components dict is not a real number or None.\"\"\"\n for name, value in six.iteritems(compdict):\n if value is not None and not isinstance(value, numbers.Real):\n raise ValueError('Invalid value {} for component {}, please provide ' \\\n 'a real number or None instead'.format(value, name))\n\n # BEGIN 
Backwards compatibility for old (base, custom) call signature\n if isinstance(custom, (list, tuple)):\n _check_components(custom)\n return type(custom)(convert(c) for c in custom)\n\n if custom is not None:\n compdict.update(custom)\n # END Backwards compatibility\n\n _validate_values(compdict)\n compdict = without_none_values(_map_keys(compdict))\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\n\n\ndef arglist_to_dict(arglist):\n \"\"\"Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a\n dict\n \"\"\"\n return dict(x.split('=', 1) for x in arglist)\n\n\ndef closest_scrapy_cfg(path='.', prevpath=None):\n \"\"\"Return the path to the closest scrapy.cfg file by traversing the current\n directory and its parents\n \"\"\"\n if path == prevpath:\n return ''\n path = os.path.abspath(path)\n cfgfile = os.path.join(path, 'scrapy.cfg')\n if os.path.exists(cfgfile):\n return cfgfile\n return closest_scrapy_cfg(os.path.dirname(path), path)\n\n\ndef init_env(project='default', set_syspath=True):\n \"\"\"Initialize environment to use command-line tool from inside a project\n dir. This sets the Scrapy settings module and modifies the Python path to\n be able to locate the project module.\n \"\"\"\n cfg = get_config()\n if cfg.has_option('settings', project):\n os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)\n closest = closest_scrapy_cfg()\n if closest:\n projdir = os.path.dirname(closest)\n if set_syspath and projdir not in sys.path:\n sys.path.append(projdir)\n\n\ndef get_config(use_closest=True):\n \"\"\"Get Scrapy config file as a SafeConfigParser\"\"\"\n sources = get_sources(use_closest)\n cfg = SafeConfigParser()\n cfg.read(sources)\n return cfg\n\n\ndef get_sources(use_closest=True):\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \\\n os.path.expanduser('~/.config')\n sources = ['/etc/scrapy.cfg', r'c:\\scrapy\\scrapy.cfg',\n xdg_config_home + '/scrapy.cfg',\n os.path.expanduser('~/.scrapy.cfg')]\n if use_closest:\n sources.append(closest_scrapy_cfg())\n return sources\n", "path": "scrapy/utils/conf.py"}]} | 1,544 | 285 |
gh_patches_debug_26341 | rasdani/github-patches | git_diff | freqtrade__freqtrade-1896 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--help produces traceback
Seems I broke it somehow.
`python3 freqtrade hyperopt --help`
produces traceback
```
Fatal exception!
Traceback (most recent call last):
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py", line 42, in main
args: Namespace = arguments.get_parsed_arg()
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py", line 46, in get_parsed_arg
self.parsed_arg = self.parse_args()
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py", line 54, in parse_args
parsed_arg = self.parser.parse_args(self.args)
File "/usr/lib/python3.6/argparse.py", line 1743, in parse_args
args, argv = self.parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1775, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1963, in _parse_known_args
positionals_end_index = consume_positionals(start_index)
File "/usr/lib/python3.6/argparse.py", line 1940, in consume_positionals
take_action(action, args)
File "/usr/lib/python3.6/argparse.py", line 1849, in take_action
action(self, namespace, argument_values, option_string)
File "/usr/lib/python3.6/argparse.py", line 1146, in __call__
subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)
File "/usr/lib/python3.6/argparse.py", line 1775, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1981, in _parse_known_args
start_index = consume_optional(start_index)
File "/usr/lib/python3.6/argparse.py", line 1921, in consume_optional
take_action(action, args, option_string)
File "/usr/lib/python3.6/argparse.py", line 1849, in take_action
action(self, namespace, argument_values, option_string)
File "/usr/lib/python3.6/argparse.py", line 1034, in __call__
parser.exit()
File "/usr/lib/python3.6/argparse.py", line 2389, in exit
_sys.exit(status)
SystemExit: 0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/main.py`
Content:
```
1 #!/usr/bin/env python3
2 """
3 Main Freqtrade bot script.
4 Read the documentation to know what cli arguments you need.
5 """
6
7 import sys
8 # check min. python version
9 if sys.version_info < (3, 6):
10 sys.exit("Freqtrade requires Python version >= 3.6")
11
12 # flake8: noqa E402
13 import logging
14 from argparse import Namespace
15 from typing import List
16
17 from freqtrade import OperationalException
18 from freqtrade.arguments import Arguments
19 from freqtrade.configuration import set_loggers
20 from freqtrade.worker import Worker
21
22
23 logger = logging.getLogger('freqtrade')
24
25
26 def main(sysargv: List[str] = None) -> None:
27 """
28 This function will initiate the bot and start the trading loop.
29 :return: None
30 """
31
32 try:
33 set_loggers()
34
35 worker = None
36 return_code = 1
37
38 arguments = Arguments(
39 sysargv,
40 'Free, open source crypto trading bot'
41 )
42 args: Namespace = arguments.get_parsed_arg()
43
44 # A subcommand has been issued.
45 # Means if Backtesting or Hyperopt have been called we exit the bot
46 if hasattr(args, 'func'):
47 args.func(args)
48 # TODO: fetch return_code as returned by the command function here
49 return_code = 0
50 else:
51 # Load and run worker
52 worker = Worker(args)
53 worker.run()
54
55 except KeyboardInterrupt:
56 logger.info('SIGINT received, aborting ...')
57 return_code = 0
58 except OperationalException as e:
59 logger.error(str(e))
60 return_code = 2
61 except BaseException:
62 logger.exception('Fatal exception!')
63 finally:
64 if worker:
65 worker.exit()
66 sys.exit(return_code)
67
68
69 if __name__ == '__main__':
70 main()
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/main.py b/freqtrade/main.py
--- a/freqtrade/main.py
+++ b/freqtrade/main.py
@@ -12,7 +12,7 @@
# flake8: noqa E402
import logging
from argparse import Namespace
-from typing import List
+from typing import Any, List
from freqtrade import OperationalException
from freqtrade.arguments import Arguments
@@ -29,12 +29,11 @@
:return: None
"""
+ return_code: Any = 1
+ worker = None
try:
set_loggers()
- worker = None
- return_code = 1
-
arguments = Arguments(
sysargv,
'Free, open source crypto trading bot'
@@ -52,13 +51,15 @@
worker = Worker(args)
worker.run()
+ except SystemExit as e:
+ return_code = e
except KeyboardInterrupt:
logger.info('SIGINT received, aborting ...')
return_code = 0
except OperationalException as e:
logger.error(str(e))
return_code = 2
- except BaseException:
+ except Exception:
logger.exception('Fatal exception!')
finally:
if worker:
| {"golden_diff": "diff --git a/freqtrade/main.py b/freqtrade/main.py\n--- a/freqtrade/main.py\n+++ b/freqtrade/main.py\n@@ -12,7 +12,7 @@\n # flake8: noqa E402\n import logging\n from argparse import Namespace\n-from typing import List\n+from typing import Any, List\n \n from freqtrade import OperationalException\n from freqtrade.arguments import Arguments\n@@ -29,12 +29,11 @@\n :return: None\n \"\"\"\n \n+ return_code: Any = 1\n+ worker = None\n try:\n set_loggers()\n \n- worker = None\n- return_code = 1\n-\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n@@ -52,13 +51,15 @@\n worker = Worker(args)\n worker.run()\n \n+ except SystemExit as e:\n+ return_code = e\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n- except BaseException:\n+ except Exception:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n", "issue": "--help produces traceback\nSeems I broke it somehow.\r\n\r\n`python3 freqtrade hyperopt --help`\r\nproduces traceback \r\n```\r\nFatal exception!\r\nTraceback (most recent call last):\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py\", line 42, in main\r\n args: Namespace = arguments.get_parsed_arg()\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py\", line 46, in get_parsed_arg\r\n self.parsed_arg = self.parse_args()\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py\", line 54, in parse_args\r\n parsed_arg = self.parser.parse_args(self.args)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1743, in parse_args\r\n args, argv = self.parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1775, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1963, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1940, in consume_positionals\r\n take_action(action, args)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1849, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1146, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1775, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1981, in _parse_known_args\r\n start_index = consume_optional(start_index)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1921, in consume_optional\r\n take_action(action, args, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1849, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1034, in __call__\r\n parser.exit()\r\n File \"/usr/lib/python3.6/argparse.py\", line 2389, in exit\r\n _sys.exit(status)\r\nSystemExit: 0\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"\nMain Freqtrade bot script.\nRead the documentation to know what cli arguments you need.\n\"\"\"\n\nimport sys\n# check min. 
python version\nif sys.version_info < (3, 6):\n sys.exit(\"Freqtrade requires Python version >= 3.6\")\n\n# flake8: noqa E402\nimport logging\nfrom argparse import Namespace\nfrom typing import List\n\nfrom freqtrade import OperationalException\nfrom freqtrade.arguments import Arguments\nfrom freqtrade.configuration import set_loggers\nfrom freqtrade.worker import Worker\n\n\nlogger = logging.getLogger('freqtrade')\n\n\ndef main(sysargv: List[str] = None) -> None:\n \"\"\"\n This function will initiate the bot and start the trading loop.\n :return: None\n \"\"\"\n\n try:\n set_loggers()\n\n worker = None\n return_code = 1\n\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n )\n args: Namespace = arguments.get_parsed_arg()\n\n # A subcommand has been issued.\n # Means if Backtesting or Hyperopt have been called we exit the bot\n if hasattr(args, 'func'):\n args.func(args)\n # TODO: fetch return_code as returned by the command function here\n return_code = 0\n else:\n # Load and run worker\n worker = Worker(args)\n worker.run()\n\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n except BaseException:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n worker.exit()\n sys.exit(return_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "freqtrade/main.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"\nMain Freqtrade bot script.\nRead the documentation to know what cli arguments you need.\n\"\"\"\n\nimport sys\n# check min. python version\nif sys.version_info < (3, 6):\n sys.exit(\"Freqtrade requires Python version >= 3.6\")\n\n# flake8: noqa E402\nimport logging\nfrom argparse import Namespace\nfrom typing import Any, List\n\nfrom freqtrade import OperationalException\nfrom freqtrade.arguments import Arguments\nfrom freqtrade.configuration import set_loggers\nfrom freqtrade.worker import Worker\n\n\nlogger = logging.getLogger('freqtrade')\n\n\ndef main(sysargv: List[str] = None) -> None:\n \"\"\"\n This function will initiate the bot and start the trading loop.\n :return: None\n \"\"\"\n\n return_code: Any = 1\n worker = None\n try:\n set_loggers()\n\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n )\n args: Namespace = arguments.get_parsed_arg()\n\n # A subcommand has been issued.\n # Means if Backtesting or Hyperopt have been called we exit the bot\n if hasattr(args, 'func'):\n args.func(args)\n # TODO: fetch return_code as returned by the command function here\n return_code = 0\n else:\n # Load and run worker\n worker = Worker(args)\n worker.run()\n\n except SystemExit as e:\n return_code = e\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n except Exception:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n worker.exit()\n sys.exit(return_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "freqtrade/main.py"}]} | 1,410 | 280 |
gh_patches_debug_31268 | rasdani/github-patches | git_diff | kornia__kornia-2131 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weird behavior of LongestMaxSize
### Describe the bug
Hello me again,
I might be doing something wrong with the way I use kornia augmentations, please let me know if it is the case.
I was expecting `LongestMaxSize` in kornia to perform similarily as the albumentation implementation. Meaning that I can throw any images with different shapes to the the transformation function and get an image with different shapes but similar ratios. The largest size being equal to the value given to `LongestMaxSize`.
See bellow a small code sample that disturbs me.
### Reproduction steps
```bash
import kornia.augmentation as K
a = torch.ones((512, 256))
b = torch.ones((512, 756))
print("first try")
transfo = K.LongestMaxSize(max_size=256, p=1.)
print(transfo(a).shape)
print(transfo(b).shape)
print("second try")
a = torch.ones((512, 256))
b = torch.ones((512, 756))
transfo = K.LongestMaxSize(max_size=256, p=1.)
print(transfo(b).shape)
print(transfo(a).shape)
Outputs:
first try
torch.Size([1, 1, 256, 128])
torch.Size([1, 1, 256, 128])
second try
torch.Size([1, 1, 173, 256])
torch.Size([1, 1, 173, 256])
```
### Expected behavior
I would expect to have the same values for the transformations no matter the order of the elements.
ie `transfo(a).shape == torch.Size([1, 1, 256, 128])` and `transfo(b).shape ==torch.Size([1, 1, 173, 256])`
Am I missing something here ?
### Environment
```shell
kornia='0.6.9'
torch='1.12.1+cu113'
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/augmentation/random_generator/_2d/resize.py`
Content:
```
1 from typing import Dict, Tuple, Union
2
3 import torch
4
5 from kornia.augmentation.random_generator.base import RandomGeneratorBase
6 from kornia.augmentation.utils import _common_param_check
7 from kornia.core import Device, Tensor, tensor
8 from kornia.geometry.bbox import bbox_generator
9 from kornia.geometry.transform.affwarp import _side_to_image_size
10
11
12 class ResizeGenerator(RandomGeneratorBase):
13 r"""Get parameters for ```resize``` transformation for resize transform.
14
15 Args:
16 resize_to: Desired output size of the crop, like (h, w).
17 side: Which side to resize if `resize_to` is only of type int.
18
19 Returns:
20 parameters to be passed for transformation.
21 - src (Tensor): cropping bounding boxes with a shape of (B, 4, 2).
22 - dst (Tensor): output bounding boxes with a shape (B, 4, 2).
23 - input_size (Tensor): (h, w) from batch input.
24 - resize_to (tuple): new (h, w) for batch input.
25
26 Note:
27 The generated random numbers are not reproducible across different devices and dtypes. By default,
28 the parameters will be generated on CPU in float32. This can be changed by calling
29 ``self.set_rng_device_and_dtype(device="cuda", dtype=torch.float64)``.
30 """
31
32 def __init__(self, resize_to: Union[int, Tuple[int, int]], side: str = "short") -> None:
33 super().__init__()
34 self.output_size = resize_to
35 self.side = side
36
37 def __repr__(self) -> str:
38 repr = f"output_size={self.output_size}"
39 return repr
40
41 def make_samplers(self, device: Device, dtype: torch.dtype) -> None:
42 self.device = device
43 self.dtype = dtype
44 pass
45
46 def forward(self, batch_shape: torch.Size, same_on_batch: bool = False) -> Dict[str, Tensor]:
47 batch_size = batch_shape[0]
48 _common_param_check(batch_size, same_on_batch)
49 _device = self.device
50 _dtype = self.dtype
51
52 if batch_size == 0:
53 return dict(
54 src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),
55 dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),
56 )
57
58 input_size = h, w = (batch_shape[-2], batch_shape[-1])
59
60 src = bbox_generator(
61 tensor(0, device=_device, dtype=_dtype),
62 tensor(0, device=_device, dtype=_dtype),
63 tensor(input_size[1], device=_device, dtype=_dtype),
64 tensor(input_size[0], device=_device, dtype=_dtype),
65 ).repeat(batch_size, 1, 1)
66
67 if isinstance(self.output_size, int):
68 aspect_ratio = w / h
69 self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)
70
71 if not (
72 len(self.output_size) == 2
73 and isinstance(self.output_size[0], (int,))
74 and isinstance(self.output_size[1], (int,))
75 and self.output_size[0] > 0
76 and self.output_size[1] > 0
77 ):
78 raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.")
79
80 dst = bbox_generator(
81 tensor(0, device=_device, dtype=_dtype),
82 tensor(0, device=_device, dtype=_dtype),
83 tensor(self.output_size[1], device=_device, dtype=_dtype),
84 tensor(self.output_size[0], device=_device, dtype=_dtype),
85 ).repeat(batch_size, 1, 1)
86
87 _input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)
88 _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)
89
90 return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/augmentation/random_generator/_2d/resize.py b/kornia/augmentation/random_generator/_2d/resize.py
--- a/kornia/augmentation/random_generator/_2d/resize.py
+++ b/kornia/augmentation/random_generator/_2d/resize.py
@@ -66,25 +66,27 @@
if isinstance(self.output_size, int):
aspect_ratio = w / h
- self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)
+ output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)
+ else:
+ output_size = self.output_size
if not (
- len(self.output_size) == 2
- and isinstance(self.output_size[0], (int,))
- and isinstance(self.output_size[1], (int,))
- and self.output_size[0] > 0
- and self.output_size[1] > 0
+ len(output_size) == 2
+ and isinstance(output_size[0], (int,))
+ and isinstance(output_size[1], (int,))
+ and output_size[0] > 0
+ and output_size[1] > 0
):
- raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.")
+ raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. Got {output_size}.")
dst = bbox_generator(
tensor(0, device=_device, dtype=_dtype),
tensor(0, device=_device, dtype=_dtype),
- tensor(self.output_size[1], device=_device, dtype=_dtype),
- tensor(self.output_size[0], device=_device, dtype=_dtype),
+ tensor(output_size[1], device=_device, dtype=_dtype),
+ tensor(output_size[0], device=_device, dtype=_dtype),
).repeat(batch_size, 1, 1)
_input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)
- _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)
+ _output_size = tensor(output_size, device=_device, dtype=torch.long).expand(batch_size, -1)
return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)
| {"golden_diff": "diff --git a/kornia/augmentation/random_generator/_2d/resize.py b/kornia/augmentation/random_generator/_2d/resize.py\n--- a/kornia/augmentation/random_generator/_2d/resize.py\n+++ b/kornia/augmentation/random_generator/_2d/resize.py\n@@ -66,25 +66,27 @@\n \n if isinstance(self.output_size, int):\n aspect_ratio = w / h\n- self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)\n+ output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)\n+ else:\n+ output_size = self.output_size\n \n if not (\n- len(self.output_size) == 2\n- and isinstance(self.output_size[0], (int,))\n- and isinstance(self.output_size[1], (int,))\n- and self.output_size[0] > 0\n- and self.output_size[1] > 0\n+ len(output_size) == 2\n+ and isinstance(output_size[0], (int,))\n+ and isinstance(output_size[1], (int,))\n+ and output_size[0] > 0\n+ and output_size[1] > 0\n ):\n- raise AssertionError(f\"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.\")\n+ raise AssertionError(f\"`resize_to` must be a tuple of 2 positive integers. Got {output_size}.\")\n \n dst = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n- tensor(self.output_size[1], device=_device, dtype=_dtype),\n- tensor(self.output_size[0], device=_device, dtype=_dtype),\n+ tensor(output_size[1], device=_device, dtype=_dtype),\n+ tensor(output_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n \n _input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n- _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n+ _output_size = tensor(output_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n \n return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)\n", "issue": "Weird behavior of LongestMaxSize\n### Describe the bug\r\n\r\nHello me again,\r\n\r\nI might be doing something wrong with the way I use kornia augmentations, please let me know if it is the case.\r\n\r\nI was expecting `LongestMaxSize` in kornia to perform similarily as the albumentation implementation. Meaning that I can throw any images with different shapes to the the transformation function and get an image with different shapes but similar ratios. 
The largest size being equal to the value given to `LongestMaxSize`.\r\n\r\nSee bellow a small code sample that disturbs me.\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\nimport kornia.augmentation as K\r\na = torch.ones((512, 256))\r\nb = torch.ones((512, 756))\r\n\r\nprint(\"first try\")\r\ntransfo = K.LongestMaxSize(max_size=256, p=1.)\r\n\r\nprint(transfo(a).shape)\r\nprint(transfo(b).shape)\r\n\r\nprint(\"second try\")\r\n\r\na = torch.ones((512, 256))\r\nb = torch.ones((512, 756))\r\n\r\ntransfo = K.LongestMaxSize(max_size=256, p=1.)\r\nprint(transfo(b).shape)\r\nprint(transfo(a).shape)\r\n\r\nOutputs:\r\nfirst try\r\ntorch.Size([1, 1, 256, 128])\r\ntorch.Size([1, 1, 256, 128])\r\nsecond try\r\ntorch.Size([1, 1, 173, 256])\r\ntorch.Size([1, 1, 173, 256])\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nI would expect to have the same values for the transformations no matter the order of the elements.\r\n\r\nie `transfo(a).shape == torch.Size([1, 1, 256, 128])` and `transfo(b).shape ==torch.Size([1, 1, 173, 256])`\r\n\r\nAm I missing something here ?\r\n\r\n### Environment\r\n\r\n```shell\r\nkornia='0.6.9'\r\ntorch='1.12.1+cu113'\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "from typing import Dict, Tuple, Union\n\nimport torch\n\nfrom kornia.augmentation.random_generator.base import RandomGeneratorBase\nfrom kornia.augmentation.utils import _common_param_check\nfrom kornia.core import Device, Tensor, tensor\nfrom kornia.geometry.bbox import bbox_generator\nfrom kornia.geometry.transform.affwarp import _side_to_image_size\n\n\nclass ResizeGenerator(RandomGeneratorBase):\n r\"\"\"Get parameters for ```resize``` transformation for resize transform.\n\n Args:\n resize_to: Desired output size of the crop, like (h, w).\n side: Which side to resize if `resize_to` is only of type int.\n\n Returns:\n parameters to be passed for transformation.\n - src (Tensor): cropping bounding boxes with a shape of (B, 4, 2).\n - dst (Tensor): output bounding boxes with a shape (B, 4, 2).\n - input_size (Tensor): (h, w) from batch input.\n - resize_to (tuple): new (h, w) for batch input.\n\n Note:\n The generated random numbers are not reproducible across different devices and dtypes. By default,\n the parameters will be generated on CPU in float32. 
This can be changed by calling\n ``self.set_rng_device_and_dtype(device=\"cuda\", dtype=torch.float64)``.\n \"\"\"\n\n def __init__(self, resize_to: Union[int, Tuple[int, int]], side: str = \"short\") -> None:\n super().__init__()\n self.output_size = resize_to\n self.side = side\n\n def __repr__(self) -> str:\n repr = f\"output_size={self.output_size}\"\n return repr\n\n def make_samplers(self, device: Device, dtype: torch.dtype) -> None:\n self.device = device\n self.dtype = dtype\n pass\n\n def forward(self, batch_shape: torch.Size, same_on_batch: bool = False) -> Dict[str, Tensor]:\n batch_size = batch_shape[0]\n _common_param_check(batch_size, same_on_batch)\n _device = self.device\n _dtype = self.dtype\n\n if batch_size == 0:\n return dict(\n src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),\n dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),\n )\n\n input_size = h, w = (batch_shape[-2], batch_shape[-1])\n\n src = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n tensor(input_size[1], device=_device, dtype=_dtype),\n tensor(input_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n\n if isinstance(self.output_size, int):\n aspect_ratio = w / h\n self.output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)\n\n if not (\n len(self.output_size) == 2\n and isinstance(self.output_size[0], (int,))\n and isinstance(self.output_size[1], (int,))\n and self.output_size[0] > 0\n and self.output_size[1] > 0\n ):\n raise AssertionError(f\"`resize_to` must be a tuple of 2 positive integers. Got {self.output_size}.\")\n\n dst = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n tensor(self.output_size[1], device=_device, dtype=_dtype),\n tensor(self.output_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n\n _input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n _output_size = tensor(self.output_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n\n return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)\n", "path": "kornia/augmentation/random_generator/_2d/resize.py"}], "after_files": [{"content": "from typing import Dict, Tuple, Union\n\nimport torch\n\nfrom kornia.augmentation.random_generator.base import RandomGeneratorBase\nfrom kornia.augmentation.utils import _common_param_check\nfrom kornia.core import Device, Tensor, tensor\nfrom kornia.geometry.bbox import bbox_generator\nfrom kornia.geometry.transform.affwarp import _side_to_image_size\n\n\nclass ResizeGenerator(RandomGeneratorBase):\n r\"\"\"Get parameters for ```resize``` transformation for resize transform.\n\n Args:\n resize_to: Desired output size of the crop, like (h, w).\n side: Which side to resize if `resize_to` is only of type int.\n\n Returns:\n parameters to be passed for transformation.\n - src (Tensor): cropping bounding boxes with a shape of (B, 4, 2).\n - dst (Tensor): output bounding boxes with a shape (B, 4, 2).\n - input_size (Tensor): (h, w) from batch input.\n - resize_to (tuple): new (h, w) for batch input.\n\n Note:\n The generated random numbers are not reproducible across different devices and dtypes. By default,\n the parameters will be generated on CPU in float32. 
This can be changed by calling\n ``self.set_rng_device_and_dtype(device=\"cuda\", dtype=torch.float64)``.\n \"\"\"\n\n def __init__(self, resize_to: Union[int, Tuple[int, int]], side: str = \"short\") -> None:\n super().__init__()\n self.output_size = resize_to\n self.side = side\n\n def __repr__(self) -> str:\n repr = f\"output_size={self.output_size}\"\n return repr\n\n def make_samplers(self, device: Device, dtype: torch.dtype) -> None:\n self.device = device\n self.dtype = dtype\n pass\n\n def forward(self, batch_shape: torch.Size, same_on_batch: bool = False) -> Dict[str, Tensor]:\n batch_size = batch_shape[0]\n _common_param_check(batch_size, same_on_batch)\n _device = self.device\n _dtype = self.dtype\n\n if batch_size == 0:\n return dict(\n src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),\n dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype),\n )\n\n input_size = h, w = (batch_shape[-2], batch_shape[-1])\n\n src = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n tensor(input_size[1], device=_device, dtype=_dtype),\n tensor(input_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n\n if isinstance(self.output_size, int):\n aspect_ratio = w / h\n output_size = _side_to_image_size(self.output_size, aspect_ratio, self.side)\n else:\n output_size = self.output_size\n\n if not (\n len(output_size) == 2\n and isinstance(output_size[0], (int,))\n and isinstance(output_size[1], (int,))\n and output_size[0] > 0\n and output_size[1] > 0\n ):\n raise AssertionError(f\"`resize_to` must be a tuple of 2 positive integers. Got {output_size}.\")\n\n dst = bbox_generator(\n tensor(0, device=_device, dtype=_dtype),\n tensor(0, device=_device, dtype=_dtype),\n tensor(output_size[1], device=_device, dtype=_dtype),\n tensor(output_size[0], device=_device, dtype=_dtype),\n ).repeat(batch_size, 1, 1)\n\n _input_size = tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n _output_size = tensor(output_size, device=_device, dtype=torch.long).expand(batch_size, -1)\n\n return dict(src=src, dst=dst, input_size=_input_size, output_size=_output_size)\n", "path": "kornia/augmentation/random_generator/_2d/resize.py"}]} | 1,824 | 543 |
gh_patches_debug_39944 | rasdani/github-patches | git_diff | cobbler__cobbler-2919 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Security: Stabalize the MongoDB serializer
### Describe the bug
This is the upstream bug report for SUSE/spacewalk#16737 which is a mirror issue of an internal Bugzilla issue.
Copied from the Bugzilla comment by @thesp0nge:
In the MongoDB serializer class, there is no sanity check when the config file is read.
If the file gets corrupted in some way, this can lead to unexpected behaviour.
```python
def __connect(configfile: str = "/etc/cobbler/mongodb.conf"):
"""
Reads the config file for mongodb and then connects to the mongodb.
"""
cp = ConfigParser()
cp.read(configfile)
host = cp.get("connection", "host")
port = int(cp.get("connection", "port"))
# pylint: disable=global-statement
global mongodb
mongodb = MongoClient(host, port)['cobbler']
```
### Steps to reproduce
1. Corrupt the `mongodb.conf`
2. Start Cobbler
3. See error
### Expected behavior
We get a better exception and Cobbler is prevented from starting up.
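One possible shape for such a sanity check (a minimal illustrative sketch, not the actual patch — the function name, messages, and fallback values are assumptions):

```python
import pathlib
from configparser import ConfigParser, Error as ConfigParserError


def read_mongodb_config(configfile: str = "/etc/cobbler/mongodb.conf"):
    # Fail fast if the file is missing instead of silently reading nothing.
    if not pathlib.Path(configfile).is_file():
        raise FileNotFoundError(f"MongoDB config file not found: {configfile}")

    cp = ConfigParser()
    try:
        cp.read(configfile)
    except ConfigParserError as err:
        raise RuntimeError(f"Could not parse {configfile}") from err

    # Fall back to sane defaults when individual keys are missing.
    host = cp.get("connection", "host", fallback="localhost")
    port = cp.getint("connection", "port", fallback=27017)
    return host, port
```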
### Cobbler version
<!--- Paste output from `cobbler version` -->
````paste below
````
### Operating system
<!--- On which operating system do you use Cobbler? -->
### Cobbler log
<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->
````paste below
````
### Screenshots
<!--- If applicable, add screenshots to help explain your problem. -->
### Additional information
<!--- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/modules/serializers/mongodb.py`
Content:
```
1 """
2 Cobbler's Mongo database based object serializer.
3 Experimental version.
4
5 Copyright 2006-2009, Red Hat, Inc and Others
6 Michael DeHaan <michael.dehaan AT gmail>
7 James Cammarata <[email protected]>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 02110-1301 USA
23 """
24
25 from configparser import ConfigParser
26
27 from cobbler import settings
28 from cobbler.cexceptions import CX
29
30 try:
31 from pymongo import MongoClient
32 from pymongo.errors import ConnectionFailure, ConfigurationError
33 pymongo_loaded = True
34 except ModuleNotFoundError:
35 # FIXME: log message
36 pymongo_loaded = False
37
38 mongodb = None
39
40
41 def __connect(configfile: str = "/etc/cobbler/mongodb.conf"):
42 """
43 Reads the config file for mongodb and then connects to the mongodb.
44 """
45 cp = ConfigParser()
46 cp.read(configfile)
47
48 host = cp.get("connection", "host")
49 port = int(cp.get("connection", "port"))
50 # pylint: disable=global-statement
51 global mongodb
52 mongodb = MongoClient(host, port)['cobbler']
53 try:
54 # The ismaster command is cheap and doesn't require auth.
55 mongodb.admin.command('ismaster')
56 except ConnectionFailure as e:
57 # FIXME: log error
58 raise CX("Unable to connect to Mongo database or get database \"cobbler\"") from e
59 except ConfigurationError as e:
60 raise CX("The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.") from e
61
62
63 def register() -> str:
64 """
65 The mandatory Cobbler module registration hook.
66 """
67 # FIXME: only run this if enabled.
68 if not pymongo_loaded:
69 return ""
70 return "serializer"
71
72
73 def what() -> str:
74 """
75 Module identification function
76 """
77 return "serializer/mongodb"
78
79
80 def serialize_item(collection, item):
81 """
82 Save a collection item to database.
83
84 :param collection: collection
85 :param item: collection item
86 """
87
88 __connect()
89 collection = mongodb[collection.collection_type()]
90 data = collection.find_one({'name': item.name})
91 if data:
92 collection.update({'name': item.name}, item.serialize())
93 else:
94 collection.insert(item.serialize())
95
96
97 def serialize_delete(collection, item):
98 """
99 Delete a collection item from database.
100
101 :param collection: collection
102 :param item: collection item
103 """
104
105 __connect()
106 collection = mongodb[collection.collection_type()]
107 collection.remove({'name': item.name})
108
109
110 def serialize(collection):
111 """
112 Save a collection to database
113
114 :param collection: collection
115 """
116
117 # TODO: error detection
118 ctype = collection.collection_type()
119 if ctype != "settings":
120 for x in collection:
121 serialize_item(collection, x)
122
123
124 def deserialize_raw(collection_type: str):
125 """
126 Get a collection from mongodb and parse it into an object.
127
128 :param collection_type: The collection type to fetch.
129 :return: The first element of the collection requested.
130 """
131 if collection_type == "settings":
132 return settings.read_settings_file()
133 else:
134 __connect()
135 collection = mongodb[collection_type]
136 return collection.find()
137
138
139 def deserialize(collection, topological: bool = True):
140 """
141 Load a collection from the database.
142
143 :param collection: The collection to deserialize.
144 :param topological: If the collection list should be sorted by the collection dict depth value or not.
145 """
146
147 datastruct = deserialize_raw(collection.collection_type())
148 if topological and type(datastruct) == list:
149 datastruct.sort(key=lambda x: x["depth"])
150 if type(datastruct) == dict:
151 collection.from_dict(datastruct)
152 elif type(datastruct) == list:
153 collection.from_list(datastruct)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cobbler/modules/serializers/mongodb.py b/cobbler/modules/serializers/mongodb.py
--- a/cobbler/modules/serializers/mongodb.py
+++ b/cobbler/modules/serializers/mongodb.py
@@ -21,7 +21,8 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
-
+import configparser
+import pathlib
from configparser import ConfigParser
from cobbler import settings
@@ -30,6 +31,7 @@
try:
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, ConfigurationError
+
pymongo_loaded = True
except ModuleNotFoundError:
# FIXME: log message
@@ -42,22 +44,34 @@
"""
Reads the config file for mongodb and then connects to the mongodb.
"""
- cp = ConfigParser()
- cp.read(configfile)
+ if not pathlib.Path(configfile).is_file():
+ raise FileNotFoundError(
+ "Specified Cobbler MongoDB config file could not be found!"
+ )
- host = cp.get("connection", "host")
- port = int(cp.get("connection", "port"))
+ cp = ConfigParser()
+ try:
+ cp.read(configfile)
+ except configparser.Error as cp_error:
+ raise configparser.Error(
+ "Could not read Cobbler MongoDB config file!"
+ ) from cp_error
+
+ host = cp.get("connection", "host", fallback="localhost")
+ port = cp.getint("connection", "port", fallback=27017)
# pylint: disable=global-statement
global mongodb
- mongodb = MongoClient(host, port)['cobbler']
+ mongodb = MongoClient(host, port)["cobbler"]
try:
# The ismaster command is cheap and doesn't require auth.
- mongodb.admin.command('ismaster')
+ mongodb.admin.command("ismaster")
except ConnectionFailure as e:
# FIXME: log error
- raise CX("Unable to connect to Mongo database or get database \"cobbler\"") from e
+ raise CX('Unable to connect to Mongo database or get database "cobbler"') from e
except ConfigurationError as e:
- raise CX("The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.") from e
+ raise CX(
+ "The configuration of the MongoDB connection isn't correct, please check the Cobbler settings."
+ ) from e
def register() -> str:
@@ -87,9 +101,9 @@
__connect()
collection = mongodb[collection.collection_type()]
- data = collection.find_one({'name': item.name})
+ data = collection.find_one({"name": item.name})
if data:
- collection.update({'name': item.name}, item.serialize())
+ collection.update({"name": item.name}, item.serialize())
else:
collection.insert(item.serialize())
@@ -104,7 +118,7 @@
__connect()
collection = mongodb[collection.collection_type()]
- collection.remove({'name': item.name})
+ collection.remove({"name": item.name})
def serialize(collection):
| {"golden_diff": "diff --git a/cobbler/modules/serializers/mongodb.py b/cobbler/modules/serializers/mongodb.py\n--- a/cobbler/modules/serializers/mongodb.py\n+++ b/cobbler/modules/serializers/mongodb.py\n@@ -21,7 +21,8 @@\n Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n 02110-1301 USA\n \"\"\"\n-\n+import configparser\n+import pathlib\n from configparser import ConfigParser\n \n from cobbler import settings\n@@ -30,6 +31,7 @@\n try:\n from pymongo import MongoClient\n from pymongo.errors import ConnectionFailure, ConfigurationError\n+\n pymongo_loaded = True\n except ModuleNotFoundError:\n # FIXME: log message\n@@ -42,22 +44,34 @@\n \"\"\"\n Reads the config file for mongodb and then connects to the mongodb.\n \"\"\"\n- cp = ConfigParser()\n- cp.read(configfile)\n+ if not pathlib.Path(configfile).is_file():\n+ raise FileNotFoundError(\n+ \"Specified Cobbler MongoDB config file could not be found!\"\n+ )\n \n- host = cp.get(\"connection\", \"host\")\n- port = int(cp.get(\"connection\", \"port\"))\n+ cp = ConfigParser()\n+ try:\n+ cp.read(configfile)\n+ except configparser.Error as cp_error:\n+ raise configparser.Error(\n+ \"Could not read Cobbler MongoDB config file!\"\n+ ) from cp_error\n+\n+ host = cp.get(\"connection\", \"host\", fallback=\"localhost\")\n+ port = cp.getint(\"connection\", \"port\", fallback=27017)\n # pylint: disable=global-statement\n global mongodb\n- mongodb = MongoClient(host, port)['cobbler']\n+ mongodb = MongoClient(host, port)[\"cobbler\"]\n try:\n # The ismaster command is cheap and doesn't require auth.\n- mongodb.admin.command('ismaster')\n+ mongodb.admin.command(\"ismaster\")\n except ConnectionFailure as e:\n # FIXME: log error\n- raise CX(\"Unable to connect to Mongo database or get database \\\"cobbler\\\"\") from e\n+ raise CX('Unable to connect to Mongo database or get database \"cobbler\"') from e\n except ConfigurationError as e:\n- raise CX(\"The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.\") from e\n+ raise CX(\n+ \"The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.\"\n+ ) from e\n \n \n def register() -> str:\n@@ -87,9 +101,9 @@\n \n __connect()\n collection = mongodb[collection.collection_type()]\n- data = collection.find_one({'name': item.name})\n+ data = collection.find_one({\"name\": item.name})\n if data:\n- collection.update({'name': item.name}, item.serialize())\n+ collection.update({\"name\": item.name}, item.serialize())\n else:\n collection.insert(item.serialize())\n \n@@ -104,7 +118,7 @@\n \n __connect()\n collection = mongodb[collection.collection_type()]\n- collection.remove({'name': item.name})\n+ collection.remove({\"name\": item.name})\n \n \n def serialize(collection):\n", "issue": "Security: Stabalize the MongoDB serializer\n### Describe the bug\r\n\r\nThis is the upstream bug report for SUSE/spacewalk#16737 which is a mirror issue of an internal Bugzilla issue.\r\n\r\nCopied from the Bugzilla comment by @thesp0nge:\r\n\r\nIn mongodb serializer class, when the config file is read, there is no sanity check.\r\nIf the file get somewhat corrupted, it can lead to unexpected behaviour.\r\n\r\n```python\r\ndef __connect(configfile: str = \"/etc/cobbler/mongodb.conf\"):\r\n \"\"\"\r\n Reads the config file for mongodb and then connects to the mongodb.\r\n \"\"\"\r\n cp = ConfigParser()\r\n cp.read(configfile)\r\n\r\n host = cp.get(\"connection\", \"host\")\r\n port = int(cp.get(\"connection\", \"port\"))\r\n # pylint: 
disable=global-statement\r\n global mongodb\r\n mongodb = MongoClient(host, port)['cobbler']\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. Corrupt the `mongodb.conf`\r\n2. Start Cobbler\r\n3. See error\r\n\r\n### Expected behavior\r\n\r\nWe get a better exception and Cobbler is prevented from starting up.\r\n\r\n### Cobbler version\r\n\r\n<!--- Paste output from `cobbler version` -->\r\n````paste below\r\n````\r\n\r\n### Operating system\r\n\r\n<!--- On which operating system do you use Cobbler? -->\r\n\r\n### Cobbler log\r\n\r\n<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->\r\n````paste below\r\n````\r\n\r\n### Screenshots\r\n\r\n<!--- If applicable, add screenshots to help explain your problem. -->\r\n\r\n### Additional information\r\n\r\n<!--- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "\"\"\"\nCobbler's Mongo database based object serializer.\nExperimental version.\n\nCopyright 2006-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\nJames Cammarata <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nfrom configparser import ConfigParser\n\nfrom cobbler import settings\nfrom cobbler.cexceptions import CX\n\ntry:\n from pymongo import MongoClient\n from pymongo.errors import ConnectionFailure, ConfigurationError\n pymongo_loaded = True\nexcept ModuleNotFoundError:\n # FIXME: log message\n pymongo_loaded = False\n\nmongodb = None\n\n\ndef __connect(configfile: str = \"/etc/cobbler/mongodb.conf\"):\n \"\"\"\n Reads the config file for mongodb and then connects to the mongodb.\n \"\"\"\n cp = ConfigParser()\n cp.read(configfile)\n\n host = cp.get(\"connection\", \"host\")\n port = int(cp.get(\"connection\", \"port\"))\n # pylint: disable=global-statement\n global mongodb\n mongodb = MongoClient(host, port)['cobbler']\n try:\n # The ismaster command is cheap and doesn't require auth.\n mongodb.admin.command('ismaster')\n except ConnectionFailure as e:\n # FIXME: log error\n raise CX(\"Unable to connect to Mongo database or get database \\\"cobbler\\\"\") from e\n except ConfigurationError as e:\n raise CX(\"The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.\") from e\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n # FIXME: only run this if enabled.\n if not pymongo_loaded:\n return \"\"\n return \"serializer\"\n\n\ndef what() -> str:\n \"\"\"\n Module identification function\n \"\"\"\n return \"serializer/mongodb\"\n\n\ndef serialize_item(collection, item):\n \"\"\"\n Save a collection item to database.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n __connect()\n collection = mongodb[collection.collection_type()]\n data = collection.find_one({'name': item.name})\n if data:\n collection.update({'name': item.name}, 
item.serialize())\n else:\n collection.insert(item.serialize())\n\n\ndef serialize_delete(collection, item):\n \"\"\"\n Delete a collection item from database.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n __connect()\n collection = mongodb[collection.collection_type()]\n collection.remove({'name': item.name})\n\n\ndef serialize(collection):\n \"\"\"\n Save a collection to database\n\n :param collection: collection\n \"\"\"\n\n # TODO: error detection\n ctype = collection.collection_type()\n if ctype != \"settings\":\n for x in collection:\n serialize_item(collection, x)\n\n\ndef deserialize_raw(collection_type: str):\n \"\"\"\n Get a collection from mongodb and parse it into an object.\n\n :param collection_type: The collection type to fetch.\n :return: The first element of the collection requested.\n \"\"\"\n if collection_type == \"settings\":\n return settings.read_settings_file()\n else:\n __connect()\n collection = mongodb[collection_type]\n return collection.find()\n\n\ndef deserialize(collection, topological: bool = True):\n \"\"\"\n Load a collection from the database.\n\n :param collection: The collection to deserialize.\n :param topological: If the collection list should be sorted by the collection dict depth value or not.\n \"\"\"\n\n datastruct = deserialize_raw(collection.collection_type())\n if topological and type(datastruct) == list:\n datastruct.sort(key=lambda x: x[\"depth\"])\n if type(datastruct) == dict:\n collection.from_dict(datastruct)\n elif type(datastruct) == list:\n collection.from_list(datastruct)\n", "path": "cobbler/modules/serializers/mongodb.py"}], "after_files": [{"content": "\"\"\"\nCobbler's Mongo database based object serializer.\nExperimental version.\n\nCopyright 2006-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\nJames Cammarata <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\nimport configparser\nimport pathlib\nfrom configparser import ConfigParser\n\nfrom cobbler import settings\nfrom cobbler.cexceptions import CX\n\ntry:\n from pymongo import MongoClient\n from pymongo.errors import ConnectionFailure, ConfigurationError\n\n pymongo_loaded = True\nexcept ModuleNotFoundError:\n # FIXME: log message\n pymongo_loaded = False\n\nmongodb = None\n\n\ndef __connect(configfile: str = \"/etc/cobbler/mongodb.conf\"):\n \"\"\"\n Reads the config file for mongodb and then connects to the mongodb.\n \"\"\"\n if not pathlib.Path(configfile).is_file():\n raise FileNotFoundError(\n \"Specified Cobbler MongoDB config file could not be found!\"\n )\n\n cp = ConfigParser()\n try:\n cp.read(configfile)\n except configparser.Error as cp_error:\n raise configparser.Error(\n \"Could not read Cobbler MongoDB config file!\"\n ) from cp_error\n\n host = cp.get(\"connection\", \"host\", fallback=\"localhost\")\n port = cp.getint(\"connection\", \"port\", fallback=27017)\n # pylint: disable=global-statement\n global mongodb\n mongodb = MongoClient(host, port)[\"cobbler\"]\n try:\n # The ismaster command is cheap and doesn't require auth.\n mongodb.admin.command(\"ismaster\")\n except ConnectionFailure as e:\n # FIXME: log error\n raise CX('Unable to connect to Mongo database or get database \"cobbler\"') from e\n except ConfigurationError as e:\n raise CX(\n \"The configuration of the MongoDB connection isn't correct, please check the Cobbler settings.\"\n ) from e\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n # FIXME: only run this if enabled.\n if not pymongo_loaded:\n return \"\"\n return \"serializer\"\n\n\ndef what() -> str:\n \"\"\"\n Module identification function\n \"\"\"\n return \"serializer/mongodb\"\n\n\ndef serialize_item(collection, item):\n \"\"\"\n Save a collection item to database.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n __connect()\n collection = mongodb[collection.collection_type()]\n data = collection.find_one({\"name\": item.name})\n if data:\n collection.update({\"name\": item.name}, item.serialize())\n else:\n collection.insert(item.serialize())\n\n\ndef serialize_delete(collection, item):\n \"\"\"\n Delete a collection item from database.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n __connect()\n collection = mongodb[collection.collection_type()]\n collection.remove({\"name\": item.name})\n\n\ndef serialize(collection):\n \"\"\"\n Save a collection to database\n\n :param collection: collection\n \"\"\"\n\n # TODO: error detection\n ctype = collection.collection_type()\n if ctype != \"settings\":\n for x in collection:\n serialize_item(collection, x)\n\n\ndef deserialize_raw(collection_type: str):\n \"\"\"\n Get a collection from mongodb and parse it into an object.\n\n :param collection_type: The collection type to fetch.\n :return: The first element of the collection requested.\n \"\"\"\n if collection_type == \"settings\":\n return settings.read_settings_file()\n else:\n __connect()\n collection = mongodb[collection_type]\n return collection.find()\n\n\ndef deserialize(collection, topological: bool = True):\n \"\"\"\n Load a collection from the database.\n\n :param 
collection: The collection to deserialize.\n :param topological: If the collection list should be sorted by the collection dict depth value or not.\n \"\"\"\n\n datastruct = deserialize_raw(collection.collection_type())\n if topological and type(datastruct) == list:\n datastruct.sort(key=lambda x: x[\"depth\"])\n if type(datastruct) == dict:\n collection.from_dict(datastruct)\n elif type(datastruct) == list:\n collection.from_list(datastruct)\n", "path": "cobbler/modules/serializers/mongodb.py"}]} | 1,931 | 715 |
gh_patches_debug_37710 | rasdani/github-patches | git_diff | localstack__localstack-4575 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: State Machine references don't get resolved properly
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Lambda refs get lost: the `${...}` placeholders supplied via `DefinitionSubstitutions` are never resolved, so the deployed state machine definition does not point at the Lambda functions.
### Expected Behavior
Lambda refs work in state machines
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker run localstack/localstack
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
awslocal s3 mb s3://mybucket
### Environment
```markdown
- OS:
- LocalStack:
```
### Anything else?
This is based on a conversation I had with @dominikschubert
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/utils/generic/wait_utils.py`
Content:
```
1 import time
2 from typing import Callable
3
4 from typing_extensions import Literal
5
6
7 def wait_until(
8 fn: Callable[[], bool],
9 wait: float = 1.0,
10 max_retries: int = 10,
11 strategy: Literal["exponential", "static", "linear"] = "exponential",
12 _retries: int = 0,
13 _max_wait: float = 240,
14 ) -> None:
15 """waits until a given condition is true, rechecking it periodically"""
16 if max_retries < _retries:
17 return
18 completed = fn()
19 if not completed:
20 if wait > _max_wait:
21 return
22 time.sleep(wait)
23 next_wait = wait # default: static
24 if strategy == "linear":
25 next_wait = (wait / _retries) * (_retries + 1)
26 elif strategy == "exponential":
27 next_wait = wait ** 2
28 wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)
29
```
Path: `localstack/services/cloudformation/models/stepfunctions.py`
Content:
```
1 from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME
2 from localstack.services.cloudformation.service_models import GenericBaseModel
3 from localstack.utils.aws import aws_stack
4
5
6 class SFNActivity(GenericBaseModel):
7 @staticmethod
8 def cloudformation_type():
9 return "AWS::StepFunctions::Activity"
10
11 def fetch_state(self, stack_name, resources):
12 activity_arn = self.physical_resource_id
13 if not activity_arn:
14 return None
15 client = aws_stack.connect_to_service("stepfunctions")
16 result = client.describe_activity(activityArn=activity_arn)
17 return result
18
19 @staticmethod
20 def get_deploy_templates():
21 return {
22 "create": {
23 "function": "create_activity",
24 "parameters": {"name": ["Name", PLACEHOLDER_RESOURCE_NAME], "tags": "Tags"},
25 },
26 "delete": {
27 "function": "delete_activity",
28 "parameters": {"activityArn": "PhysicalResourceId"},
29 },
30 }
31
32
33 class SFNStateMachine(GenericBaseModel):
34 @staticmethod
35 def cloudformation_type():
36 return "AWS::StepFunctions::StateMachine"
37
38 def get_resource_name(self):
39 return self.props.get("StateMachineName")
40
41 def get_physical_resource_id(self, attribute=None, **kwargs):
42 return self.props.get("stateMachineArn")
43
44 def fetch_state(self, stack_name, resources):
45 sm_name = self.props.get("StateMachineName") or self.resource_id
46 sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)
47 sfn_client = aws_stack.connect_to_service("stepfunctions")
48 state_machines = sfn_client.list_state_machines()["stateMachines"]
49 sm_arn = [m["stateMachineArn"] for m in state_machines if m["name"] == sm_name]
50 if not sm_arn:
51 return None
52 result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])
53 return result
54
55 def update_resource(self, new_resource, stack_name, resources):
56 props = new_resource["Properties"]
57 client = aws_stack.connect_to_service("stepfunctions")
58 sm_arn = self.props.get("stateMachineArn")
59 if not sm_arn:
60 self.state = self.fetch_state(stack_name=stack_name, resources=resources)
61 sm_arn = self.state["stateMachineArn"]
62 kwargs = {
63 "stateMachineArn": sm_arn,
64 "definition": props["DefinitionString"],
65 }
66 return client.update_state_machine(**kwargs)
67
68 @staticmethod
69 def get_deploy_templates():
70 return {
71 "create": {
72 "function": "create_state_machine",
73 "parameters": {
74 "name": ["StateMachineName", PLACEHOLDER_RESOURCE_NAME],
75 "definition": "DefinitionString",
76 "roleArn": "RoleArn",
77 },
78 },
79 "delete": {
80 "function": "delete_state_machine",
81 "parameters": {"stateMachineArn": "PhysicalResourceId"},
82 },
83 }
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/cloudformation/models/stepfunctions.py b/localstack/services/cloudformation/models/stepfunctions.py
--- a/localstack/services/cloudformation/models/stepfunctions.py
+++ b/localstack/services/cloudformation/models/stepfunctions.py
@@ -1,3 +1,6 @@
+import re
+from typing import Dict
+
from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME
from localstack.services.cloudformation.service_models import GenericBaseModel
from localstack.utils.aws import aws_stack
@@ -65,19 +68,43 @@
}
return client.update_state_machine(**kwargs)
- @staticmethod
- def get_deploy_templates():
+ @classmethod
+ def get_deploy_templates(cls):
+ def _create_params(params, **kwargs):
+ def _get_definition(params):
+ definition_str = params.get("DefinitionString")
+ substitutions = params.get("DefinitionSubstitutions")
+ if substitutions is not None:
+ definition_str = _apply_substitutions(definition_str, substitutions)
+ return definition_str
+
+ return {
+ "name": params.get("StateMachineName", PLACEHOLDER_RESOURCE_NAME),
+ "definition": _get_definition(params),
+ "roleArn": params.get("RoleArn"),
+ "type": params.get("StateMachineTyp", None),
+ }
+
return {
"create": {
"function": "create_state_machine",
- "parameters": {
- "name": ["StateMachineName", PLACEHOLDER_RESOURCE_NAME],
- "definition": "DefinitionString",
- "roleArn": "RoleArn",
- },
+ "parameters": _create_params,
},
"delete": {
"function": "delete_state_machine",
"parameters": {"stateMachineArn": "PhysicalResourceId"},
},
}
+
+
+def _apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:
+ substitution_regex = re.compile("\\${[a-zA-Z0-9_]+}") # might be a bit too strict in some cases
+ tokens = substitution_regex.findall(definition)
+ result = definition
+ for token in tokens:
+ raw_token = token[2:-1] # strip ${ and }
+ if raw_token not in substitutions.keys():
+ raise
+ result = result.replace(token, substitutions[raw_token])
+
+ return result
diff --git a/localstack/utils/generic/wait_utils.py b/localstack/utils/generic/wait_utils.py
--- a/localstack/utils/generic/wait_utils.py
+++ b/localstack/utils/generic/wait_utils.py
@@ -24,5 +24,5 @@
if strategy == "linear":
next_wait = (wait / _retries) * (_retries + 1)
elif strategy == "exponential":
- next_wait = wait ** 2
+ next_wait = wait * 2
wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)
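For reference, a standalone check of the substitution behaviour that the new `_apply_substitutions` helper provides (the state-machine definition and Lambda ARN below are made up for illustration):

```python
import re
from typing import Dict


def apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:
    # Mirrors the helper added in the patch: replace every ${Token} placeholder.
    for token in re.findall(r"\$\{[a-zA-Z0-9_]+\}", definition):
        key = token[2:-1]  # strip ${ and }
        if key not in substitutions:
            raise KeyError(f"No substitution provided for {token}")
        definition = definition.replace(token, substitutions[key])
    return definition


definition = '{"StartAt": "Hello", "States": {"Hello": {"Type": "Task", "Resource": "${HelloHandlerArn}", "End": true}}}'
print(apply_substitutions(definition, {"HelloHandlerArn": "arn:aws:lambda:us-east-1:000000000000:function:hello"}))
```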
| {"golden_diff": "diff --git a/localstack/services/cloudformation/models/stepfunctions.py b/localstack/services/cloudformation/models/stepfunctions.py\n--- a/localstack/services/cloudformation/models/stepfunctions.py\n+++ b/localstack/services/cloudformation/models/stepfunctions.py\n@@ -1,3 +1,6 @@\n+import re\n+from typing import Dict\n+\n from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME\n from localstack.services.cloudformation.service_models import GenericBaseModel\n from localstack.utils.aws import aws_stack\n@@ -65,19 +68,43 @@\n }\n return client.update_state_machine(**kwargs)\n \n- @staticmethod\n- def get_deploy_templates():\n+ @classmethod\n+ def get_deploy_templates(cls):\n+ def _create_params(params, **kwargs):\n+ def _get_definition(params):\n+ definition_str = params.get(\"DefinitionString\")\n+ substitutions = params.get(\"DefinitionSubstitutions\")\n+ if substitutions is not None:\n+ definition_str = _apply_substitutions(definition_str, substitutions)\n+ return definition_str\n+\n+ return {\n+ \"name\": params.get(\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME),\n+ \"definition\": _get_definition(params),\n+ \"roleArn\": params.get(\"RoleArn\"),\n+ \"type\": params.get(\"StateMachineTyp\", None),\n+ }\n+\n return {\n \"create\": {\n \"function\": \"create_state_machine\",\n- \"parameters\": {\n- \"name\": [\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME],\n- \"definition\": \"DefinitionString\",\n- \"roleArn\": \"RoleArn\",\n- },\n+ \"parameters\": _create_params,\n },\n \"delete\": {\n \"function\": \"delete_state_machine\",\n \"parameters\": {\"stateMachineArn\": \"PhysicalResourceId\"},\n },\n }\n+\n+\n+def _apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:\n+ substitution_regex = re.compile(\"\\\\${[a-zA-Z0-9_]+}\") # might be a bit too strict in some cases\n+ tokens = substitution_regex.findall(definition)\n+ result = definition\n+ for token in tokens:\n+ raw_token = token[2:-1] # strip ${ and }\n+ if raw_token not in substitutions.keys():\n+ raise\n+ result = result.replace(token, substitutions[raw_token])\n+\n+ return result\ndiff --git a/localstack/utils/generic/wait_utils.py b/localstack/utils/generic/wait_utils.py\n--- a/localstack/utils/generic/wait_utils.py\n+++ b/localstack/utils/generic/wait_utils.py\n@@ -24,5 +24,5 @@\n if strategy == \"linear\":\n next_wait = (wait / _retries) * (_retries + 1)\n elif strategy == \"exponential\":\n- next_wait = wait ** 2\n+ next_wait = wait * 2\n wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)\n", "issue": "bug: State Machine references don't get resolved properly\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nLambda refs get lost\n\n### Expected Behavior\n\nLambda refs work in state machines\n\n### How are you starting LocalStack?\n\nWith a docker-compose file\n\n### Steps To Reproduce\n\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\n docker run localstack/localstack\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n awslocal s3 mb s3://mybucket\r\n\n\n### Environment\n\n```markdown\n- OS: \r\n- LocalStack:\n```\n\n\n### Anything else?\n\nThis is based on a conversation I had with @dominikschubert \n", "before_files": [{"content": "import time\nfrom typing import Callable\n\nfrom typing_extensions import Literal\n\n\ndef wait_until(\n fn: 
Callable[[], bool],\n wait: float = 1.0,\n max_retries: int = 10,\n strategy: Literal[\"exponential\", \"static\", \"linear\"] = \"exponential\",\n _retries: int = 0,\n _max_wait: float = 240,\n) -> None:\n \"\"\"waits until a given condition is true, rechecking it periodically\"\"\"\n if max_retries < _retries:\n return\n completed = fn()\n if not completed:\n if wait > _max_wait:\n return\n time.sleep(wait)\n next_wait = wait # default: static\n if strategy == \"linear\":\n next_wait = (wait / _retries) * (_retries + 1)\n elif strategy == \"exponential\":\n next_wait = wait ** 2\n wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)\n", "path": "localstack/utils/generic/wait_utils.py"}, {"content": "from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME\nfrom localstack.services.cloudformation.service_models import GenericBaseModel\nfrom localstack.utils.aws import aws_stack\n\n\nclass SFNActivity(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::Activity\"\n\n def fetch_state(self, stack_name, resources):\n activity_arn = self.physical_resource_id\n if not activity_arn:\n return None\n client = aws_stack.connect_to_service(\"stepfunctions\")\n result = client.describe_activity(activityArn=activity_arn)\n return result\n\n @staticmethod\n def get_deploy_templates():\n return {\n \"create\": {\n \"function\": \"create_activity\",\n \"parameters\": {\"name\": [\"Name\", PLACEHOLDER_RESOURCE_NAME], \"tags\": \"Tags\"},\n },\n \"delete\": {\n \"function\": \"delete_activity\",\n \"parameters\": {\"activityArn\": \"PhysicalResourceId\"},\n },\n }\n\n\nclass SFNStateMachine(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::StateMachine\"\n\n def get_resource_name(self):\n return self.props.get(\"StateMachineName\")\n\n def get_physical_resource_id(self, attribute=None, **kwargs):\n return self.props.get(\"stateMachineArn\")\n\n def fetch_state(self, stack_name, resources):\n sm_name = self.props.get(\"StateMachineName\") or self.resource_id\n sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)\n sfn_client = aws_stack.connect_to_service(\"stepfunctions\")\n state_machines = sfn_client.list_state_machines()[\"stateMachines\"]\n sm_arn = [m[\"stateMachineArn\"] for m in state_machines if m[\"name\"] == sm_name]\n if not sm_arn:\n return None\n result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])\n return result\n\n def update_resource(self, new_resource, stack_name, resources):\n props = new_resource[\"Properties\"]\n client = aws_stack.connect_to_service(\"stepfunctions\")\n sm_arn = self.props.get(\"stateMachineArn\")\n if not sm_arn:\n self.state = self.fetch_state(stack_name=stack_name, resources=resources)\n sm_arn = self.state[\"stateMachineArn\"]\n kwargs = {\n \"stateMachineArn\": sm_arn,\n \"definition\": props[\"DefinitionString\"],\n }\n return client.update_state_machine(**kwargs)\n\n @staticmethod\n def get_deploy_templates():\n return {\n \"create\": {\n \"function\": \"create_state_machine\",\n \"parameters\": {\n \"name\": [\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME],\n \"definition\": \"DefinitionString\",\n \"roleArn\": \"RoleArn\",\n },\n },\n \"delete\": {\n \"function\": \"delete_state_machine\",\n \"parameters\": {\"stateMachineArn\": \"PhysicalResourceId\"},\n },\n }\n", "path": "localstack/services/cloudformation/models/stepfunctions.py"}], "after_files": [{"content": "import time\nfrom 
typing import Callable\n\nfrom typing_extensions import Literal\n\n\ndef wait_until(\n fn: Callable[[], bool],\n wait: float = 1.0,\n max_retries: int = 10,\n strategy: Literal[\"exponential\", \"static\", \"linear\"] = \"exponential\",\n _retries: int = 0,\n _max_wait: float = 240,\n) -> None:\n \"\"\"waits until a given condition is true, rechecking it periodically\"\"\"\n if max_retries < _retries:\n return\n completed = fn()\n if not completed:\n if wait > _max_wait:\n return\n time.sleep(wait)\n next_wait = wait # default: static\n if strategy == \"linear\":\n next_wait = (wait / _retries) * (_retries + 1)\n elif strategy == \"exponential\":\n next_wait = wait * 2\n wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)\n", "path": "localstack/utils/generic/wait_utils.py"}, {"content": "import re\nfrom typing import Dict\n\nfrom localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME\nfrom localstack.services.cloudformation.service_models import GenericBaseModel\nfrom localstack.utils.aws import aws_stack\n\n\nclass SFNActivity(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::Activity\"\n\n def fetch_state(self, stack_name, resources):\n activity_arn = self.physical_resource_id\n if not activity_arn:\n return None\n client = aws_stack.connect_to_service(\"stepfunctions\")\n result = client.describe_activity(activityArn=activity_arn)\n return result\n\n @staticmethod\n def get_deploy_templates():\n return {\n \"create\": {\n \"function\": \"create_activity\",\n \"parameters\": {\"name\": [\"Name\", PLACEHOLDER_RESOURCE_NAME], \"tags\": \"Tags\"},\n },\n \"delete\": {\n \"function\": \"delete_activity\",\n \"parameters\": {\"activityArn\": \"PhysicalResourceId\"},\n },\n }\n\n\nclass SFNStateMachine(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::StateMachine\"\n\n def get_resource_name(self):\n return self.props.get(\"StateMachineName\")\n\n def get_physical_resource_id(self, attribute=None, **kwargs):\n return self.props.get(\"stateMachineArn\")\n\n def fetch_state(self, stack_name, resources):\n sm_name = self.props.get(\"StateMachineName\") or self.resource_id\n sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)\n sfn_client = aws_stack.connect_to_service(\"stepfunctions\")\n state_machines = sfn_client.list_state_machines()[\"stateMachines\"]\n sm_arn = [m[\"stateMachineArn\"] for m in state_machines if m[\"name\"] == sm_name]\n if not sm_arn:\n return None\n result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])\n return result\n\n def update_resource(self, new_resource, stack_name, resources):\n props = new_resource[\"Properties\"]\n client = aws_stack.connect_to_service(\"stepfunctions\")\n sm_arn = self.props.get(\"stateMachineArn\")\n if not sm_arn:\n self.state = self.fetch_state(stack_name=stack_name, resources=resources)\n sm_arn = self.state[\"stateMachineArn\"]\n kwargs = {\n \"stateMachineArn\": sm_arn,\n \"definition\": props[\"DefinitionString\"],\n }\n return client.update_state_machine(**kwargs)\n\n @classmethod\n def get_deploy_templates(cls):\n def _create_params(params, **kwargs):\n def _get_definition(params):\n definition_str = params.get(\"DefinitionString\")\n substitutions = params.get(\"DefinitionSubstitutions\")\n if substitutions is not None:\n definition_str = _apply_substitutions(definition_str, substitutions)\n return definition_str\n\n return {\n \"name\": 
params.get(\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME),\n \"definition\": _get_definition(params),\n \"roleArn\": params.get(\"RoleArn\"),\n \"type\": params.get(\"StateMachineTyp\", None),\n }\n\n return {\n \"create\": {\n \"function\": \"create_state_machine\",\n \"parameters\": _create_params,\n },\n \"delete\": {\n \"function\": \"delete_state_machine\",\n \"parameters\": {\"stateMachineArn\": \"PhysicalResourceId\"},\n },\n }\n\n\ndef _apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:\n substitution_regex = re.compile(\"\\\\${[a-zA-Z0-9_]+}\") # might be a bit too strict in some cases\n tokens = substitution_regex.findall(definition)\n result = definition\n for token in tokens:\n raw_token = token[2:-1] # strip ${ and }\n if raw_token not in substitutions.keys():\n raise\n result = result.replace(token, substitutions[raw_token])\n\n return result\n", "path": "localstack/services/cloudformation/models/stepfunctions.py"}]} | 1,533 | 654 |
gh_patches_debug_19491 | rasdani/github-patches | git_diff | sunpy__sunpy-5493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix CROTA keyword in EUI maps
Currently EUI maps have a `CROTA` keyword, which by the FITS standard should really be a `CROTA2` keyword. This results in the warning
```python
/home/docs/checkouts/readthedocs.org/user_builds/solar-orbiter-python/envs/latest/lib/python3.8/site-packages/astropy/wcs/wcs.py:482: FITSFixedWarning: CROTA = 2.486914995997215 / [deg] rotation angle
keyword looks very much like CROTAn but isn't.
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
```
It would be good to:
- Check if CROTA is in the header and CROTA2 isn't
- If so, rename the CROTA keyword to CROTA2
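A minimal sketch of those two steps as a standalone helper over a plain header dict (in the real fix this logic would live in the EUI map class):

```python
def fix_crota(header: dict) -> dict:
    # Rename the non-standard CROTA keyword to CROTA2, but only when CROTA2
    # is not already present.
    if "CROTA" in header and "CROTA2" not in header:
        header["CROTA2"] = header.pop("CROTA")
    return header


print(fix_crota({"CROTA": 2.486914995997215}))  # {'CROTA2': 2.486914995997215}
```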
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/map/sources/solo.py`
Content:
```
1 """
2 Solar Orbiter Map subclass definitions.
3 """
4 import astropy.units as u
5 from astropy.coordinates import CartesianRepresentation
6 from astropy.visualization import ImageNormalize, LinearStretch
7
8 from sunpy.coordinates import HeliocentricInertial
9 from sunpy.map import GenericMap
10 from sunpy.map.sources.source_type import source_stretch
11 from sunpy.time import parse_time
12
13 __all__ = ['EUIMap']
14
15
16 class EUIMap(GenericMap):
17 """
18 EUI Image Map
19
20 The Extreme Ultraviolet Imager (EUI) is a remote sensing instrument onboard the
21 Solar Orbiter (SolO) spacecraft. EUI has three telescopes that image the Sun in
22 Lyman-alpha (1216 Å) and the EUV (174 Å and 304 Å). The three telescopes are the
23 Full Sun Imager (FSI) and two High Resolution Imagers (HRI). The FSI images the
24 whole Sun in both 174 Å and 304 Å. The EUV and Lyman-alpha HRI telescopes image a
25 1000"-by-1000" patch in 174 Å and 1216 Å, respectively.
26
27 References
28 ----------
29 * `Solar Orbiter Mission Page <https://sci.esa.int/web/solar-orbiter/>`__
30 * `EUI Instrument Page <https://wwwbis.sidc.be/EUI/EUI/EUI/EUI/EUI/>`__
31 * `Instrument Paper <https://doi.org/10.1051/0004-6361/201936663>`__
32 """
33
34 def __init__(self, data, header, **kwargs):
35 super().__init__(data, header, **kwargs)
36 self._nickname = self.detector
37 self.plot_settings['cmap'] = self._get_cmap_name()
38 self.plot_settings['norm'] = ImageNormalize(
39 stretch=source_stretch(self.meta, LinearStretch()), clip=False)
40
41 @property
42 def processing_level(self):
43 if self.meta.get('level'):
44 # The level number is prepended by the letter L
45 return int(self.meta.get('level')[1:])
46
47 @property
48 def exposure_time(self):
49 return self.meta.get('xposure', 0.0) * self.timeunit
50
51 @property
52 def date(self):
53 t = self.meta.get('date-avg')
54 timesys = self.meta.get('timesys')
55 return parse_time(t, scale=timesys.lower())
56
57 @property
58 def _supported_observer_coordinates(self):
59 return [(('hcix_obs', 'hciy_obs', 'hciz_obs'),
60 {'x': self.meta.get('hcix_obs'),
61 'y': self.meta.get('hciy_obs'),
62 'z': self.meta.get('hciz_obs'),
63 'unit': u.m,
64 'representation_type': CartesianRepresentation,
65 'frame': HeliocentricInertial})] + super()._supported_observer_coordinates
66
67 @classmethod
68 def is_datasource_for(cls, data, header, **kwargs):
69 """Determines if header corresponds to an EUI image"""
70 is_solo = 'solar orbiter' in str(header.get('obsrvtry', '')).lower()
71 is_eui = str(header.get('instrume', '')).startswith('EUI')
72 return is_solo and is_eui
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/map/sources/solo.py b/sunpy/map/sources/solo.py
--- a/sunpy/map/sources/solo.py
+++ b/sunpy/map/sources/solo.py
@@ -5,6 +5,7 @@
from astropy.coordinates import CartesianRepresentation
from astropy.visualization import ImageNormalize, LinearStretch
+from sunpy import log
from sunpy.coordinates import HeliocentricInertial
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
@@ -38,6 +39,10 @@
self.plot_settings['norm'] = ImageNormalize(
stretch=source_stretch(self.meta, LinearStretch()), clip=False)
+ if 'CROTA' in self.meta and 'CROTA2' not in self.meta:
+ log.debug("Renaming 'CROTA' to 'CROTA2'")
+ self.meta['CROTA2'] = self.meta.pop('CROTA')
+
@property
def processing_level(self):
if self.meta.get('level'):
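A hedged way to verify the change (the FITS file name is purely illustrative — any EUI L2 file should do): building the map's WCS should no longer print the `FITSFixedWarning` about `CROTA`.

```python
import sunpy.map

# File name is an assumption; substitute a local EUI L2 FITS file.
m = sunpy.map.Map("solo_L2_eui-fsi174-image_20201021T145510206_V03.fits")
m.wcs  # with the fix, no "CROTA ... keyword looks very much like CROTAn" warning
```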
| {"golden_diff": "diff --git a/sunpy/map/sources/solo.py b/sunpy/map/sources/solo.py\n--- a/sunpy/map/sources/solo.py\n+++ b/sunpy/map/sources/solo.py\n@@ -5,6 +5,7 @@\n from astropy.coordinates import CartesianRepresentation\n from astropy.visualization import ImageNormalize, LinearStretch\n \n+from sunpy import log\n from sunpy.coordinates import HeliocentricInertial\n from sunpy.map import GenericMap\n from sunpy.map.sources.source_type import source_stretch\n@@ -38,6 +39,10 @@\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, LinearStretch()), clip=False)\n \n+ if 'CROTA' in self.meta and 'CROTA2' not in self.meta:\n+ log.debug(\"Renaming 'CROTA' to 'CROTA2'\")\n+ self.meta['CROTA2'] = self.meta.pop('CROTA')\n+\n @property\n def processing_level(self):\n if self.meta.get('level'):\n", "issue": "Fix CROTA keyword in EUI maps\nCurrently EUI maps have a `CROTA` keyword, which by the FITS standard should really be a `CROTA2` keyword. This results in the warning\r\n```python\r\n/home/docs/checkouts/readthedocs.org/user_builds/solar-orbiter-python/envs/latest/lib/python3.8/site-packages/astropy/wcs/wcs.py:482: FITSFixedWarning: CROTA = 2.486914995997215 / [deg] rotation angle\r\nkeyword looks very much like CROTAn but isn't.\r\n wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,\r\n```\r\nIt would be good to\r\n- Check if CROTA is in the header and CROTA2 isn't\r\n- If so, rename the CROTA keyword to CROTA2\n", "before_files": [{"content": "\"\"\"\nSolar Orbiter Map subclass definitions.\n\"\"\"\nimport astropy.units as u\nfrom astropy.coordinates import CartesianRepresentation\nfrom astropy.visualization import ImageNormalize, LinearStretch\n\nfrom sunpy.coordinates import HeliocentricInertial\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\nfrom sunpy.time import parse_time\n\n__all__ = ['EUIMap']\n\n\nclass EUIMap(GenericMap):\n \"\"\"\n EUI Image Map\n\n The Extreme Ultraviolet Imager (EUI) is a remote sensing instrument onboard the\n Solar Orbiter (SolO) spacecraft. EUI has three telescopes that image the Sun in\n Lyman-alpha (1216 \u00c5) and the EUV (174 \u00c5 and 304 \u00c5). The three telescopes are the\n Full Sun Imager (FSI) and two High Resolution Imagers (HRI). The FSI images the\n whole Sun in both 174 \u00c5 and 304 \u00c5. 
The EUV and Lyman-alpha HRI telescopes image a\n 1000\"-by-1000\" patch in 174 \u00c5 and 1216 \u00c5, respectively.\n\n References\n ----------\n * `Solar Orbiter Mission Page <https://sci.esa.int/web/solar-orbiter/>`__\n * `EUI Instrument Page <https://wwwbis.sidc.be/EUI/EUI/EUI/EUI/EUI/>`__\n * `Instrument Paper <https://doi.org/10.1051/0004-6361/201936663>`__\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n self._nickname = self.detector\n self.plot_settings['cmap'] = self._get_cmap_name()\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, LinearStretch()), clip=False)\n\n @property\n def processing_level(self):\n if self.meta.get('level'):\n # The level number is prepended by the letter L\n return int(self.meta.get('level')[1:])\n\n @property\n def exposure_time(self):\n return self.meta.get('xposure', 0.0) * self.timeunit\n\n @property\n def date(self):\n t = self.meta.get('date-avg')\n timesys = self.meta.get('timesys')\n return parse_time(t, scale=timesys.lower())\n\n @property\n def _supported_observer_coordinates(self):\n return [(('hcix_obs', 'hciy_obs', 'hciz_obs'),\n {'x': self.meta.get('hcix_obs'),\n 'y': self.meta.get('hciy_obs'),\n 'z': self.meta.get('hciz_obs'),\n 'unit': u.m,\n 'representation_type': CartesianRepresentation,\n 'frame': HeliocentricInertial})] + super()._supported_observer_coordinates\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an EUI image\"\"\"\n is_solo = 'solar orbiter' in str(header.get('obsrvtry', '')).lower()\n is_eui = str(header.get('instrume', '')).startswith('EUI')\n return is_solo and is_eui\n", "path": "sunpy/map/sources/solo.py"}], "after_files": [{"content": "\"\"\"\nSolar Orbiter Map subclass definitions.\n\"\"\"\nimport astropy.units as u\nfrom astropy.coordinates import CartesianRepresentation\nfrom astropy.visualization import ImageNormalize, LinearStretch\n\nfrom sunpy import log\nfrom sunpy.coordinates import HeliocentricInertial\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\nfrom sunpy.time import parse_time\n\n__all__ = ['EUIMap']\n\n\nclass EUIMap(GenericMap):\n \"\"\"\n EUI Image Map\n\n The Extreme Ultraviolet Imager (EUI) is a remote sensing instrument onboard the\n Solar Orbiter (SolO) spacecraft. EUI has three telescopes that image the Sun in\n Lyman-alpha (1216 \u00c5) and the EUV (174 \u00c5 and 304 \u00c5). The three telescopes are the\n Full Sun Imager (FSI) and two High Resolution Imagers (HRI). The FSI images the\n whole Sun in both 174 \u00c5 and 304 \u00c5. 
The EUV and Lyman-alpha HRI telescopes image a\n 1000\"-by-1000\" patch in 174 \u00c5 and 1216 \u00c5, respectively.\n\n References\n ----------\n * `Solar Orbiter Mission Page <https://sci.esa.int/web/solar-orbiter/>`__\n * `EUI Instrument Page <https://wwwbis.sidc.be/EUI/EUI/EUI/EUI/EUI/>`__\n * `Instrument Paper <https://doi.org/10.1051/0004-6361/201936663>`__\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n self._nickname = self.detector\n self.plot_settings['cmap'] = self._get_cmap_name()\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, LinearStretch()), clip=False)\n\n if 'CROTA' in self.meta and 'CROTA2' not in self.meta:\n log.debug(\"Renaming 'CROTA' to 'CROTA2'\")\n self.meta['CROTA2'] = self.meta.pop('CROTA')\n\n @property\n def processing_level(self):\n if self.meta.get('level'):\n # The level number is prepended by the letter L\n return int(self.meta.get('level')[1:])\n\n @property\n def exposure_time(self):\n return self.meta.get('xposure', 0.0) * self.timeunit\n\n @property\n def date(self):\n t = self.meta.get('date-avg')\n timesys = self.meta.get('timesys')\n return parse_time(t, scale=timesys.lower())\n\n @property\n def _supported_observer_coordinates(self):\n return [(('hcix_obs', 'hciy_obs', 'hciz_obs'),\n {'x': self.meta.get('hcix_obs'),\n 'y': self.meta.get('hciy_obs'),\n 'z': self.meta.get('hciz_obs'),\n 'unit': u.m,\n 'representation_type': CartesianRepresentation,\n 'frame': HeliocentricInertial})] + super()._supported_observer_coordinates\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an EUI image\"\"\"\n is_solo = 'solar orbiter' in str(header.get('obsrvtry', '')).lower()\n is_eui = str(header.get('instrume', '')).startswith('EUI')\n return is_solo and is_eui\n", "path": "sunpy/map/sources/solo.py"}]} | 1,344 | 233 |
gh_patches_debug_10058 | rasdani/github-patches | git_diff | docker__docker-py-1972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build context (.tar) is not prepared properly
Hey,
This morning I updated to version `3.1.1`; however, with this version I'm getting a weird error from the docker-engine build:
```
ERROR: Error processing tar file(exit status 1): mkdir /foodir/bardir: no such file or directory
```
and the actual build does not start.
It took me some time to realise this is related to the update I got this morning.
Reverting back to version `3.0.1`, I could build again.
*NOTE*: `/foodir/bardir` is censored due to the security policy at my company,
so for the sake of this issue, let's assume this is the context:
- Dockerfile
- foodir
- bardir
- file
Also, the path in the error did start with `/`, so I kept it there.
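A hypothetical reproduction sketch — the patch further down guards `walk()` against symlinked directories, so the unverified assumption here is that a directory symlink inside the build context is what triggers the error; the paths and Dockerfile are illustrative:

```python
import os
import tempfile

ctx = tempfile.mkdtemp()
os.makedirs(os.path.join(ctx, "foodir", "bardir"))
with open(os.path.join(ctx, "foodir", "bardir", "file"), "w") as f:
    f.write("hello\n")
with open(os.path.join(ctx, "Dockerfile"), "w") as f:
    f.write("FROM busybox\nCOPY . /src\n")
os.symlink(os.path.join(ctx, "foodir"), os.path.join(ctx, "foolink"))  # assumed trigger

# import docker
# docker.from_env().images.build(path=ctx)  # reported to fail on docker-py 3.1.x
```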
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/build.py`
Content:
```
1 import os
2 import re
3
4 from ..constants import IS_WINDOWS_PLATFORM
5 from fnmatch import fnmatch
6 from itertools import chain
7 from .utils import create_archive
8
9
10 def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
11 root = os.path.abspath(path)
12 exclude = exclude or []
13 return create_archive(
14 files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
15 root=root, fileobj=fileobj, gzip=gzip
16 )
17
18
19 _SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
20
21
22 def exclude_paths(root, patterns, dockerfile=None):
23 """
24 Given a root directory path and a list of .dockerignore patterns, return
25 an iterator of all paths (both regular files and directories) in the root
26 directory that do *not* match any of the patterns.
27
28 All paths returned are relative to the root.
29 """
30
31 if dockerfile is None:
32 dockerfile = 'Dockerfile'
33
34 def split_path(p):
35 return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
36
37 def normalize(p):
38 # Leading and trailing slashes are not relevant. Yes,
39 # "foo.py/" must exclude the "foo.py" regular file. "."
40 # components are not relevant either, even if the whole
41 # pattern is only ".", as the Docker reference states: "For
42 # historical reasons, the pattern . is ignored."
43 # ".." component must be cleared with the potential previous
44 # component, regardless of whether it exists: "A preprocessing
45 # step [...] eliminates . and .. elements using Go's
46 # filepath.".
47 i = 0
48 split = split_path(p)
49 while i < len(split):
50 if split[i] == '..':
51 del split[i]
52 if i > 0:
53 del split[i - 1]
54 i -= 1
55 else:
56 i += 1
57 return split
58
59 patterns = (
60 (True, normalize(p[1:]))
61 if p.startswith('!') else
62 (False, normalize(p))
63 for p in patterns)
64 patterns = list(reversed(list(chain(
65 # Exclude empty patterns such as "." or the empty string.
66 filter(lambda p: p[1], patterns),
67 # Always include the Dockerfile and .dockerignore
68 [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))
69 return set(walk(root, patterns))
70
71
72 def walk(root, patterns, default=True):
73 """
74 A collection of file lying below root that should be included according to
75 patterns.
76 """
77
78 def match(p):
79 if p[1][0] == '**':
80 rec = (p[0], p[1][1:])
81 return [p] + (match(rec) if rec[1] else [rec])
82 elif fnmatch(f, p[1][0]):
83 return [(p[0], p[1][1:])]
84 else:
85 return []
86
87 for f in os.listdir(root):
88 cur = os.path.join(root, f)
89 # The patterns if recursing in that directory.
90 sub = list(chain(*(match(p) for p in patterns)))
91 # Whether this file is explicitely included / excluded.
92 hit = next((p[0] for p in sub if not p[1]), None)
93 # Whether this file is implicitely included / excluded.
94 matched = default if hit is None else hit
95 sub = list(filter(lambda p: p[1], sub))
96 if os.path.isdir(cur):
97 # Entirely skip directories if there are no chance any subfile will
98 # be included.
99 if all(not p[0] for p in sub) and not matched:
100 continue
101 # I think this would greatly speed up dockerignore handling by not
102 # recursing into directories we are sure would be entirely
103 # included, and only yielding the directory itself, which will be
104 # recursively archived anyway. However the current unit test expect
105 # the full list of subfiles and I'm not 100% sure it would make no
106 # difference yet.
107 # if all(p[0] for p in sub) and matched:
108 # yield f
109 # continue
110 children = False
111 for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
112 yield r
113 children = True
114 # The current unit tests expect directories only under those
115 # conditions. It might be simplifiable though.
116 if (not sub or not children) and hit or hit is None and default:
117 yield f
118 elif matched:
119 yield f
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/utils/build.py b/docker/utils/build.py
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -93,7 +93,7 @@
# Whether this file is implicitely included / excluded.
matched = default if hit is None else hit
sub = list(filter(lambda p: p[1], sub))
- if os.path.isdir(cur):
+ if os.path.isdir(cur) and not os.path.islink(cur):
# Entirely skip directories if there are no chance any subfile will
# be included.
if all(not p[0] for p in sub) and not matched:
| {"golden_diff": "diff --git a/docker/utils/build.py b/docker/utils/build.py\n--- a/docker/utils/build.py\n+++ b/docker/utils/build.py\n@@ -93,7 +93,7 @@\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n- if os.path.isdir(cur):\n+ if os.path.isdir(cur) and not os.path.islink(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n", "issue": "Build context (.tar) is not prepared properly\nHey,\r\n\r\nThis morning I've updated to version `3.1.1` however, using this version i'm getting wired error from docker-engine build:\r\n```\r\nERROR: Error processing tar file(exit status 1): mkdir /foodir/bardir: no such file or directory\r\n```\r\nand the actual building does not start.\r\ntook me some time to realise this is related to update i got this morning, \r\nReverting back to version `3.0.1`, I could build again.\r\n\r\n*NOTE*: `/foodir/bardir` is censored due to security policy in my company,\r\nso for the sake of this issue, lets assume this is the context:\r\n- Dockerfile\r\n- foodir\r\n - bardir\r\n - file\r\n\r\nAlso, path in error did start with `/` so i kept it there.\r\n\n", "before_files": [{"content": "import os\nimport re\n\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom fnmatch import fnmatch\nfrom itertools import chain\nfrom .utils import create_archive\n\n\ndef tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):\n root = os.path.abspath(path)\n exclude = exclude or []\n return create_archive(\n files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),\n root=root, fileobj=fileobj, gzip=gzip\n )\n\n\n_SEP = re.compile('/|\\\\\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n def split_path(p):\n return [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n\n def normalize(p):\n # Leading and trailing slashes are not relevant. Yes,\n # \"foo.py/\" must exclude the \"foo.py\" regular file. \".\"\n # components are not relevant either, even if the whole\n # pattern is only \".\", as the Docker reference states: \"For\n # historical reasons, the pattern . is ignored.\"\n # \"..\" component must be cleared with the potential previous\n # component, regardless of whether it exists: \"A preprocessing\n # step [...] eliminates . and .. 
elements using Go's\n # filepath.\".\n i = 0\n split = split_path(p)\n while i < len(split):\n if split[i] == '..':\n del split[i]\n if i > 0:\n del split[i - 1]\n i -= 1\n else:\n i += 1\n return split\n\n patterns = (\n (True, normalize(p[1:]))\n if p.startswith('!') else\n (False, normalize(p))\n for p in patterns)\n patterns = list(reversed(list(chain(\n # Exclude empty patterns such as \".\" or the empty string.\n filter(lambda p: p[1], patterns),\n # Always include the Dockerfile and .dockerignore\n [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))\n return set(walk(root, patterns))\n\n\ndef walk(root, patterns, default=True):\n \"\"\"\n A collection of file lying below root that should be included according to\n patterns.\n \"\"\"\n\n def match(p):\n if p[1][0] == '**':\n rec = (p[0], p[1][1:])\n return [p] + (match(rec) if rec[1] else [rec])\n elif fnmatch(f, p[1][0]):\n return [(p[0], p[1][1:])]\n else:\n return []\n\n for f in os.listdir(root):\n cur = os.path.join(root, f)\n # The patterns if recursing in that directory.\n sub = list(chain(*(match(p) for p in patterns)))\n # Whether this file is explicitely included / excluded.\n hit = next((p[0] for p in sub if not p[1]), None)\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n if os.path.isdir(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n continue\n # I think this would greatly speed up dockerignore handling by not\n # recursing into directories we are sure would be entirely\n # included, and only yielding the directory itself, which will be\n # recursively archived anyway. However the current unit test expect\n # the full list of subfiles and I'm not 100% sure it would make no\n # difference yet.\n # if all(p[0] for p in sub) and matched:\n # yield f\n # continue\n children = False\n for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):\n yield r\n children = True\n # The current unit tests expect directories only under those\n # conditions. It might be simplifiable though.\n if (not sub or not children) and hit or hit is None and default:\n yield f\n elif matched:\n yield f\n", "path": "docker/utils/build.py"}], "after_files": [{"content": "import os\nimport re\n\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom fnmatch import fnmatch\nfrom itertools import chain\nfrom .utils import create_archive\n\n\ndef tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):\n root = os.path.abspath(path)\n exclude = exclude or []\n return create_archive(\n files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),\n root=root, fileobj=fileobj, gzip=gzip\n )\n\n\n_SEP = re.compile('/|\\\\\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n def split_path(p):\n return [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n\n def normalize(p):\n # Leading and trailing slashes are not relevant. Yes,\n # \"foo.py/\" must exclude the \"foo.py\" regular file. 
\".\"\n # components are not relevant either, even if the whole\n # pattern is only \".\", as the Docker reference states: \"For\n # historical reasons, the pattern . is ignored.\"\n # \"..\" component must be cleared with the potential previous\n # component, regardless of whether it exists: \"A preprocessing\n # step [...] eliminates . and .. elements using Go's\n # filepath.\".\n i = 0\n split = split_path(p)\n while i < len(split):\n if split[i] == '..':\n del split[i]\n if i > 0:\n del split[i - 1]\n i -= 1\n else:\n i += 1\n return split\n\n patterns = (\n (True, normalize(p[1:]))\n if p.startswith('!') else\n (False, normalize(p))\n for p in patterns)\n patterns = list(reversed(list(chain(\n # Exclude empty patterns such as \".\" or the empty string.\n filter(lambda p: p[1], patterns),\n # Always include the Dockerfile and .dockerignore\n [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))\n return set(walk(root, patterns))\n\n\ndef walk(root, patterns, default=True):\n \"\"\"\n A collection of file lying below root that should be included according to\n patterns.\n \"\"\"\n\n def match(p):\n if p[1][0] == '**':\n rec = (p[0], p[1][1:])\n return [p] + (match(rec) if rec[1] else [rec])\n elif fnmatch(f, p[1][0]):\n return [(p[0], p[1][1:])]\n else:\n return []\n\n for f in os.listdir(root):\n cur = os.path.join(root, f)\n # The patterns if recursing in that directory.\n sub = list(chain(*(match(p) for p in patterns)))\n # Whether this file is explicitely included / excluded.\n hit = next((p[0] for p in sub if not p[1]), None)\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n if os.path.isdir(cur) and not os.path.islink(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n continue\n # I think this would greatly speed up dockerignore handling by not\n # recursing into directories we are sure would be entirely\n # included, and only yielding the directory itself, which will be\n # recursively archived anyway. However the current unit test expect\n # the full list of subfiles and I'm not 100% sure it would make no\n # difference yet.\n # if all(p[0] for p in sub) and matched:\n # yield f\n # continue\n children = False\n for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):\n yield r\n children = True\n # The current unit tests expect directories only under those\n # conditions. It might be simplifiable though.\n if (not sub or not children) and hit or hit is None and default:\n yield f\n elif matched:\n yield f\n", "path": "docker/utils/build.py"}]} | 1,713 | 139 |
gh_patches_debug_742 | rasdani/github-patches | git_diff | streamlink__streamlink-3952 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add lxml dependency
### Checklist
- [X] This is a feature request and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin requests](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22feature+request%22)
### Description
Streamlink should finally switch to a proper HTML/XML parser for extracting data instead of using cheap regex workarounds which don't work properly. I've already commented on this issue last year:
https://github.com/streamlink/streamlink/issues/3241#issuecomment-706486239
The reason why I'm suggesting this again right now is that I was trying to fix the deutschewelle plugin (https://dw.com) yesterday and ran into issues with the `itertags` utility method, which is based on simple regexes for iterating HTML nodes and their attributes+body. `itertags` for example does not work with nested nodes, which makes adding ridiculous custom regexes necessary. Just take a look at this madness:
https://github.com/streamlink/streamlink/blob/3668770d608f0fab54d40a46acd6720a97f63775/src/streamlink/plugins/deutschewelle.py#L18-L29
With `lxml` (https://lxml.de/), HTML page contents can be parsed and the data extracted via XPath queries and/or the respective API methods. The methods are similar to Python's native `xml.etree.ElementTree`, which itself is considered too slow and unsafe in certain cases. I am by no means an expert regarding Python's standard library though, so if someone has better insight here, please share. Regarding packaging, this lib is available in basically every packaging system, and adding it as a dependency here only has benefits.
I'd suggest that we add `lxml` as a dependency now and start using it for extracting data from HTML documents. The validation schema methods could be improved for this as well. There's also the `parse_xml` utility method, which is currently based on the native module.
Comments?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import codecs
3 from os import environ, path
4 from sys import argv, path as sys_path
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10
11 data_files = []
12 deps = [
13 "requests>=2.26.0,<3.0",
14 "isodate",
15 "websocket-client>=0.58.0",
16 # Support for SOCKS proxies
17 "PySocks!=1.5.7,>=1.5.6",
18 ]
19
20 # for encrypted streams
21 if environ.get("STREAMLINK_USE_PYCRYPTO"):
22 deps.append("pycrypto")
23 else:
24 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
25 deps.append("pycryptodome>=3.4.3,<4")
26
27 # for localization
28 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
29 deps.append("pycountry")
30 else:
31 deps.append("iso-639")
32 deps.append("iso3166")
33
34 # When we build an egg for the Win32 bootstrap we don"t want dependency
35 # information built into it.
36 if environ.get("NO_DEPS"):
37 deps = []
38
39 this_directory = path.abspath(path.dirname(__file__))
40 srcdir = path.join(this_directory, "src/")
41 sys_path.insert(0, srcdir)
42
43 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
44 long_description = f.read()
45
46
47 def is_wheel_for_windows():
48 if "bdist_wheel" in argv:
49 names = ["win32", "win-amd64", "cygwin"]
50 length = len(argv)
51 for pos in range(argv.index("bdist_wheel") + 1, length):
52 if argv[pos] == "--plat-name" and pos + 1 < length:
53 return argv[pos + 1] in names
54 elif argv[pos][:12] == "--plat-name=":
55 return argv[pos][12:] in names
56 return False
57
58
59 entry_points = {
60 "console_scripts": ["streamlink=streamlink_cli.main:main"]
61 }
62
63 if is_wheel_for_windows():
64 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
65
66
67 additional_files = [
68 ("share/man/man1", ["docs/_build/man/streamlink.1"])
69 ]
70
71 for destdir, srcfiles in additional_files:
72 files = []
73 for srcfile in srcfiles:
74 if path.exists(srcfile):
75 files.append(srcfile)
76 if files:
77 data_files.append((destdir, files))
78
79
80 setup(name="streamlink",
81 version=versioneer.get_version(),
82 cmdclass=versioneer.get_cmdclass(),
83 description="Streamlink is a command-line utility that extracts streams "
84 "from various services and pipes them into a video player of "
85 "choice.",
86 long_description=long_description,
87 long_description_content_type="text/markdown",
88 url="https://github.com/streamlink/streamlink",
89 project_urls={
90 "Documentation": "https://streamlink.github.io/",
91 "Tracker": "https://github.com/streamlink/streamlink/issues",
92 "Source": "https://github.com/streamlink/streamlink",
93 "Funding": "https://opencollective.com/streamlink"
94 },
95 author="Streamlink",
96 # temp until we have a mailing list / global email
97 author_email="[email protected]",
98 license="Simplified BSD",
99 packages=find_packages("src"),
100 package_dir={"": "src"},
101 package_data={"streamlink.plugins": [".removed"]},
102 entry_points=entry_points,
103 data_files=data_files,
104 install_requires=deps,
105 test_suite="tests",
106 python_requires=">=3.6, <4",
107 classifiers=["Development Status :: 5 - Production/Stable",
108 "License :: OSI Approved :: BSD License",
109 "Environment :: Console",
110 "Intended Audience :: End Users/Desktop",
111 "Operating System :: POSIX",
112 "Operating System :: Microsoft :: Windows",
113 "Operating System :: MacOS",
114 "Programming Language :: Python :: 3",
115 "Programming Language :: Python :: 3 :: Only",
116 "Programming Language :: Python :: 3.6",
117 "Programming Language :: Python :: 3.7",
118 "Programming Language :: Python :: 3.8",
119 "Programming Language :: Python :: 3.9",
120 "Topic :: Internet :: WWW/HTTP",
121 "Topic :: Multimedia :: Sound/Audio",
122 "Topic :: Multimedia :: Video",
123 "Topic :: Utilities"])
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,6 +12,7 @@
deps = [
"requests>=2.26.0,<3.0",
"isodate",
+ "lxml>=4.6.3",
"websocket-client>=0.58.0",
# Support for SOCKS proxies
"PySocks!=1.5.7,>=1.5.6",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,6 +12,7 @@\n deps = [\n \"requests>=2.26.0,<3.0\",\n \"isodate\",\n+ \"lxml>=4.6.3\",\n \"websocket-client>=0.58.0\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n", "issue": "Add lxml dependency\n### Checklist\n\n- [X] This is a feature request and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin requests](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22feature+request%22)\n\n### Description\n\nStreamlink should finally switch to a proper HTML/XML parser for extracting data instead of using cheap regex workarounds which don't work properly. I've already commented on this issue last year:\r\nhttps://github.com/streamlink/streamlink/issues/3241#issuecomment-706486239\r\n\r\nThe reason why I'm suggesting this again right now is that I was trying to fix the deutschewelle plugin (https://dw.com) yesterday and ran into issues with the `itertags` utility method, which is based on simple regexes for iterating HTML nodes and their attributes+body. `itertags` for example does not work with nested nodes, which makes adding ridiculous custom regexes necessary. Just take a look at this madness:\r\nhttps://github.com/streamlink/streamlink/blob/3668770d608f0fab54d40a46acd6720a97f63775/src/streamlink/plugins/deutschewelle.py#L18-L29\r\n\r\nWith `lxml` (https://lxml.de/), HTML page contents can be parsed and the data extracted via XPath queries and/or the respective API methods. The methods are similar to python's native `xml.etree.ElementTree`, which itself is considered too slow and unsafe in certain cases. I am by no means an expert regarding python's standard library though, so if someone has better insight here, please share. In regards to packaging, this lib is available on basically every packaging system and adding it as a dependency here only has benefits.\r\n\r\nI'd suggest that we add `lxml` as a dependency now and start using it for extracting data from HTML documents. The validation schema methods could be improved for this as well. 
There's also the `parse_xml` utility method, which is currently based on the native module.\r\n\r\nComments?\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndata_files = []\ndeps = [\n \"requests>=2.26.0,<3.0\",\n \"isodate\",\n \"websocket-client>=0.58.0\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nadditional_files = [\n (\"share/man/man1\", [\"docs/_build/man/streamlink.1\"])\n]\n\nfor destdir, srcfiles in additional_files:\n files = []\n for srcfile in srcfiles:\n if path.exists(srcfile):\n files.append(srcfile)\n if files:\n data_files.append((destdir, files))\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\"streamlink.plugins\": [\".removed\"]},\n entry_points=entry_points,\n data_files=data_files,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n 
\"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndata_files = []\ndeps = [\n \"requests>=2.26.0,<3.0\",\n \"isodate\",\n \"lxml>=4.6.3\",\n \"websocket-client>=0.58.0\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nadditional_files = [\n (\"share/man/man1\", [\"docs/_build/man/streamlink.1\"])\n]\n\nfor destdir, srcfiles in additional_files:\n files = []\n for srcfile in srcfiles:\n if path.exists(srcfile):\n files.append(srcfile)\n if files:\n data_files.append((destdir, files))\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\"streamlink.plugins\": [\".removed\"]},\n entry_points=entry_points,\n data_files=data_files,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - 
Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]} | 2,015 | 106 |
gh_patches_debug_5701 | rasdani/github-patches | git_diff | getpelican__pelican-3094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A dead link on PyPI for the contributions and feedback
I just stumbled upon [Pelican's page on PyPI](https://pypi.org/project/pelican/) and found that the [link for the contributions and feedback](https://pypi.org/project/pelican/CONTRIBUTING.rst) is dead. Perhaps it needs to be updated?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from os import walk
4 from os.path import join, relpath
5
6 from setuptools import find_packages, setup
7
8
9 version = "4.8.0"
10
11 requires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments',
12 'docutils>=0.15', 'pytz >= 0a', 'blinker', 'unidecode',
13 'python-dateutil', 'rich']
14
15 entry_points = {
16 'console_scripts': [
17 'pelican = pelican.__main__:main',
18 'pelican-import = pelican.tools.pelican_import:main',
19 'pelican-quickstart = pelican.tools.pelican_quickstart:main',
20 'pelican-themes = pelican.tools.pelican_themes:main',
21 'pelican-plugins = pelican.plugins._utils:list_plugins'
22 ]
23 }
24
25 README = open('README.rst', encoding='utf-8').read()
26 CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()
27
28 description = '\n'.join([README, CHANGELOG])
29
30 setup(
31 name='pelican',
32 version=version,
33 url='https://getpelican.com/',
34 author='Justin Mayer',
35 author_email='[email protected]',
36 description="Static site generator supporting reStructuredText and "
37 "Markdown source content.",
38 project_urls={
39 'Documentation': 'https://docs.getpelican.com/',
40 'Funding': 'https://donate.getpelican.com/',
41 'Source': 'https://github.com/getpelican/pelican',
42 'Tracker': 'https://github.com/getpelican/pelican/issues',
43 },
44 keywords='static web site generator SSG reStructuredText Markdown',
45 license='AGPLv3',
46 long_description=description,
47 long_description_content_type='text/x-rst',
48 packages=find_packages(),
49 include_package_data=True, # includes all in MANIFEST.in if in package
50 # NOTE : This will collect any files that happen to be in the themes
51 # directory, even though they may not be checked into version control.
52 package_data={ # pelican/themes is not a package, so include manually
53 'pelican': [relpath(join(root, name), 'pelican')
54 for root, _, names in walk(join('pelican', 'themes'))
55 for name in names],
56 },
57 install_requires=requires,
58 extras_require={
59 'Markdown': ['markdown~=3.1.1']
60 },
61 entry_points=entry_points,
62 classifiers=[
63 'Development Status :: 5 - Production/Stable',
64 'Environment :: Console',
65 'Framework :: Pelican',
66 'License :: OSI Approved :: GNU Affero General Public License v3',
67 'Operating System :: OS Independent',
68 'Programming Language :: Python :: 3',
69 'Programming Language :: Python :: 3.7',
70 'Programming Language :: Python :: 3.8',
71 'Programming Language :: Python :: 3.9',
72 'Programming Language :: Python :: 3.10',
73 'Programming Language :: Python :: Implementation :: CPython',
74 'Topic :: Internet :: WWW/HTTP',
75 'Topic :: Software Development :: Libraries :: Python Modules',
76 ],
77 test_suite='pelican.tests',
78 )
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,13 @@
README = open('README.rst', encoding='utf-8').read()
CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()
+# Relative links in the README must be converted to absolute URL's
+# so that they render correctly on PyPI.
+README = README.replace(
+ "<CONTRIBUTING.rst>",
+ "<https://docs.getpelican.com/en/latest/contribute.html>",
+)
+
description = '\n'.join([README, CHANGELOG])
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,6 +25,13 @@\n README = open('README.rst', encoding='utf-8').read()\n CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n \n+# Relative links in the README must be converted to absolute URL's\n+# so that they render correctly on PyPI.\n+README = README.replace(\n+ \"<CONTRIBUTING.rst>\",\n+ \"<https://docs.getpelican.com/en/latest/contribute.html>\",\n+)\n+\n description = '\\n'.join([README, CHANGELOG])\n \n setup(\n", "issue": "A dead link on PyPI for the contributions and feedback\nI just stumbled upon [Pelican's page in PyPI](https://pypi.org/project/pelican/) and found that the l[ink for the contributions and feedback](https://pypi.org/project/pelican/CONTRIBUTING.rst) is dead. Perhaps, it needs to be updated?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom os import walk\nfrom os.path import join, relpath\n\nfrom setuptools import find_packages, setup\n\n\nversion = \"4.8.0\"\n\nrequires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments',\n 'docutils>=0.15', 'pytz >= 0a', 'blinker', 'unidecode',\n 'python-dateutil', 'rich']\n\nentry_points = {\n 'console_scripts': [\n 'pelican = pelican.__main__:main',\n 'pelican-import = pelican.tools.pelican_import:main',\n 'pelican-quickstart = pelican.tools.pelican_quickstart:main',\n 'pelican-themes = pelican.tools.pelican_themes:main',\n 'pelican-plugins = pelican.plugins._utils:list_plugins'\n ]\n}\n\nREADME = open('README.rst', encoding='utf-8').read()\nCHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n\ndescription = '\\n'.join([README, CHANGELOG])\n\nsetup(\n name='pelican',\n version=version,\n url='https://getpelican.com/',\n author='Justin Mayer',\n author_email='[email protected]',\n description=\"Static site generator supporting reStructuredText and \"\n \"Markdown source content.\",\n project_urls={\n 'Documentation': 'https://docs.getpelican.com/',\n 'Funding': 'https://donate.getpelican.com/',\n 'Source': 'https://github.com/getpelican/pelican',\n 'Tracker': 'https://github.com/getpelican/pelican/issues',\n },\n keywords='static web site generator SSG reStructuredText Markdown',\n license='AGPLv3',\n long_description=description,\n long_description_content_type='text/x-rst',\n packages=find_packages(),\n include_package_data=True, # includes all in MANIFEST.in if in package\n # NOTE : This will collect any files that happen to be in the themes\n # directory, even though they may not be checked into version control.\n package_data={ # pelican/themes is not a package, so include manually\n 'pelican': [relpath(join(root, name), 'pelican')\n for root, _, names in walk(join('pelican', 'themes'))\n for name in names],\n },\n install_requires=requires,\n extras_require={\n 'Markdown': ['markdown~=3.1.1']\n },\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Pelican',\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n test_suite='pelican.tests',\n)\n", "path": "setup.py"}], 
"after_files": [{"content": "#!/usr/bin/env python\n\nfrom os import walk\nfrom os.path import join, relpath\n\nfrom setuptools import find_packages, setup\n\n\nversion = \"4.8.0\"\n\nrequires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments',\n 'docutils>=0.15', 'pytz >= 0a', 'blinker', 'unidecode',\n 'python-dateutil', 'rich']\n\nentry_points = {\n 'console_scripts': [\n 'pelican = pelican.__main__:main',\n 'pelican-import = pelican.tools.pelican_import:main',\n 'pelican-quickstart = pelican.tools.pelican_quickstart:main',\n 'pelican-themes = pelican.tools.pelican_themes:main',\n 'pelican-plugins = pelican.plugins._utils:list_plugins'\n ]\n}\n\nREADME = open('README.rst', encoding='utf-8').read()\nCHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n\n# Relative links in the README must be converted to absolute URL's\n# so that they render correctly on PyPI.\nREADME = README.replace(\n \"<CONTRIBUTING.rst>\",\n \"<https://docs.getpelican.com/en/latest/contribute.html>\",\n)\n\ndescription = '\\n'.join([README, CHANGELOG])\n\nsetup(\n name='pelican',\n version=version,\n url='https://getpelican.com/',\n author='Justin Mayer',\n author_email='[email protected]',\n description=\"Static site generator supporting reStructuredText and \"\n \"Markdown source content.\",\n project_urls={\n 'Documentation': 'https://docs.getpelican.com/',\n 'Funding': 'https://donate.getpelican.com/',\n 'Source': 'https://github.com/getpelican/pelican',\n 'Tracker': 'https://github.com/getpelican/pelican/issues',\n },\n keywords='static web site generator SSG reStructuredText Markdown',\n license='AGPLv3',\n long_description=description,\n long_description_content_type='text/x-rst',\n packages=find_packages(),\n include_package_data=True, # includes all in MANIFEST.in if in package\n # NOTE : This will collect any files that happen to be in the themes\n # directory, even though they may not be checked into version control.\n package_data={ # pelican/themes is not a package, so include manually\n 'pelican': [relpath(join(root, name), 'pelican')\n for root, _, names in walk(join('pelican', 'themes'))\n for name in names],\n },\n install_requires=requires,\n extras_require={\n 'Markdown': ['markdown~=3.1.1']\n },\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Pelican',\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n test_suite='pelican.tests',\n)\n", "path": "setup.py"}]} | 1,194 | 143 |
gh_patches_debug_5379 | rasdani/github-patches | git_diff | cltk__cltk-399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compile Poeti d’Italia for CLTK
http://www.mqdq.it/mqdq/poetiditalia/indice_autori_alfa.jsp?scelta=AZ&path=metri_opere
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cltk/corpus/latin/corpora.py`
Content:
```
1 """Latin language corpora available for download or loading locally.
2 All remote corpora hosted by github on the cltk organization account, eg:
3 'http://github.com/cltk' + name
4 """
5
6 LATIN_CORPORA = [
7 {'encoding': 'utf-8',
8 'markup': 'tei_xml',
9 'location': 'remote',
10 'type': 'text',
11 'name': 'latin_text_perseus',
12 'origin': 'https://github.com/cltk/latin_text_perseus.git'},
13 {'encoding': 'utf-8',
14 'markup': 'xml',
15 'name': 'latin_treebank_perseus',
16 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',
17 'location': 'remote',
18 'type': 'treebank'},
19 {'encoding': 'utf-8',
20 'markup': 'plaintext',
21 'name': 'latin_treebank_perseus',
22 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',
23 'location': 'remote',
24 'type': 'text'},
25 {'encoding': 'utf-8',
26 'markup': 'plaintext',
27 'name': 'latin_text_latin_library',
28 'origin': 'https://github.com/cltk/latin_text_latin_library.git',
29 'location': 'remote',
30 'type': 'text'},
31 {'encoding': 'latin-1',
32 'markup': 'beta_code',
33 'name': '',
34 'location': 'local',
35 'name': 'phi5',
36 'origin': None,
37 'type': 'text'},
38 {'encoding': 'latin-1',
39 'markup': 'beta_code',
40 'origin': None,
41 'name': 'phi7',
42 'location': 'local',
43 'type': 'text'},
44 {'encoding': 'utf-8',
45 'markup': 'plaintext',
46 'name': 'latin_proper_names_cltk',
47 'origin': 'https://github.com/cltk/latin_proper_names_cltk.git',
48 'location': 'remote',
49 'type': 'lexicon'},
50 {'origin': 'https://github.com/cltk/latin_models_cltk.git',
51 'name': 'latin_models_cltk',
52 'location': 'remote',
53 'type': 'model'},
54 {'encoding': 'utf-8',
55 'markup': 'python',
56 'name': 'latin_pos_lemmata_cltk',
57 'origin': 'https://github.com/cltk/latin_pos_lemmata_cltk.git',
58 'location': 'remote',
59 'type': 'lemma'},
60 {'encoding': 'utf-8',
61 'markup': 'xml',
62 'name': 'latin_treebank_index_thomisticus',
63 'origin': 'https://github.com/cltk/latin_treebank_index_thomisticus.git',
64 'location': 'remote',
65 'type': 'treebank'},
66 {'encoding': 'xml',
67 'markup': 'plaintext',
68 'name': 'latin_lexica_perseus',
69 'origin': 'https://github.com/cltk/latin_lexica_perseus.git',
70 'location': 'remote',
71 'type': 'lexicon'},
72 {'encoding': 'utf-8',
73 'markup': 'plaintext',
74 'name': 'latin_training_set_sentence_cltk',
75 'origin': 'https://github.com/cltk/latin_training_set_sentence_cltk.git',
76 'location': 'remote',
77 'type': 'training_set'},
78 {'origin': 'https://github.com/cltk/latin_word2vec_cltk.git',
79 'name': 'latin_word2vec_cltk',
80 'location': 'remote',
81 'type': 'model'},
82 {'encoding': 'utf-8',
83 'markup': 'tei_xml',
84 'location': 'remote',
85 'type': 'text',
86 'name': 'latin_text_antique_digiliblt',
87 'origin': 'https://github.com/cltk/latin_text_antique_digiliblt.git'},
88 {'location': 'remote',
89 'type': 'text',
90 'name': 'latin_text_corpus_grammaticorum_latinorum',
91 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}
92 ]
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cltk/corpus/latin/corpora.py b/cltk/corpus/latin/corpora.py
--- a/cltk/corpus/latin/corpora.py
+++ b/cltk/corpus/latin/corpora.py
@@ -88,5 +88,9 @@
{'location': 'remote',
'type': 'text',
'name': 'latin_text_corpus_grammaticorum_latinorum',
- 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}
+ 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},
+ {'location': 'remote',
+ 'type': 'text',
+ 'name': 'latin_text_poeti_ditalia',
+ 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}
]
| {"golden_diff": "diff --git a/cltk/corpus/latin/corpora.py b/cltk/corpus/latin/corpora.py\n--- a/cltk/corpus/latin/corpora.py\n+++ b/cltk/corpus/latin/corpora.py\n@@ -88,5 +88,9 @@\n {'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_corpus_grammaticorum_latinorum',\n- 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}\n+ 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},\n+ {'location': 'remote',\n+ 'type': 'text',\n+ 'name': 'latin_text_poeti_ditalia',\n+ 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}\n ]\n", "issue": "Compile Poeti d\u2019Italia for CLTK\nhttp://www.mqdq.it/mqdq/poetiditalia/indice_autori_alfa.jsp?scelta=AZ&path=metri_opere\n\n", "before_files": [{"content": "\"\"\"Latin language corpora available for download or loading locally.\nAll remote corpora hosted by github on the cltk organization account, eg:\n'http://github.com/cltk' + name\n\"\"\"\n\nLATIN_CORPORA = [\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_perseus',\n 'origin': 'https://github.com/cltk/latin_text_perseus.git'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_text_latin_library',\n 'origin': 'https://github.com/cltk/latin_text_latin_library.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'name': '',\n 'location': 'local',\n 'name': 'phi5',\n 'origin': None,\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'origin': None,\n 'name': 'phi7',\n 'location': 'local',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_proper_names_cltk',\n 'origin': 'https://github.com/cltk/latin_proper_names_cltk.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'origin': 'https://github.com/cltk/latin_models_cltk.git',\n 'name': 'latin_models_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'python',\n 'name': 'latin_pos_lemmata_cltk',\n 'origin': 'https://github.com/cltk/latin_pos_lemmata_cltk.git',\n 'location': 'remote',\n 'type': 'lemma'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_index_thomisticus',\n 'origin': 'https://github.com/cltk/latin_treebank_index_thomisticus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'xml',\n 'markup': 'plaintext',\n 'name': 'latin_lexica_perseus',\n 'origin': 'https://github.com/cltk/latin_lexica_perseus.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_training_set_sentence_cltk',\n 'origin': 'https://github.com/cltk/latin_training_set_sentence_cltk.git',\n 'location': 'remote',\n 'type': 'training_set'},\n {'origin': 'https://github.com/cltk/latin_word2vec_cltk.git',\n 'name': 'latin_word2vec_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_antique_digiliblt',\n 'origin': 'https://github.com/cltk/latin_text_antique_digiliblt.git'},\n {'location': 
'remote',\n 'type': 'text',\n 'name': 'latin_text_corpus_grammaticorum_latinorum',\n 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}\n]\n", "path": "cltk/corpus/latin/corpora.py"}], "after_files": [{"content": "\"\"\"Latin language corpora available for download or loading locally.\nAll remote corpora hosted by github on the cltk organization account, eg:\n'http://github.com/cltk' + name\n\"\"\"\n\nLATIN_CORPORA = [\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_perseus',\n 'origin': 'https://github.com/cltk/latin_text_perseus.git'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_text_latin_library',\n 'origin': 'https://github.com/cltk/latin_text_latin_library.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'name': '',\n 'location': 'local',\n 'name': 'phi5',\n 'origin': None,\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'origin': None,\n 'name': 'phi7',\n 'location': 'local',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_proper_names_cltk',\n 'origin': 'https://github.com/cltk/latin_proper_names_cltk.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'origin': 'https://github.com/cltk/latin_models_cltk.git',\n 'name': 'latin_models_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'python',\n 'name': 'latin_pos_lemmata_cltk',\n 'origin': 'https://github.com/cltk/latin_pos_lemmata_cltk.git',\n 'location': 'remote',\n 'type': 'lemma'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_index_thomisticus',\n 'origin': 'https://github.com/cltk/latin_treebank_index_thomisticus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'xml',\n 'markup': 'plaintext',\n 'name': 'latin_lexica_perseus',\n 'origin': 'https://github.com/cltk/latin_lexica_perseus.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_training_set_sentence_cltk',\n 'origin': 'https://github.com/cltk/latin_training_set_sentence_cltk.git',\n 'location': 'remote',\n 'type': 'training_set'},\n {'origin': 'https://github.com/cltk/latin_word2vec_cltk.git',\n 'name': 'latin_word2vec_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_antique_digiliblt',\n 'origin': 'https://github.com/cltk/latin_text_antique_digiliblt.git'},\n {'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_corpus_grammaticorum_latinorum',\n 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},\n {'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_poeti_ditalia',\n 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}\n]\n", "path": "cltk/corpus/latin/corpora.py"}]} | 1,409 | 201 |
gh_patches_debug_9824 | rasdani/github-patches | git_diff | coala__coala-1585 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DefaultArgParser: Fix spelling mistake
Change analaysis on line 30 to analysis
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/parsing/DefaultArgParser.py`
Content:
```
1 import argparse
2 import sys
3
4 from coalib.misc import Constants
5
6
7 def default_arg_parser(formatter_class=None):
8 """
9 This function creates an ArgParser to parse command line arguments.
10
11 :param formatter_class: Formatting the arg_parser output into a specific
12 form. For example: In the manpage format.
13 """
14 formatter_class = formatter_class or argparse.RawDescriptionHelpFormatter
15
16 entry_point = sys.argv[0]
17 for entry in ['coala-ci', 'coala-dbus', 'coala-format', 'coala-json',
18 'coala-delete-orig']:
19 if entry_point.endswith(entry):
20 parser_type = entry
21 break
22 else:
23 parser_type = 'coala'
24
25 arg_parser = argparse.ArgumentParser(
26 formatter_class=formatter_class,
27 prog="coala",
28 description="coala is a simple COde AnaLysis Application. Its goal "
29 "is to make static code analysis easy and convenient "
30 "for all languages. coala uses bears, which are analaysis "
31 "routines that can be combined arbitrarily.")
32
33 arg_parser.add_argument('TARGETS',
34 nargs='*',
35 help="Sections to be executed exclusively.")
36 arg_parser.add_argument('-c',
37 '--config',
38 nargs=1,
39 metavar='FILE',
40 help='Configuration file to be used, defaults to '
41 + repr(Constants.default_coafile))
42 FIND_CONFIG_HELP = ('Attempt to find config file by checking parent '
43 'directories of the current working directory. It is '
44 'assumed that the config file is named '
45 + repr(Constants.default_coafile) + '. This arg is '
46 'ignored if --config is also given')
47 arg_parser.add_argument('-F',
48 '--find-config',
49 nargs='?',
50 const=True,
51 metavar='BOOL',
52 help=FIND_CONFIG_HELP)
53 arg_parser.add_argument('-f',
54 '--files',
55 nargs='+',
56 metavar='FILE',
57 help='Files that should be checked')
58 arg_parser.add_argument('-i',
59 '--ignore',
60 nargs='+',
61 metavar='FILE',
62 help='Files that should be ignored')
63 arg_parser.add_argument('--limit-files',
64 nargs='+',
65 metavar='FILE',
66 help='Files that will be analyzed will be '
67 'restricted to those in the globs listed '
68 'in this argument as well the files setting')
69 arg_parser.add_argument('-b',
70 '--bears',
71 nargs='+',
72 metavar='NAME',
73 help='Names of bears to use')
74 BEAR_DIRS_HELP = 'Additional directories where bears may lie'
75 arg_parser.add_argument('-d',
76 '--bear-dirs',
77 nargs='+',
78 metavar='DIR',
79 help=BEAR_DIRS_HELP)
80 LOG_LEVEL_HELP = ("Enum('ERROR','INFO','WARNING','DEBUG') to set level of "
81 "log output")
82 arg_parser.add_argument('-L',
83 '--log-level',
84 nargs=1,
85 choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'],
86 metavar='ENUM',
87 help=LOG_LEVEL_HELP)
88 MIN_SEVERITY_HELP = ("Enum('INFO', 'NORMAL', 'MAJOR') to set the minimal "
89 "result severity.")
90 arg_parser.add_argument('-m',
91 '--min-severity',
92 nargs=1,
93 choices=('INFO', 'NORMAL', 'MAJOR'),
94 metavar='ENUM',
95 help=MIN_SEVERITY_HELP)
96 SETTINGS_HELP = 'Arbitrary settings in the form of section.key=value'
97 arg_parser.add_argument('-S',
98 '--settings',
99 nargs='+',
100 metavar='SETTING',
101 help=SETTINGS_HELP)
102 if parser_type == 'coala-json':
103 arg_parser.add_argument('--text-logs',
104 nargs='?',
105 const=True,
106 metavar='BOOL',
107 help='Don\'t display logs as json, display '
108 'them as we normally do in the console.')
109 if parser_type == 'coala':
110 SHOW_BEARS_HELP = ("Display bears and its metadata with the sections "
111 "that they belong to")
112 arg_parser.add_argument('-B',
113 '--show-bears',
114 nargs='?',
115 const=True,
116 metavar='BOOL',
117 help=SHOW_BEARS_HELP)
118 arg_parser.add_argument('-A',
119 '--show-all-bears',
120 nargs='?',
121 const=True,
122 metavar='BOOL',
123 help="Display all bears.")
124 SAVE_HELP = ('Filename of file to be saved to, if provided with no '
125 'arguments, settings will be stored back to the file given '
126 'by -c')
127 arg_parser.add_argument('-s',
128 '--save',
129 nargs='?',
130 const=True,
131 metavar='FILE',
132 help=SAVE_HELP)
133 TAG_HELP = ('Tag results with a specific name. You can access the results'
134 ' later with that tag.')
135 arg_parser.add_argument('-t',
136 '--tag',
137 nargs='?',
138 const=True,
139 metavar='STRING',
140 help=TAG_HELP)
141
142 DELETE_TAG_HELP = 'Delete pre-tagged results with tag name.'
143 arg_parser.add_argument('-g',
144 '--dtag',
145 nargs='?',
146 const=True,
147 metavar='STRING',
148 help=DELETE_TAG_HELP)
149
150 arg_parser.add_argument("-j",
151 "--jobs",
152 type=int,
153 help="Number of jobs to use in parallel.")
154
155 arg_parser.add_argument('-v',
156 '--version',
157 action='version',
158 version=Constants.VERSION)
159
160 arg_parser.add_argument('-n',
161 '--no-orig',
162 nargs='?',
163 const=True,
164 help="Deactivate creation of .orig files,"
165 ".orig backup files before applying patches")
166
167 return arg_parser
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/parsing/DefaultArgParser.py b/coalib/parsing/DefaultArgParser.py
--- a/coalib/parsing/DefaultArgParser.py
+++ b/coalib/parsing/DefaultArgParser.py
@@ -27,7 +27,7 @@
prog="coala",
description="coala is a simple COde AnaLysis Application. Its goal "
"is to make static code analysis easy and convenient "
- "for all languages. coala uses bears, which are analaysis "
+ "for all languages. coala uses bears, which are analysis "
"routines that can be combined arbitrarily.")
arg_parser.add_argument('TARGETS',
| {"golden_diff": "diff --git a/coalib/parsing/DefaultArgParser.py b/coalib/parsing/DefaultArgParser.py\n--- a/coalib/parsing/DefaultArgParser.py\n+++ b/coalib/parsing/DefaultArgParser.py\n@@ -27,7 +27,7 @@\n prog=\"coala\",\n description=\"coala is a simple COde AnaLysis Application. Its goal \"\n \"is to make static code analysis easy and convenient \"\n- \"for all languages. coala uses bears, which are analaysis \"\n+ \"for all languages. coala uses bears, which are analysis \"\n \"routines that can be combined arbitrarily.\")\n \n arg_parser.add_argument('TARGETS',\n", "issue": "DefaultArgParser: Fix spelling mistake\nChange analaysis on line 30 to analysis\n\n", "before_files": [{"content": "import argparse\nimport sys\n\nfrom coalib.misc import Constants\n\n\ndef default_arg_parser(formatter_class=None):\n \"\"\"\n This function creates an ArgParser to parse command line arguments.\n\n :param formatter_class: Formatting the arg_parser output into a specific\n form. For example: In the manpage format.\n \"\"\"\n formatter_class = formatter_class or argparse.RawDescriptionHelpFormatter\n\n entry_point = sys.argv[0]\n for entry in ['coala-ci', 'coala-dbus', 'coala-format', 'coala-json',\n 'coala-delete-orig']:\n if entry_point.endswith(entry):\n parser_type = entry\n break\n else:\n parser_type = 'coala'\n\n arg_parser = argparse.ArgumentParser(\n formatter_class=formatter_class,\n prog=\"coala\",\n description=\"coala is a simple COde AnaLysis Application. Its goal \"\n \"is to make static code analysis easy and convenient \"\n \"for all languages. coala uses bears, which are analaysis \"\n \"routines that can be combined arbitrarily.\")\n\n arg_parser.add_argument('TARGETS',\n nargs='*',\n help=\"Sections to be executed exclusively.\")\n arg_parser.add_argument('-c',\n '--config',\n nargs=1,\n metavar='FILE',\n help='Configuration file to be used, defaults to '\n + repr(Constants.default_coafile))\n FIND_CONFIG_HELP = ('Attempt to find config file by checking parent '\n 'directories of the current working directory. It is '\n 'assumed that the config file is named '\n + repr(Constants.default_coafile) + '. 
This arg is '\n 'ignored if --config is also given')\n arg_parser.add_argument('-F',\n '--find-config',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=FIND_CONFIG_HELP)\n arg_parser.add_argument('-f',\n '--files',\n nargs='+',\n metavar='FILE',\n help='Files that should be checked')\n arg_parser.add_argument('-i',\n '--ignore',\n nargs='+',\n metavar='FILE',\n help='Files that should be ignored')\n arg_parser.add_argument('--limit-files',\n nargs='+',\n metavar='FILE',\n help='Files that will be analyzed will be '\n 'restricted to those in the globs listed '\n 'in this argument as well the files setting')\n arg_parser.add_argument('-b',\n '--bears',\n nargs='+',\n metavar='NAME',\n help='Names of bears to use')\n BEAR_DIRS_HELP = 'Additional directories where bears may lie'\n arg_parser.add_argument('-d',\n '--bear-dirs',\n nargs='+',\n metavar='DIR',\n help=BEAR_DIRS_HELP)\n LOG_LEVEL_HELP = (\"Enum('ERROR','INFO','WARNING','DEBUG') to set level of \"\n \"log output\")\n arg_parser.add_argument('-L',\n '--log-level',\n nargs=1,\n choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'],\n metavar='ENUM',\n help=LOG_LEVEL_HELP)\n MIN_SEVERITY_HELP = (\"Enum('INFO', 'NORMAL', 'MAJOR') to set the minimal \"\n \"result severity.\")\n arg_parser.add_argument('-m',\n '--min-severity',\n nargs=1,\n choices=('INFO', 'NORMAL', 'MAJOR'),\n metavar='ENUM',\n help=MIN_SEVERITY_HELP)\n SETTINGS_HELP = 'Arbitrary settings in the form of section.key=value'\n arg_parser.add_argument('-S',\n '--settings',\n nargs='+',\n metavar='SETTING',\n help=SETTINGS_HELP)\n if parser_type == 'coala-json':\n arg_parser.add_argument('--text-logs',\n nargs='?',\n const=True,\n metavar='BOOL',\n help='Don\\'t display logs as json, display '\n 'them as we normally do in the console.')\n if parser_type == 'coala':\n SHOW_BEARS_HELP = (\"Display bears and its metadata with the sections \"\n \"that they belong to\")\n arg_parser.add_argument('-B',\n '--show-bears',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=SHOW_BEARS_HELP)\n arg_parser.add_argument('-A',\n '--show-all-bears',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=\"Display all bears.\")\n SAVE_HELP = ('Filename of file to be saved to, if provided with no '\n 'arguments, settings will be stored back to the file given '\n 'by -c')\n arg_parser.add_argument('-s',\n '--save',\n nargs='?',\n const=True,\n metavar='FILE',\n help=SAVE_HELP)\n TAG_HELP = ('Tag results with a specific name. 
You can access the results'\n ' later with that tag.')\n arg_parser.add_argument('-t',\n '--tag',\n nargs='?',\n const=True,\n metavar='STRING',\n help=TAG_HELP)\n\n DELETE_TAG_HELP = 'Delete pre-tagged results with tag name.'\n arg_parser.add_argument('-g',\n '--dtag',\n nargs='?',\n const=True,\n metavar='STRING',\n help=DELETE_TAG_HELP)\n\n arg_parser.add_argument(\"-j\",\n \"--jobs\",\n type=int,\n help=\"Number of jobs to use in parallel.\")\n\n arg_parser.add_argument('-v',\n '--version',\n action='version',\n version=Constants.VERSION)\n\n arg_parser.add_argument('-n',\n '--no-orig',\n nargs='?',\n const=True,\n help=\"Deactivate creation of .orig files,\"\n \".orig backup files before applying patches\")\n\n return arg_parser\n", "path": "coalib/parsing/DefaultArgParser.py"}], "after_files": [{"content": "import argparse\nimport sys\n\nfrom coalib.misc import Constants\n\n\ndef default_arg_parser(formatter_class=None):\n \"\"\"\n This function creates an ArgParser to parse command line arguments.\n\n :param formatter_class: Formatting the arg_parser output into a specific\n form. For example: In the manpage format.\n \"\"\"\n formatter_class = formatter_class or argparse.RawDescriptionHelpFormatter\n\n entry_point = sys.argv[0]\n for entry in ['coala-ci', 'coala-dbus', 'coala-format', 'coala-json',\n 'coala-delete-orig']:\n if entry_point.endswith(entry):\n parser_type = entry\n break\n else:\n parser_type = 'coala'\n\n arg_parser = argparse.ArgumentParser(\n formatter_class=formatter_class,\n prog=\"coala\",\n description=\"coala is a simple COde AnaLysis Application. Its goal \"\n \"is to make static code analysis easy and convenient \"\n \"for all languages. coala uses bears, which are analysis \"\n \"routines that can be combined arbitrarily.\")\n\n arg_parser.add_argument('TARGETS',\n nargs='*',\n help=\"Sections to be executed exclusively.\")\n arg_parser.add_argument('-c',\n '--config',\n nargs=1,\n metavar='FILE',\n help='Configuration file to be used, defaults to '\n + repr(Constants.default_coafile))\n FIND_CONFIG_HELP = ('Attempt to find config file by checking parent '\n 'directories of the current working directory. It is '\n 'assumed that the config file is named '\n + repr(Constants.default_coafile) + '. 
This arg is '\n 'ignored if --config is also given')\n arg_parser.add_argument('-F',\n '--find-config',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=FIND_CONFIG_HELP)\n arg_parser.add_argument('-f',\n '--files',\n nargs='+',\n metavar='FILE',\n help='Files that should be checked')\n arg_parser.add_argument('-i',\n '--ignore',\n nargs='+',\n metavar='FILE',\n help='Files that should be ignored')\n arg_parser.add_argument('--limit-files',\n nargs='+',\n metavar='FILE',\n help='Files that will be analyzed will be '\n 'restricted to those in the globs listed '\n 'in this argument as well the files setting')\n arg_parser.add_argument('-b',\n '--bears',\n nargs='+',\n metavar='NAME',\n help='Names of bears to use')\n BEAR_DIRS_HELP = 'Additional directories where bears may lie'\n arg_parser.add_argument('-d',\n '--bear-dirs',\n nargs='+',\n metavar='DIR',\n help=BEAR_DIRS_HELP)\n LOG_LEVEL_HELP = (\"Enum('ERROR','INFO','WARNING','DEBUG') to set level of \"\n \"log output\")\n arg_parser.add_argument('-L',\n '--log-level',\n nargs=1,\n choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'],\n metavar='ENUM',\n help=LOG_LEVEL_HELP)\n MIN_SEVERITY_HELP = (\"Enum('INFO', 'NORMAL', 'MAJOR') to set the minimal \"\n \"result severity.\")\n arg_parser.add_argument('-m',\n '--min-severity',\n nargs=1,\n choices=('INFO', 'NORMAL', 'MAJOR'),\n metavar='ENUM',\n help=MIN_SEVERITY_HELP)\n SETTINGS_HELP = 'Arbitrary settings in the form of section.key=value'\n arg_parser.add_argument('-S',\n '--settings',\n nargs='+',\n metavar='SETTING',\n help=SETTINGS_HELP)\n if parser_type == 'coala-json':\n arg_parser.add_argument('--text-logs',\n nargs='?',\n const=True,\n metavar='BOOL',\n help='Don\\'t display logs as json, display '\n 'them as we normally do in the console.')\n if parser_type == 'coala':\n SHOW_BEARS_HELP = (\"Display bears and its metadata with the sections \"\n \"that they belong to\")\n arg_parser.add_argument('-B',\n '--show-bears',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=SHOW_BEARS_HELP)\n arg_parser.add_argument('-A',\n '--show-all-bears',\n nargs='?',\n const=True,\n metavar='BOOL',\n help=\"Display all bears.\")\n SAVE_HELP = ('Filename of file to be saved to, if provided with no '\n 'arguments, settings will be stored back to the file given '\n 'by -c')\n arg_parser.add_argument('-s',\n '--save',\n nargs='?',\n const=True,\n metavar='FILE',\n help=SAVE_HELP)\n TAG_HELP = ('Tag results with a specific name. You can access the results'\n ' later with that tag.')\n arg_parser.add_argument('-t',\n '--tag',\n nargs='?',\n const=True,\n metavar='STRING',\n help=TAG_HELP)\n\n DELETE_TAG_HELP = 'Delete pre-tagged results with tag name.'\n arg_parser.add_argument('-g',\n '--dtag',\n nargs='?',\n const=True,\n metavar='STRING',\n help=DELETE_TAG_HELP)\n\n arg_parser.add_argument(\"-j\",\n \"--jobs\",\n type=int,\n help=\"Number of jobs to use in parallel.\")\n\n arg_parser.add_argument('-v',\n '--version',\n action='version',\n version=Constants.VERSION)\n\n arg_parser.add_argument('-n',\n '--no-orig',\n nargs='?',\n const=True,\n help=\"Deactivate creation of .orig files,\"\n \".orig backup files before applying patches\")\n\n return arg_parser\n", "path": "coalib/parsing/DefaultArgParser.py"}]} | 1,880 | 154 |
gh_patches_debug_18951 | rasdani/github-patches | git_diff | ansible__ansible-23067 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
win_domain_controller: documentation error in examples '_pass' should be '_password'
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and master branch are affected too.
-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Documentation Report
##### COMPONENT NAME
<!--- Name of the module/plugin/task/feature -->
win_domain_controller
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.3.0.0 (detached HEAD e4494f85b6) last updated 2017/03/17 12:34:17 (GMT +100)
config file = /home/jon/ansible/ansible.cfg
configured module search path = [u'/home/jon/ansible/library']
python version = 2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2]
```
##### CONFIGURATION
<!---
Mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say “N/A” for anything that is not platform-specific.
-->
Ansible controller Windows 10 WSL (ubuntu)
targets: Windows Server 2012 R2
##### SUMMARY
<!--- Explain the problem briefly -->
The examples need updating to match the correct parameter names for the _password parameters.
For example, the examples use domain_admin_pass, but the module actually expects 'domain_admin_password'. The same is true for 'safe_mode_password' and 'local_admin_password'.
Would create a PR for this but am away from dev machine at the moment.
##### STEPS TO REPRODUCE
<!---
For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used.
-->
N/A
<!--- Paste example playbooks or commands between quotes below -->
```yaml
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
N/A
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
N/A
<!--- Paste verbatim command output between quotes below -->
```
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/modules/windows/win_domain_controller.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2017, Red Hat, Inc.
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 ANSIBLE_METADATA = {'metadata_version': '1.0',
22 'status': ['preview'],
23 'supported_by': 'core'}
24
25
26 DOCUMENTATION='''
27 module: win_domain_controller
28 short_description: Manage domain controller/member server state for a Windows host
29 version_added: 2.3
30 description:
31 - Ensure that a Windows Server 2012+ host is configured as a domain controller or demoted to member server. This module may require
32 subsequent use of the M(win_reboot) action if changes are made.
33 options:
34 dns_domain_name:
35 description:
36 - when C(state) is C(domain_controller), the DNS name of the domain for which the targeted Windows host should be a DC
37 domain_admin_user:
38 description:
39 - username of a domain admin for the target domain (necessary to promote or demote a domain controller)
40 required: true
41 domain_admin_password:
42 description:
43 - password for the specified C(domain_admin_user)
44 required: true
45 safe_mode_password:
46 description:
47 - safe mode password for the domain controller (required when C(state) is C(domain_controller))
48 local_admin_password:
49 description:
50 - password to be assigned to the local C(Administrator) user (required when C(state) is C(member_server))
51 state:
52 description:
53 - whether the target host should be a domain controller or a member server
54 choices:
55 - domain_controller
56 - member_server
57 author:
58 - Matt Davis (@nitzmahone)
59 '''
60
61 RETURN='''
62 reboot_required:
63 description: True if changes were made that require a reboot.
64 returned: always
65 type: boolean
66 sample: true
67
68 '''
69
70 EXAMPLES=r'''
71 # ensure a server is a domain controller
72 - hosts: winclient
73 gather_facts: no
74 tasks:
75 - win_domain_controller:
76 dns_domain_name: ansible.vagrant
77 domain_admin_user: [email protected]
78 domain_admin_pass: password123!
79 safe_mode_pass: password123!
80 state: domain_controller
81 log_path: c:\ansible_win_domain_controller.txt
82
83 # ensure a server is not a domain controller
84 # note that without an action wrapper, in the case where a DC is demoted,
85 # the task will fail with a 401 Unauthorized, because the domain credential
86 # becomes invalid to fetch the final output over WinRM. This requires win_async
87 # with credential switching (or other clever credential-switching
88 # mechanism to get the output and trigger the required reboot)
89 - hosts: winclient
90 gather_facts: no
91 tasks:
92 - win_domain_controller:
93 domain_admin_user: [email protected]
94 domain_admin_pass: password123!
95 local_admin_pass: password123!
96 state: member_server
97 log_path: c:\ansible_win_domain_controller.txt
98
99 '''
100
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/modules/windows/win_domain_controller.py b/lib/ansible/modules/windows/win_domain_controller.py
--- a/lib/ansible/modules/windows/win_domain_controller.py
+++ b/lib/ansible/modules/windows/win_domain_controller.py
@@ -75,8 +75,8 @@
- win_domain_controller:
dns_domain_name: ansible.vagrant
domain_admin_user: [email protected]
- domain_admin_pass: password123!
- safe_mode_pass: password123!
+ domain_admin_password: password123!
+ safe_mode_password: password123!
state: domain_controller
log_path: c:\ansible_win_domain_controller.txt
@@ -91,8 +91,8 @@
tasks:
- win_domain_controller:
domain_admin_user: [email protected]
- domain_admin_pass: password123!
- local_admin_pass: password123!
+ domain_admin_password: password123!
+ local_admin_password: password123!
state: member_server
log_path: c:\ansible_win_domain_controller.txt
| {"golden_diff": "diff --git a/lib/ansible/modules/windows/win_domain_controller.py b/lib/ansible/modules/windows/win_domain_controller.py\n--- a/lib/ansible/modules/windows/win_domain_controller.py\n+++ b/lib/ansible/modules/windows/win_domain_controller.py\n@@ -75,8 +75,8 @@\n - win_domain_controller:\n dns_domain_name: ansible.vagrant\n domain_admin_user: [email protected]\n- domain_admin_pass: password123!\n- safe_mode_pass: password123!\n+ domain_admin_password: password123!\n+ safe_mode_password: password123!\n state: domain_controller\n log_path: c:\\ansible_win_domain_controller.txt\n \n@@ -91,8 +91,8 @@\n tasks:\n - win_domain_controller:\n domain_admin_user: [email protected]\n- domain_admin_pass: password123!\n- local_admin_pass: password123!\n+ domain_admin_password: password123!\n+ local_admin_password: password123!\n state: member_server\n log_path: c:\\ansible_win_domain_controller.txt\n", "issue": "win_domain_controller: documentation error in examples '_pass' should be '_password'\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n\r\n - Documentation Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Name of the module/plugin/task/feature -->\r\nwin_domain_controller\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\r\n```\r\nansible 2.3.0.0 (detached HEAD e4494f85b6) last updated 2017/03/17 12:34:17 (GMT +100)\r\n config file = /home/jon/ansible/ansible.cfg\r\n configured module search path = [u'/home/jon/ansible/library']\r\n python version = 2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nMention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n-->\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \u201cN/A\u201d for anything that is not platform-specific.\r\n-->\r\nAnsible controller Windows 10 WSL (ubuntu)\r\ntargets: Windows Server 2012 R2\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\n\r\nThe examples need updating to match the correct parameter names for the _password parameters.\r\nFor example in the examples domain_admin_pass, but the module actually expects 'domain_admin_password'. The same thing is true for 'safe_mode_password' and 'local_admin_password'.\r\n\r\nWould create a PR for this but am away from dev machine at the moment.\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\nN/A\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nN/A\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? 
If possible run with extra verbosity (-vvvv) -->\r\nN/A\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\n\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2017, Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['preview'],\n 'supported_by': 'core'}\n\n\nDOCUMENTATION='''\nmodule: win_domain_controller\nshort_description: Manage domain controller/member server state for a Windows host\nversion_added: 2.3\ndescription:\n - Ensure that a Windows Server 2012+ host is configured as a domain controller or demoted to member server. This module may require\n subsequent use of the M(win_reboot) action if changes are made.\noptions:\n dns_domain_name:\n description:\n - when C(state) is C(domain_controller), the DNS name of the domain for which the targeted Windows host should be a DC\n domain_admin_user:\n description:\n - username of a domain admin for the target domain (necessary to promote or demote a domain controller)\n required: true\n domain_admin_password:\n description:\n - password for the specified C(domain_admin_user)\n required: true\n safe_mode_password:\n description:\n - safe mode password for the domain controller (required when C(state) is C(domain_controller))\n local_admin_password:\n description:\n - password to be assigned to the local C(Administrator) user (required when C(state) is C(member_server))\n state:\n description:\n - whether the target host should be a domain controller or a member server\n choices:\n - domain_controller\n - member_server\nauthor:\n - Matt Davis (@nitzmahone)\n'''\n\nRETURN='''\nreboot_required:\n description: True if changes were made that require a reboot.\n returned: always\n type: boolean\n sample: true\n\n'''\n\nEXAMPLES=r'''\n# ensure a server is a domain controller\n- hosts: winclient\n gather_facts: no\n tasks:\n - win_domain_controller:\n dns_domain_name: ansible.vagrant\n domain_admin_user: [email protected]\n domain_admin_pass: password123!\n safe_mode_pass: password123!\n state: domain_controller\n log_path: c:\\ansible_win_domain_controller.txt\n\n# ensure a server is not a domain controller\n# note that without an action wrapper, in the case where a DC is demoted,\n# the task will fail with a 401 Unauthorized, because the domain credential\n# becomes invalid to fetch the final output over WinRM. 
This requires win_async\n# with credential switching (or other clever credential-switching\n# mechanism to get the output and trigger the required reboot)\n- hosts: winclient\n gather_facts: no\n tasks:\n - win_domain_controller:\n domain_admin_user: [email protected]\n domain_admin_pass: password123!\n local_admin_pass: password123!\n state: member_server\n log_path: c:\\ansible_win_domain_controller.txt\n\n'''\n\n", "path": "lib/ansible/modules/windows/win_domain_controller.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2017, Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['preview'],\n 'supported_by': 'core'}\n\n\nDOCUMENTATION='''\nmodule: win_domain_controller\nshort_description: Manage domain controller/member server state for a Windows host\nversion_added: 2.3\ndescription:\n - Ensure that a Windows Server 2012+ host is configured as a domain controller or demoted to member server. This module may require\n subsequent use of the M(win_reboot) action if changes are made.\noptions:\n dns_domain_name:\n description:\n - when C(state) is C(domain_controller), the DNS name of the domain for which the targeted Windows host should be a DC\n domain_admin_user:\n description:\n - username of a domain admin for the target domain (necessary to promote or demote a domain controller)\n required: true\n domain_admin_password:\n description:\n - password for the specified C(domain_admin_user)\n required: true\n safe_mode_password:\n description:\n - safe mode password for the domain controller (required when C(state) is C(domain_controller))\n local_admin_password:\n description:\n - password to be assigned to the local C(Administrator) user (required when C(state) is C(member_server))\n state:\n description:\n - whether the target host should be a domain controller or a member server\n choices:\n - domain_controller\n - member_server\nauthor:\n - Matt Davis (@nitzmahone)\n'''\n\nRETURN='''\nreboot_required:\n description: True if changes were made that require a reboot.\n returned: always\n type: boolean\n sample: true\n\n'''\n\nEXAMPLES=r'''\n# ensure a server is a domain controller\n- hosts: winclient\n gather_facts: no\n tasks:\n - win_domain_controller:\n dns_domain_name: ansible.vagrant\n domain_admin_user: [email protected]\n domain_admin_password: password123!\n safe_mode_password: password123!\n state: domain_controller\n log_path: c:\\ansible_win_domain_controller.txt\n\n# ensure a server is not a domain controller\n# note that without an action wrapper, in the case where a DC is demoted,\n# the task will fail with a 401 Unauthorized, because the domain credential\n# becomes invalid to fetch the final output over WinRM. 
This requires win_async\n# with credential switching (or other clever credential-switching\n# mechanism to get the output and trigger the required reboot)\n- hosts: winclient\n gather_facts: no\n tasks:\n - win_domain_controller:\n domain_admin_user: [email protected]\n domain_admin_password: password123!\n local_admin_password: password123!\n state: member_server\n log_path: c:\\ansible_win_domain_controller.txt\n\n'''\n\n", "path": "lib/ansible/modules/windows/win_domain_controller.py"}]} | 1,812 | 246 |
gh_patches_debug_7897 | rasdani/github-patches | git_diff | nautobot__nautobot-2640 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
wrong link from circuit types to circuit
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): eee34d7bc54d (v1.4.5)
* Python version: 3.10
* Database platform, version: postgresql 14
* Middleware(s):
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create circuit type
2. Create circuit of above circuit type
3. Go to Circuit Types
4. Click one circuit type
5. Click number of circuits belonging to circuit type
<!-- What did you expect to happen? -->
### Expected Behavior
redirect to `circuits/circuits/?type=XXX`
<!-- What happened instead? -->
### Observed Behavior
redirects to `circuits/circuits/?circuit_type=XXX`
which shows an "Invalid filters were specified" error
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/circuits/tables.py`
Content:
```
1 import django_tables2 as tables
2 from django_tables2.utils import Accessor
3
4 from nautobot.extras.tables import StatusTableMixin
5 from nautobot.tenancy.tables import TenantColumn
6 from nautobot.utilities.tables import (
7 BaseTable,
8 ButtonsColumn,
9 TagColumn,
10 ToggleColumn,
11 )
12 from .models import Circuit, CircuitType, Provider, ProviderNetwork
13
14 CIRCUIT_TERMINATION_PARENT = """
15 {% if value.provider_network %}
16 <a href="{{ value.provider_network.get_absolute_url }}">{{ value.provider_network }}</a>
17 {% elif value.site %}
18 <a href="{{ value.site.get_absolute_url }}">{{ value.site }}</a>
19 {% endif %}
20 """
21
22 #
23 # Provider Network
24 #
25
26
27 class ProviderNetworkTable(BaseTable):
28 pk = ToggleColumn()
29 name = tables.Column(linkify=True)
30 provider = tables.Column(linkify=True)
31 tags = TagColumn(url_name="circuits:providernetwork_list")
32
33 class Meta(BaseTable.Meta):
34 model = ProviderNetwork
35 fields = ("pk", "name", "provider", "description", "tags")
36 default_columns = ("pk", "name", "provider", "description")
37
38
39 #
40 # Providers
41 #
42
43
44 class ProviderTable(BaseTable):
45 pk = ToggleColumn()
46 name = tables.LinkColumn()
47 circuit_count = tables.Column(accessor=Accessor("count_circuits"), verbose_name="Circuits")
48 tags = TagColumn(url_name="circuits:provider_list")
49
50 class Meta(BaseTable.Meta):
51 model = Provider
52 fields = (
53 "pk",
54 "name",
55 "asn",
56 "account",
57 "portal_url",
58 "noc_contact",
59 "admin_contact",
60 "circuit_count",
61 "tags",
62 )
63 default_columns = ("pk", "name", "asn", "account", "circuit_count")
64
65
66 #
67 # Circuit types
68 #
69
70
71 class CircuitTypeTable(BaseTable):
72 pk = ToggleColumn()
73 name = tables.LinkColumn()
74 circuit_count = tables.Column(verbose_name="Circuits")
75 actions = ButtonsColumn(CircuitType, pk_field="slug")
76
77 class Meta(BaseTable.Meta):
78 model = CircuitType
79 fields = ("pk", "name", "circuit_count", "description", "slug", "actions")
80 default_columns = (
81 "pk",
82 "name",
83 "circuit_count",
84 "description",
85 "slug",
86 "actions",
87 )
88
89
90 #
91 # Circuits
92 #
93
94
95 class CircuitTable(StatusTableMixin, BaseTable):
96 pk = ToggleColumn()
97 cid = tables.LinkColumn(verbose_name="ID")
98 provider = tables.LinkColumn(viewname="circuits:provider", args=[Accessor("provider__slug")])
99 tenant = TenantColumn()
100 tags = TagColumn(url_name="circuits:circuit_list")
101
102 termination_a = tables.TemplateColumn(
103 template_code=CIRCUIT_TERMINATION_PARENT,
104 accessor=Accessor("termination_a"),
105 orderable=False,
106 verbose_name="Side A",
107 )
108 termination_z = tables.TemplateColumn(
109 template_code=CIRCUIT_TERMINATION_PARENT,
110 accessor=Accessor("termination_z"),
111 orderable=False,
112 verbose_name="Side Z",
113 )
114
115 class Meta(BaseTable.Meta):
116 model = Circuit
117 fields = (
118 "pk",
119 "cid",
120 "provider",
121 "type",
122 "status",
123 "tenant",
124 "termination_a",
125 "termination_z",
126 "install_date",
127 "commit_rate",
128 "description",
129 "tags",
130 )
131 default_columns = (
132 "pk",
133 "cid",
134 "provider",
135 "type",
136 "status",
137 "tenant",
138 "termination_a",
139 "termination_z",
140 "description",
141 )
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/circuits/tables.py b/nautobot/circuits/tables.py
--- a/nautobot/circuits/tables.py
+++ b/nautobot/circuits/tables.py
@@ -12,10 +12,13 @@
from .models import Circuit, CircuitType, Provider, ProviderNetwork
CIRCUIT_TERMINATION_PARENT = """
+{% load helpers %}
{% if value.provider_network %}
-<a href="{{ value.provider_network.get_absolute_url }}">{{ value.provider_network }}</a>
+{{ value.provider_network|hyperlinked_object }}
{% elif value.site %}
-<a href="{{ value.site.get_absolute_url }}">{{ value.site }}</a>
+{{ value.site|hyperlinked_object }}
+{% else %}
+{{ None|placeholder }}
{% endif %}
"""
| {"golden_diff": "diff --git a/nautobot/circuits/tables.py b/nautobot/circuits/tables.py\n--- a/nautobot/circuits/tables.py\n+++ b/nautobot/circuits/tables.py\n@@ -12,10 +12,13 @@\n from .models import Circuit, CircuitType, Provider, ProviderNetwork\n \n CIRCUIT_TERMINATION_PARENT = \"\"\"\n+{% load helpers %}\n {% if value.provider_network %}\n-<a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n+{{ value.provider_network|hyperlinked_object }}\n {% elif value.site %}\n-<a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n+{{ value.site|hyperlinked_object }}\n+{% else %}\n+{{ None|placeholder }}\n {% endif %}\n \"\"\"\n", "issue": "wrong link from circuit types to circuit\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): eee34d7bc54d (v1.4.5)\r\n* Python version: 3.10\r\n* Database platform, version: postgresql 14\r\n* Middleware(s):\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create circuit type\r\n2. Create circuit of above circuit type\r\n3. Go to Circuit Types\r\n4. Click one circuit type\r\n5. Click number of circuits belonging to circuit type\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nredirect to `circuits/circuits/?type=XXX`\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nredirects to `circuits/circuits/?circuit_type=XXX`\r\nwhich shows an \"Invalid filters were specified\" error\n", "before_files": [{"content": "import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom nautobot.extras.tables import StatusTableMixin\nfrom nautobot.tenancy.tables import TenantColumn\nfrom nautobot.utilities.tables import (\n BaseTable,\n ButtonsColumn,\n TagColumn,\n ToggleColumn,\n)\nfrom .models import Circuit, CircuitType, Provider, ProviderNetwork\n\nCIRCUIT_TERMINATION_PARENT = \"\"\"\n{% if value.provider_network %}\n<a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n{% elif value.site %}\n<a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n{% endif %}\n\"\"\"\n\n#\n# Provider Network\n#\n\n\nclass ProviderNetworkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n provider = tables.Column(linkify=True)\n tags = TagColumn(url_name=\"circuits:providernetwork_list\")\n\n class Meta(BaseTable.Meta):\n model = ProviderNetwork\n fields = (\"pk\", \"name\", \"provider\", \"description\", \"tags\")\n default_columns = (\"pk\", \"name\", \"provider\", \"description\")\n\n\n#\n# Providers\n#\n\n\nclass ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n circuit_count = tables.Column(accessor=Accessor(\"count_circuits\"), verbose_name=\"Circuits\")\n tags = TagColumn(url_name=\"circuits:provider_list\")\n\n class Meta(BaseTable.Meta):\n model = Provider\n fields = (\n \"pk\",\n \"name\",\n \"asn\",\n \"account\",\n \"portal_url\",\n \"noc_contact\",\n \"admin_contact\",\n \"circuit_count\",\n \"tags\",\n )\n default_columns = (\"pk\", \"name\", \"asn\", \"account\", \"circuit_count\")\n\n\n#\n# Circuit types\n#\n\n\nclass CircuitTypeTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n circuit_count = tables.Column(verbose_name=\"Circuits\")\n actions = ButtonsColumn(CircuitType, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = CircuitType\n fields = (\"pk\", \"name\", \"circuit_count\", \"description\", \"slug\", \"actions\")\n default_columns = (\n \"pk\",\n \"name\",\n \"circuit_count\",\n \"description\",\n \"slug\",\n \"actions\",\n )\n\n\n#\n# Circuits\n#\n\n\nclass CircuitTable(StatusTableMixin, BaseTable):\n pk = ToggleColumn()\n cid = tables.LinkColumn(verbose_name=\"ID\")\n provider = tables.LinkColumn(viewname=\"circuits:provider\", args=[Accessor(\"provider__slug\")])\n tenant = TenantColumn()\n tags = TagColumn(url_name=\"circuits:circuit_list\")\n\n termination_a = tables.TemplateColumn(\n template_code=CIRCUIT_TERMINATION_PARENT,\n accessor=Accessor(\"termination_a\"),\n orderable=False,\n verbose_name=\"Side A\",\n )\n termination_z = tables.TemplateColumn(\n template_code=CIRCUIT_TERMINATION_PARENT,\n accessor=Accessor(\"termination_z\"),\n orderable=False,\n verbose_name=\"Side Z\",\n )\n\n class Meta(BaseTable.Meta):\n model = Circuit\n fields = (\n \"pk\",\n \"cid\",\n \"provider\",\n \"type\",\n \"status\",\n \"tenant\",\n \"termination_a\",\n \"termination_z\",\n \"install_date\",\n \"commit_rate\",\n \"description\",\n \"tags\",\n )\n default_columns = (\n \"pk\",\n \"cid\",\n \"provider\",\n \"type\",\n \"status\",\n \"tenant\",\n \"termination_a\",\n \"termination_z\",\n \"description\",\n )\n", "path": "nautobot/circuits/tables.py"}], "after_files": [{"content": "import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom 
nautobot.extras.tables import StatusTableMixin\nfrom nautobot.tenancy.tables import TenantColumn\nfrom nautobot.utilities.tables import (\n BaseTable,\n ButtonsColumn,\n TagColumn,\n ToggleColumn,\n)\nfrom .models import Circuit, CircuitType, Provider, ProviderNetwork\n\nCIRCUIT_TERMINATION_PARENT = \"\"\"\n{% load helpers %}\n{% if value.provider_network %}\n{{ value.provider_network|hyperlinked_object }}\n{% elif value.site %}\n{{ value.site|hyperlinked_object }}\n{% else %}\n{{ None|placeholder }}\n{% endif %}\n\"\"\"\n\n#\n# Provider Network\n#\n\n\nclass ProviderNetworkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(linkify=True)\n provider = tables.Column(linkify=True)\n tags = TagColumn(url_name=\"circuits:providernetwork_list\")\n\n class Meta(BaseTable.Meta):\n model = ProviderNetwork\n fields = (\"pk\", \"name\", \"provider\", \"description\", \"tags\")\n default_columns = (\"pk\", \"name\", \"provider\", \"description\")\n\n\n#\n# Providers\n#\n\n\nclass ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n circuit_count = tables.Column(accessor=Accessor(\"count_circuits\"), verbose_name=\"Circuits\")\n tags = TagColumn(url_name=\"circuits:provider_list\")\n\n class Meta(BaseTable.Meta):\n model = Provider\n fields = (\n \"pk\",\n \"name\",\n \"asn\",\n \"account\",\n \"portal_url\",\n \"noc_contact\",\n \"admin_contact\",\n \"circuit_count\",\n \"tags\",\n )\n default_columns = (\"pk\", \"name\", \"asn\", \"account\", \"circuit_count\")\n\n\n#\n# Circuit types\n#\n\n\nclass CircuitTypeTable(BaseTable):\n pk = ToggleColumn()\n name = tables.LinkColumn()\n circuit_count = tables.Column(verbose_name=\"Circuits\")\n actions = ButtonsColumn(CircuitType, pk_field=\"slug\")\n\n class Meta(BaseTable.Meta):\n model = CircuitType\n fields = (\"pk\", \"name\", \"circuit_count\", \"description\", \"slug\", \"actions\")\n default_columns = (\n \"pk\",\n \"name\",\n \"circuit_count\",\n \"description\",\n \"slug\",\n \"actions\",\n )\n\n\n#\n# Circuits\n#\n\n\nclass CircuitTable(StatusTableMixin, BaseTable):\n pk = ToggleColumn()\n cid = tables.LinkColumn(verbose_name=\"ID\")\n provider = tables.LinkColumn(viewname=\"circuits:provider\", args=[Accessor(\"provider__slug\")])\n tenant = TenantColumn()\n tags = TagColumn(url_name=\"circuits:circuit_list\")\n\n termination_a = tables.TemplateColumn(\n template_code=CIRCUIT_TERMINATION_PARENT,\n accessor=Accessor(\"termination_a\"),\n orderable=False,\n verbose_name=\"Side A\",\n )\n termination_z = tables.TemplateColumn(\n template_code=CIRCUIT_TERMINATION_PARENT,\n accessor=Accessor(\"termination_z\"),\n orderable=False,\n verbose_name=\"Side Z\",\n )\n\n class Meta(BaseTable.Meta):\n model = Circuit\n fields = (\n \"pk\",\n \"cid\",\n \"provider\",\n \"type\",\n \"status\",\n \"tenant\",\n \"termination_a\",\n \"termination_z\",\n \"install_date\",\n \"commit_rate\",\n \"description\",\n \"tags\",\n )\n default_columns = (\n \"pk\",\n \"cid\",\n \"provider\",\n \"type\",\n \"status\",\n \"tenant\",\n \"termination_a\",\n \"termination_z\",\n \"description\",\n )\n", "path": "nautobot/circuits/tables.py"}]} | 1,767 | 168 |
gh_patches_debug_608 | rasdani/github-patches | git_diff | pex-tool__pex-1482 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.51
On the docket:
+ [ ] UnicodeDecodeError when packaging after upgrading to v2.1.46 #1479
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.50"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.50"
+__version__ = "2.1.51"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.50\"\n+__version__ = \"2.1.51\"\n", "issue": "Release 2.1.51\nOn the docket:\r\n+ [ ] UnicodeDecodeError when packaging after upgrading to v2.1.46 #1479 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.50\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.51\"\n", "path": "pex/version.py"}]} | 346 | 96 |
gh_patches_debug_13431 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1755 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSV export fails on converting uuid to json
Problematic JSON structure in https://grand-challenge.org/api/v1/cases/images/redacted_uuid/?format=csv
```
{"pk":"redacted_uuid","name":"redacted.png","study":null,"files":[{"pk":"redacted_uuid","image":"redacted_uuid","file":"https://grand-challenge.org/media/images/...mhd","image_type":"MHD"},{"pk":"redacted_uuid","image":"09b3b3d6-0994-43d2-b6a9-eaff634b8805","file":"https://grand-challenge.org/media/images/...zraw","image_type":"MHD"}],"reader_study_set":["https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/","https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/"],"archive_set":[],"job_set":[],"width":596,"height":596,"depth":null,"color_space":"RGB","modality":null,"eye_choice":"NA","stereoscopic_choice":null,"field_of_view":null,"shape_without_color":[596,596],"shape":[596,596,3],"voxel_width_mm":null,"voxel_height_mm":null,"voxel_depth_mm":null,"api_url":"https://grand-challenge.org/api/v1/cases/images/redacted_uuid/"}
```
Probably due to trying to serialize the list of files to json in https://github.com/comic/grand-challenge.org/blob/14bc3dd4002756e9cf4a32bb0f238859a9175252/app/grandchallenge/core/renderers.py#L26-L27
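A minimal sketch of the failure, assuming a raw `uuid.UUID` object is still present in the nested `files` dicts when the renderer flattens the row:
```python
import json
import uuid

# hypothetical row shaped like the API output above, with an unserialized UUID pk
row = {"pk": "redacted", "files": [{"pk": uuid.uuid4(), "image_type": "MHD"}]}
json.dumps(row["files"])  # TypeError: Object of type UUID is not JSON serializable
```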
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/core/renderers.py`
Content:
```
1 import json
2
3 from rest_framework_csv.renderers import CSVRenderer
4
5
6 class PaginatedCSVRenderer(CSVRenderer):
7 results_field = "results"
8
9 def render(self, data, *args, **kwargs):
10 if self.results_field in data:
11 data = data[self.results_field]
12
13 return super().render(data, *args, **kwargs)
14
15 def flatten_data(self, data):
16 """
17 Create a dictionary that is 1 level deep, with nested values serialized
18 as json. This means that the header rows are now consistent.
19 """
20 for row in data:
21 flat_row = {k: self._flatten_value(v) for k, v in row.items()}
22 yield flat_row
23
24 @staticmethod
25 def _flatten_value(value):
26 if isinstance(value, (dict, list)):
27 return json.dumps(value)
28 else:
29 return value
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py
--- a/app/grandchallenge/core/renderers.py
+++ b/app/grandchallenge/core/renderers.py
@@ -1,5 +1,7 @@
import json
+from rest_framework.settings import api_settings
+from rest_framework.utils.encoders import JSONEncoder
from rest_framework_csv.renderers import CSVRenderer
@@ -24,6 +26,11 @@
@staticmethod
def _flatten_value(value):
if isinstance(value, (dict, list)):
- return json.dumps(value)
+ return json.dumps(
+ value,
+ cls=JSONEncoder,
+ ensure_ascii=not api_settings.UNICODE_JSON,
+ allow_nan=not api_settings.STRICT_JSON,
+ )
else:
return value
| {"golden_diff": "diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py\n--- a/app/grandchallenge/core/renderers.py\n+++ b/app/grandchallenge/core/renderers.py\n@@ -1,5 +1,7 @@\n import json\n \n+from rest_framework.settings import api_settings\n+from rest_framework.utils.encoders import JSONEncoder\n from rest_framework_csv.renderers import CSVRenderer\n \n \n@@ -24,6 +26,11 @@\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n- return json.dumps(value)\n+ return json.dumps(\n+ value,\n+ cls=JSONEncoder,\n+ ensure_ascii=not api_settings.UNICODE_JSON,\n+ allow_nan=not api_settings.STRICT_JSON,\n+ )\n else:\n return value\n", "issue": "CSV export fails on converting uuid to json\nProblematic JSON structure in https://grand-challenge.org/api/v1/cases/images/redacted_uuid/?format=csv\r\n\r\n```\r\n{\"pk\":\"redacted_uuid\",\"name\":\"redacted.png\",\"study\":null,\"files\":[{\"pk\":\"redacted_uuid\",\"image\":\"redacted_uuid\",\"file\":\"https://grand-challenge.org/media/images/...mhd\",\"image_type\":\"MHD\"},{\"pk\":\"redacted_uuid\",\"image\":\"09b3b3d6-0994-43d2-b6a9-eaff634b8805\",\"file\":\"https://grand-challenge.org/media/images/...zraw\",\"image_type\":\"MHD\"}],\"reader_study_set\":[\"https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/\",\"https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/\"],\"archive_set\":[],\"job_set\":[],\"width\":596,\"height\":596,\"depth\":null,\"color_space\":\"RGB\",\"modality\":null,\"eye_choice\":\"NA\",\"stereoscopic_choice\":null,\"field_of_view\":null,\"shape_without_color\":[596,596],\"shape\":[596,596,3],\"voxel_width_mm\":null,\"voxel_height_mm\":null,\"voxel_depth_mm\":null,\"api_url\":\"https://grand-challenge.org/api/v1/cases/images/redacted_uuid/\"}\r\n```\r\n\r\nProbably due to trying to serialize the list of files to json in https://github.com/comic/grand-challenge.org/blob/14bc3dd4002756e9cf4a32bb0f238859a9175252/app/grandchallenge/core/renderers.py#L26-L27\n", "before_files": [{"content": "import json\n\nfrom rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n\n def flatten_data(self, data):\n \"\"\"\n Create a dictionary that is 1 level deep, with nested values serialized\n as json. This means that the header rows are now consistent.\n \"\"\"\n for row in data:\n flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n yield flat_row\n\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n return json.dumps(value)\n else:\n return value\n", "path": "app/grandchallenge/core/renderers.py"}], "after_files": [{"content": "import json\n\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils.encoders import JSONEncoder\nfrom rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n\n def flatten_data(self, data):\n \"\"\"\n Create a dictionary that is 1 level deep, with nested values serialized\n as json. 
This means that the header rows are now consistent.\n \"\"\"\n for row in data:\n flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n yield flat_row\n\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n return json.dumps(\n value,\n cls=JSONEncoder,\n ensure_ascii=not api_settings.UNICODE_JSON,\n allow_nan=not api_settings.STRICT_JSON,\n )\n else:\n return value\n", "path": "app/grandchallenge/core/renderers.py"}]} | 887 | 180 |
gh_patches_debug_27459 | rasdani/github-patches | git_diff | NVIDIA__apex-590 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SyncBatchNorm doesn't support 2-dimensional input?
Hi,
I'm facing an issue where the program crashes when the input to SyncBatchNorm has two dimensions. Here's the code:
```python
import torch
import apex
model = apex.parallel.SyncBatchNorm(4).cuda()
data = torch.rand((8,4)).cuda()
output = model(data)
```
When running the code, error raised like this:
```
Traceback (most recent call last):
File "syncbn_test.by", line 7, in <module>
output = model(data)
File "/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm.py", line 81, in forward
return SyncBatchnormFunction.apply(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last)
File "/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm_kernel.py", line 27, in forward
mean, var_biased = syncbn.welford_mean_var(input)
RuntimeError: Dimension out of range (expected to be in range of [-2, 1], but got 2) (maybe_wrap_dim at /pytorch/aten/src/ATen/core/WrapDimMinimal.h:18)
```
And everything runs OK when `data` is a 4-dim tensor.
Here is my environment:
```
Ubuntu 16.04
Python 3.5.2
Pytorch 1.0.1, installed with "pip install torch"
apex is installed with command:
pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" .
cuda 10.0
nvidia driver 410.72
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apex/parallel/optimized_sync_batchnorm.py`
Content:
```
1 import torch
2 from torch.nn.modules.batchnorm import _BatchNorm
3 from torch.nn import functional as F
4
5 import syncbn
6 from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction
7
8
9 class SyncBatchNorm(_BatchNorm):
10 """
11 synchronized batch normalization module extented from `torch.nn.BatchNormNd`
12 with the added stats reduction across multiple processes.
13 :class:`apex.parallel.SyncBatchNorm` is designed to work with
14 `DistributedDataParallel`.
15
16 When running in training mode, the layer reduces stats across all processes
17 to increase the effective batchsize for normalization layer. This is useful
18 in applications where batch size is small on a given process that would
19 diminish converged accuracy of the model. The model uses collective
20 communication package from `torch.distributed`.
21
22 When running in evaluation mode, the layer falls back to
23 `torch.nn.functional.batch_norm`
24
25 Args:
26 num_features: :math:`C` from an expected input of size
27 :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
28 eps: a value added to the denominator for numerical stability.
29 Default: 1e-5
30 momentum: the value used for the running_mean and running_var
31 computation. Can be set to ``None`` for cumulative moving average
32 (i.e. simple average). Default: 0.1
33 affine: a boolean value that when set to ``True``, this module has
34 learnable affine parameters. Default: ``True``
35 track_running_stats: a boolean value that when set to ``True``, this
36 module tracks the running mean and variance, and when set to ``False``,
37 this module does not track such statistics and always uses batch
38 statistics in both training and eval modes. Default: ``True``
39 process_group: pass in a process group within which the stats of the
40 mini-batch is being synchronized. ``None`` for using default process
41 group
42 channel_last: a boolean value that when set to ``True``, this module
43 take the last dimension of the input tensor to be the channel
44 dimension. Default: False
45
46 Examples::
47 >>> # channel first tensor
48 >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()
49 >>> inp = torch.randn(10, 100, 14, 14).cuda()
50 >>> out = sbn(inp)
51 >>> inp = torch.randn(3, 100, 20).cuda()
52 >>> out = sbn(inp)
53 >>> # channel last tensor
54 >>> sbn = apex.parallel.SyncBatchNorm(100, channel_last=True).cuda()
55 >>> inp = torch.randn(10, 14, 14, 100).cuda()
56 """
57
58 def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False, fuse_relu=False):
59 super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
60 self.process_group = process_group
61 self.channel_last = channel_last
62 self.fuse_relu = fuse_relu
63
64 def _specify_process_group(self, process_group):
65 self.process_group = process_group
66
67 def _specify_channel_last(self, channel_last):
68 self.channel_last = channel_last
69
70 def forward(self, input, z = None):
71 # if input.dim() == 2, we switch to channel_last for efficient memory accessing
72 channel_last = self.channel_last if input.dim() != 2 else True
73
74 if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:
75 # fall back to pytorch implementation for inference
76 return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)
77 else:
78 exponential_average_factor = 0.0
79 if self.training and self.track_running_stats:
80 self.num_batches_tracked += 1
81 if self.momentum is None:
82 exponential_average_factor = 1.0 / float(self.num_batches_tracked)
83 else:
84 exponential_average_factor = self.momentum
85 return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apex/parallel/optimized_sync_batchnorm.py b/apex/parallel/optimized_sync_batchnorm.py
--- a/apex/parallel/optimized_sync_batchnorm.py
+++ b/apex/parallel/optimized_sync_batchnorm.py
@@ -71,7 +71,7 @@
# if input.dim() == 2, we switch to channel_last for efficient memory accessing
channel_last = self.channel_last if input.dim() != 2 else True
- if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:
+ if not self.training and self.track_running_stats and not channel_last and not self.fuse_relu and z == None:
# fall back to pytorch implementation for inference
return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)
else:
@@ -82,4 +82,4 @@
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else:
exponential_average_factor = self.momentum
- return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)
+ return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, channel_last, self.fuse_relu)
| {"golden_diff": "diff --git a/apex/parallel/optimized_sync_batchnorm.py b/apex/parallel/optimized_sync_batchnorm.py\n--- a/apex/parallel/optimized_sync_batchnorm.py\n+++ b/apex/parallel/optimized_sync_batchnorm.py\n@@ -71,7 +71,7 @@\n # if input.dim() == 2, we switch to channel_last for efficient memory accessing\n channel_last = self.channel_last if input.dim() != 2 else True\n \n- if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:\n+ if not self.training and self.track_running_stats and not channel_last and not self.fuse_relu and z == None:\n # fall back to pytorch implementation for inference\n return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)\n else:\n@@ -82,4 +82,4 @@\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else:\n exponential_average_factor = self.momentum\n- return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)\n+ return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, channel_last, self.fuse_relu)\n", "issue": "SyncBatchNorm doesn't support 2 dimensions input?\nHi,\r\nI'm facing the issue that the program crash when the input for SyncBatchNorm is two dimensions. Here's the code:\r\n```python\r\nimport torch\r\nimport apex\r\n\r\nmodel = apex.parallel.SyncBatchNorm(4).cuda()\r\ndata = torch.rand((8,4)).cuda()\r\noutput = model(data)\r\n```\r\nWhen running the code, error raised like this:\r\n```\r\nTraceback (most recent call last):\r\n File \"syncbn_test.by\", line 7, in <module>\r\n output = model(data)\r\n File \"/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py\", line 489, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm.py\", line 81, in forward\r\n return SyncBatchnormFunction.apply(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last)\r\n File \"/usr/local/lib/python3.5/dist-packages/apex/parallel/optimized_sync_batchnorm_kernel.py\", line 27, in forward\r\n mean, var_biased = syncbn.welford_mean_var(input)\r\nRuntimeError: Dimension out of range (expected to be in range of [-2, 1], but got 2) (maybe_wrap_dim at /pytorch/aten/src/ATen/core/WrapDimMinimal.h:18)\r\n```\r\nAnd everthing runs ok when `data` a 4 dims tensor. 
\r\n\r\nHere is my environment:\r\n```\r\nUbuntu 16.04\r\nPython 3.5.2\r\nPytorch 1.0.1, installed with \"pip install torch\"\r\napex is installed with command:\r\n pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" .\r\ncuda 10.0\r\nnvidia driver 410.72\r\n```\n", "before_files": [{"content": "import torch\nfrom torch.nn.modules.batchnorm import _BatchNorm\nfrom torch.nn import functional as F\n\nimport syncbn\nfrom .optimized_sync_batchnorm_kernel import SyncBatchnormFunction\n\n\nclass SyncBatchNorm(_BatchNorm):\n \"\"\"\n synchronized batch normalization module extented from `torch.nn.BatchNormNd`\n with the added stats reduction across multiple processes.\n :class:`apex.parallel.SyncBatchNorm` is designed to work with\n `DistributedDataParallel`.\n\n When running in training mode, the layer reduces stats across all processes\n to increase the effective batchsize for normalization layer. This is useful\n in applications where batch size is small on a given process that would\n diminish converged accuracy of the model. The model uses collective\n communication package from `torch.distributed`.\n\n When running in evaluation mode, the layer falls back to\n `torch.nn.functional.batch_norm`\n\n Args:\n num_features: :math:`C` from an expected input of size\n :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`\n eps: a value added to the denominator for numerical stability.\n Default: 1e-5\n momentum: the value used for the running_mean and running_var\n computation. Can be set to ``None`` for cumulative moving average\n (i.e. simple average). Default: 0.1\n affine: a boolean value that when set to ``True``, this module has\n learnable affine parameters. Default: ``True``\n track_running_stats: a boolean value that when set to ``True``, this\n module tracks the running mean and variance, and when set to ``False``,\n this module does not track such statistics and always uses batch\n statistics in both training and eval modes. Default: ``True``\n process_group: pass in a process group within which the stats of the\n mini-batch is being synchronized. ``None`` for using default process\n group\n channel_last: a boolean value that when set to ``True``, this module\n take the last dimension of the input tensor to be the channel\n dimension. 
Default: False\n\n Examples::\n >>> # channel first tensor\n >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()\n >>> inp = torch.randn(10, 100, 14, 14).cuda()\n >>> out = sbn(inp)\n >>> inp = torch.randn(3, 100, 20).cuda()\n >>> out = sbn(inp)\n >>> # channel last tensor\n >>> sbn = apex.parallel.SyncBatchNorm(100, channel_last=True).cuda()\n >>> inp = torch.randn(10, 14, 14, 100).cuda()\n \"\"\"\n\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False, fuse_relu=False):\n super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)\n self.process_group = process_group\n self.channel_last = channel_last\n self.fuse_relu = fuse_relu\n\n def _specify_process_group(self, process_group):\n self.process_group = process_group\n\n def _specify_channel_last(self, channel_last):\n self.channel_last = channel_last\n\n def forward(self, input, z = None):\n # if input.dim() == 2, we switch to channel_last for efficient memory accessing\n channel_last = self.channel_last if input.dim() != 2 else True\n\n if not self.training and self.track_running_stats and not self.channel_last and not self.fuse_relu and z == None:\n # fall back to pytorch implementation for inference\n return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)\n else:\n exponential_average_factor = 0.0\n if self.training and self.track_running_stats:\n self.num_batches_tracked += 1\n if self.momentum is None:\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else:\n exponential_average_factor = self.momentum\n return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, self.channel_last, self.fuse_relu)\n", "path": "apex/parallel/optimized_sync_batchnorm.py"}], "after_files": [{"content": "import torch\nfrom torch.nn.modules.batchnorm import _BatchNorm\nfrom torch.nn import functional as F\n\nimport syncbn\nfrom .optimized_sync_batchnorm_kernel import SyncBatchnormFunction\n\n\nclass SyncBatchNorm(_BatchNorm):\n \"\"\"\n synchronized batch normalization module extented from `torch.nn.BatchNormNd`\n with the added stats reduction across multiple processes.\n :class:`apex.parallel.SyncBatchNorm` is designed to work with\n `DistributedDataParallel`.\n\n When running in training mode, the layer reduces stats across all processes\n to increase the effective batchsize for normalization layer. This is useful\n in applications where batch size is small on a given process that would\n diminish converged accuracy of the model. The model uses collective\n communication package from `torch.distributed`.\n\n When running in evaluation mode, the layer falls back to\n `torch.nn.functional.batch_norm`\n\n Args:\n num_features: :math:`C` from an expected input of size\n :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`\n eps: a value added to the denominator for numerical stability.\n Default: 1e-5\n momentum: the value used for the running_mean and running_var\n computation. Can be set to ``None`` for cumulative moving average\n (i.e. simple average). Default: 0.1\n affine: a boolean value that when set to ``True``, this module has\n learnable affine parameters. 
Default: ``True``\n track_running_stats: a boolean value that when set to ``True``, this\n module tracks the running mean and variance, and when set to ``False``,\n this module does not track such statistics and always uses batch\n statistics in both training and eval modes. Default: ``True``\n process_group: pass in a process group within which the stats of the\n mini-batch is being synchronized. ``None`` for using default process\n group\n channel_last: a boolean value that when set to ``True``, this module\n take the last dimension of the input tensor to be the channel\n dimension. Default: False\n\n Examples::\n >>> # channel first tensor\n >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()\n >>> inp = torch.randn(10, 100, 14, 14).cuda()\n >>> out = sbn(inp)\n >>> inp = torch.randn(3, 100, 20).cuda()\n >>> out = sbn(inp)\n >>> # channel last tensor\n >>> sbn = apex.parallel.SyncBatchNorm(100, channel_last=True).cuda()\n >>> inp = torch.randn(10, 14, 14, 100).cuda()\n \"\"\"\n\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False, fuse_relu=False):\n super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)\n self.process_group = process_group\n self.channel_last = channel_last\n self.fuse_relu = fuse_relu\n\n def _specify_process_group(self, process_group):\n self.process_group = process_group\n\n def _specify_channel_last(self, channel_last):\n self.channel_last = channel_last\n\n def forward(self, input, z = None):\n # if input.dim() == 2, we switch to channel_last for efficient memory accessing\n channel_last = self.channel_last if input.dim() != 2 else True\n\n if not self.training and self.track_running_stats and not channel_last and not self.fuse_relu and z == None:\n # fall back to pytorch implementation for inference\n return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)\n else:\n exponential_average_factor = 0.0\n if self.training and self.track_running_stats:\n self.num_batches_tracked += 1\n if self.momentum is None:\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else:\n exponential_average_factor = self.momentum\n return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, channel_last, self.fuse_relu)\n", "path": "apex/parallel/optimized_sync_batchnorm.py"}]} | 1,869 | 349 |
gh_patches_debug_25801 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3421 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Results framework loads very slowly for projects with a lot of indicator dimensions
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_dimension.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimension
9
10 from ..serializers import IndicatorDimensionSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimension.objects.all()
18 serializer_class = IndicatorDimensionSerializer
19 project_relation = 'indicator__result__project__'
20
```
Path: `akvo/rest/pagination.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from rest_framework import pagination
8 from rest_framework.response import Response
9
10
11 class LimitSizePageNumberPagination(pagination.PageNumberPagination):
12 page_size = 30
13 page_size_query_param = 'limit'
14 max_page_size = 100
15
16
17 class TastypieOffsetPagination(pagination.LimitOffsetPagination):
18
19 def get_paginated_response(self, data):
20 """ Emulate the old style Tastypie format if the URL contains /api/v1/
21 """
22 return Response({
23 'meta': {
24 'next': self.get_next_link(),
25 'previous': self.get_previous_link(),
26 'total_count': self.count,
27 'limit': self.limit,
28 'offset': self.offset,
29 },
30 'objects': data
31 })
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py
--- a/akvo/rest/pagination.py
+++ b/akvo/rest/pagination.py
@@ -8,12 +8,18 @@
from rest_framework.response import Response
-class LimitSizePageNumberPagination(pagination.PageNumberPagination):
+class StandardSizePageNumberPagination(pagination.PageNumberPagination):
page_size = 30
page_size_query_param = 'limit'
max_page_size = 100
+class LargeSizePageNumberPagination(pagination.PageNumberPagination):
+ page_size = 100
+ page_size_query_param = 'limit'
+ max_page_size = 1000
+
+
class TastypieOffsetPagination(pagination.LimitOffsetPagination):
def get_paginated_response(self, data):
diff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py
--- a/akvo/rest/views/indicator_dimension.py
+++ b/akvo/rest/views/indicator_dimension.py
@@ -6,6 +6,7 @@
from akvo.rsr.models import IndicatorDimension
+from akvo.rest.pagination import LargeSizePageNumberPagination
from ..serializers import IndicatorDimensionSerializer
from ..viewsets import PublicProjectViewSet
@@ -17,3 +18,4 @@
queryset = IndicatorDimension.objects.all()
serializer_class = IndicatorDimensionSerializer
project_relation = 'indicator__result__project__'
+ pagination_class = LargeSizePageNumberPagination
| {"golden_diff": "diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py\n--- a/akvo/rest/pagination.py\n+++ b/akvo/rest/pagination.py\n@@ -8,12 +8,18 @@\n from rest_framework.response import Response\n \n \n-class LimitSizePageNumberPagination(pagination.PageNumberPagination):\n+class StandardSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n \n \n+class LargeSizePageNumberPagination(pagination.PageNumberPagination):\n+ page_size = 100\n+ page_size_query_param = 'limit'\n+ max_page_size = 1000\n+\n+\n class TastypieOffsetPagination(pagination.LimitOffsetPagination):\n \n def get_paginated_response(self, data):\ndiff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py\n--- a/akvo/rest/views/indicator_dimension.py\n+++ b/akvo/rest/views/indicator_dimension.py\n@@ -6,6 +6,7 @@\n \n \n from akvo.rsr.models import IndicatorDimension\n+from akvo.rest.pagination import LargeSizePageNumberPagination\n \n from ..serializers import IndicatorDimensionSerializer\n from ..viewsets import PublicProjectViewSet\n@@ -17,3 +18,4 @@\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n+ pagination_class = LargeSizePageNumberPagination\n", "issue": "Results framework loads very slowly for projects with lot of indicator dimensions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimension\n\nfrom ..serializers import IndicatorDimensionSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n", "path": "akvo/rest/views/indicator_dimension.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass LimitSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n\n\nclass TastypieOffsetPagination(pagination.LimitOffsetPagination):\n\n def get_paginated_response(self, data):\n \"\"\" Emulate the old style Tastypie format if the URL contains /api/v1/\n \"\"\"\n return Response({\n 'meta': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link(),\n 'total_count': self.count,\n 'limit': self.limit,\n 'offset': self.offset,\n },\n 'objects': data\n })\n", "path": "akvo/rest/pagination.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import 
IndicatorDimension\nfrom akvo.rest.pagination import LargeSizePageNumberPagination\n\nfrom ..serializers import IndicatorDimensionSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n pagination_class = LargeSizePageNumberPagination\n", "path": "akvo/rest/views/indicator_dimension.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass StandardSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n\n\nclass LargeSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 100\n page_size_query_param = 'limit'\n max_page_size = 1000\n\n\nclass TastypieOffsetPagination(pagination.LimitOffsetPagination):\n\n def get_paginated_response(self, data):\n \"\"\" Emulate the old style Tastypie format if the URL contains /api/v1/\n \"\"\"\n return Response({\n 'meta': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link(),\n 'total_count': self.count,\n 'limit': self.limit,\n 'offset': self.offset,\n },\n 'objects': data\n })\n", "path": "akvo/rest/pagination.py"}]} | 752 | 336 |
gh_patches_debug_317 | rasdani/github-patches | git_diff | jazzband__pip-tools-1871 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Convert the README from rst to md
<!--- Describe the changes here. --->
This PR converts the documentation from README.rst to README.md
Related: https://github.com/jazzband/pip-tools/issues/1856
##### Contributor checklist
- [ ] Provided the tests for the changes.
- [x] Assure PR title is short, clear, and good to be included in the user-oriented changelog
##### Maintainer checklist
- [ ] Assure one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.
- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # https://www.sphinx-doc.org/en/master/usage/configuration.html
2 """Configuration file for the Sphinx documentation builder."""
3
4 from __future__ import annotations
5
6 from functools import partial
7 from pathlib import Path
8
9 from setuptools_scm import get_version
10
11 # -- Path setup --------------------------------------------------------------
12
13 PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
14 get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
15
16
17 # -- Project information -----------------------------------------------------
18
19 project = "pip-tools"
20 author = f"{project} Contributors"
21 copyright = f"The {author}"
22
23 # The short X.Y version
24 version = ".".join(
25 get_scm_version(
26 local_scheme="no-local-version",
27 ).split(
28 "."
29 )[:3],
30 )
31
32 # The full version, including alpha/beta/rc tags
33 release = get_scm_version()
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = ["myst_parser"]
42
43
44 # -- Options for HTML output -------------------------------------------------
45
46 # The theme to use for HTML and HTML Help pages. See the documentation for
47 # a list of builtin themes.
48 #
49 html_theme = "furo"
50
51
52 # -------------------------------------------------------------------------
53 default_role = "any"
54 nitpicky = True
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -52,3 +52,4 @@
# -------------------------------------------------------------------------
default_role = "any"
nitpicky = True
+suppress_warnings = ["myst.xref_missing"]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -52,3 +52,4 @@\n # -------------------------------------------------------------------------\n default_role = \"any\"\n nitpicky = True\n+suppress_warnings = [\"myst.xref_missing\"]\n", "issue": "Convert the README from rst to md\n<!--- Describe the changes here. --->\r\nThis PR converts the documentation from README.rst to README.md\r\nRelated: https://github.com/jazzband/pip-tools/issues/1856\r\n##### Contributor checklist\r\n\r\n- [ ] Provided the tests for the changes.\r\n- [x] Assure PR title is short, clear, and good to be included in the user-oriented changelog\r\n\r\n##### Maintainer checklist\r\n\r\n- [ ] Assure one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.\r\n- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).\r\n\n", "before_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n", "path": "docs/conf.py"}], "after_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\nsuppress_warnings = [\"myst.xref_missing\"]\n", "path": "docs/conf.py"}]} | 841 | 61 |
gh_patches_debug_40399 | rasdani/github-patches | git_diff | SeldonIO__MLServer-233 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support MLflow current protocol
As a follow-up to #167, it would be interesting to explore adding a custom endpoint to the `mlserver-mlflow` runtime which supports [MLflow's existing API](https://www.mlflow.org/docs/latest/models.html#deploy-mlflow-models). This would help reduce friction on user adoption of MLServer, as well as serve as a temporary stopgap for users while they adopt the V2 protocol.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `runtimes/mlflow/mlserver_mlflow/runtime.py`
Content:
```
1 import mlflow
2
3 from mlserver.types import InferenceRequest, InferenceResponse
4 from mlserver.model import MLModel
5 from mlserver.utils import get_model_uri
6 from mlserver.codecs import get_decoded_or_raw
7
8 from .encoding import to_outputs
9
10
11 class MLflowRuntime(MLModel):
12 """
13 Implementation of the MLModel interface to load and serve `scikit-learn`
14 models persisted with `joblib`.
15 """
16
17 async def load(self) -> bool:
18 # TODO: Log info message
19 model_uri = await get_model_uri(self._settings)
20 self._model = mlflow.pyfunc.load_model(model_uri)
21
22 self.ready = True
23 return self.ready
24
25 async def predict(self, payload: InferenceRequest) -> InferenceResponse:
26 decoded_payload = get_decoded_or_raw(payload)
27
28 # TODO: Can `output` be a dictionary of tensors?
29 model_output = self._model.predict(decoded_payload)
30
31 return InferenceResponse(
32 model_name=self.name,
33 model_version=self.version,
34 outputs=to_outputs(model_output),
35 )
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/runtimes/mlflow/mlserver_mlflow/runtime.py b/runtimes/mlflow/mlserver_mlflow/runtime.py
--- a/runtimes/mlflow/mlserver_mlflow/runtime.py
+++ b/runtimes/mlflow/mlserver_mlflow/runtime.py
@@ -1,9 +1,29 @@
import mlflow
+from io import StringIO
+from fastapi import Request, Response
+
+from mlflow.exceptions import MlflowException
+from mlflow.pyfunc.scoring_server import (
+ CONTENT_TYPES,
+ CONTENT_TYPE_CSV,
+ CONTENT_TYPE_JSON,
+ CONTENT_TYPE_JSON_SPLIT_ORIENTED,
+ CONTENT_TYPE_JSON_RECORDS_ORIENTED,
+ CONTENT_TYPE_JSON_SPLIT_NUMPY,
+ parse_csv_input,
+ infer_and_parse_json_input,
+ parse_json_input,
+ parse_split_oriented_json_input_to_numpy,
+ predictions_to_json,
+)
+
from mlserver.types import InferenceRequest, InferenceResponse
from mlserver.model import MLModel
from mlserver.utils import get_model_uri
from mlserver.codecs import get_decoded_or_raw
+from mlserver.handlers import custom_handler
+from mlserver.errors import InferenceError
from .encoding import to_outputs
@@ -14,10 +34,68 @@
models persisted with `joblib`.
"""
+ # TODO: Decouple from REST
+ @custom_handler(rest_path="/invocations")
+ async def invocations(self, request: Request) -> Response:
+ """
+ This custom handler is meant to mimic the behaviour of the existing
+ scoring server in MLflow.
+ For details about its implementation, please consult the original
+ implementation in the MLflow repository:
+
+ https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py
+ """
+ content_type = request.headers.get("content-type", None)
+ raw_data = await request.body()
+ as_str = raw_data.decode("utf-8")
+
+ if content_type == CONTENT_TYPE_CSV:
+ csv_input = StringIO(as_str)
+ data = parse_csv_input(csv_input=csv_input)
+ elif content_type == CONTENT_TYPE_JSON:
+ data = infer_and_parse_json_input(as_str, self._input_schema)
+ elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:
+ data = parse_json_input(
+ json_input=StringIO(as_str),
+ orient="split",
+ schema=self._input_schema,
+ )
+ elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
+ data = parse_json_input(
+ json_input=StringIO(as_str),
+ orient="records",
+ schema=self._input_schema,
+ )
+ elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:
+ data = parse_split_oriented_json_input_to_numpy(as_str)
+ else:
+ content_type_error_message = (
+ "This predictor only supports the following content types, "
+ f"{CONTENT_TYPES}. Got '{content_type}'."
+ )
+ raise InferenceError(content_type_error_message)
+
+ try:
+ raw_predictions = self._model.predict(data)
+ except MlflowException as e:
+ raise InferenceError(e.message)
+ except Exception:
+ error_message = (
+ "Encountered an unexpected error while evaluating the model. Verify"
+ " that the serialized input Dataframe is compatible with the model for"
+ " inference."
+ )
+ raise InferenceError(error_message)
+
+ result = StringIO()
+ predictions_to_json(raw_predictions, result)
+ return Response(content=result.getvalue(), media_type="application/json")
+
async def load(self) -> bool:
# TODO: Log info message
model_uri = await get_model_uri(self._settings)
self._model = mlflow.pyfunc.load_model(model_uri)
+ self._input_schema = self._model.metadata.get_input_schema()
self.ready = True
return self.ready
| {"golden_diff": "diff --git a/runtimes/mlflow/mlserver_mlflow/runtime.py b/runtimes/mlflow/mlserver_mlflow/runtime.py\n--- a/runtimes/mlflow/mlserver_mlflow/runtime.py\n+++ b/runtimes/mlflow/mlserver_mlflow/runtime.py\n@@ -1,9 +1,29 @@\n import mlflow\n \n+from io import StringIO\n+from fastapi import Request, Response\n+\n+from mlflow.exceptions import MlflowException\n+from mlflow.pyfunc.scoring_server import (\n+ CONTENT_TYPES,\n+ CONTENT_TYPE_CSV,\n+ CONTENT_TYPE_JSON,\n+ CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n+ CONTENT_TYPE_JSON_RECORDS_ORIENTED,\n+ CONTENT_TYPE_JSON_SPLIT_NUMPY,\n+ parse_csv_input,\n+ infer_and_parse_json_input,\n+ parse_json_input,\n+ parse_split_oriented_json_input_to_numpy,\n+ predictions_to_json,\n+)\n+\n from mlserver.types import InferenceRequest, InferenceResponse\n from mlserver.model import MLModel\n from mlserver.utils import get_model_uri\n from mlserver.codecs import get_decoded_or_raw\n+from mlserver.handlers import custom_handler\n+from mlserver.errors import InferenceError\n \n from .encoding import to_outputs\n \n@@ -14,10 +34,68 @@\n models persisted with `joblib`.\n \"\"\"\n \n+ # TODO: Decouple from REST\n+ @custom_handler(rest_path=\"/invocations\")\n+ async def invocations(self, request: Request) -> Response:\n+ \"\"\"\n+ This custom handler is meant to mimic the behaviour of the existing\n+ scoring server in MLflow.\n+ For details about its implementation, please consult the original\n+ implementation in the MLflow repository:\n+\n+ https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py\n+ \"\"\"\n+ content_type = request.headers.get(\"content-type\", None)\n+ raw_data = await request.body()\n+ as_str = raw_data.decode(\"utf-8\")\n+\n+ if content_type == CONTENT_TYPE_CSV:\n+ csv_input = StringIO(as_str)\n+ data = parse_csv_input(csv_input=csv_input)\n+ elif content_type == CONTENT_TYPE_JSON:\n+ data = infer_and_parse_json_input(as_str, self._input_schema)\n+ elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:\n+ data = parse_json_input(\n+ json_input=StringIO(as_str),\n+ orient=\"split\",\n+ schema=self._input_schema,\n+ )\n+ elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:\n+ data = parse_json_input(\n+ json_input=StringIO(as_str),\n+ orient=\"records\",\n+ schema=self._input_schema,\n+ )\n+ elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:\n+ data = parse_split_oriented_json_input_to_numpy(as_str)\n+ else:\n+ content_type_error_message = (\n+ \"This predictor only supports the following content types, \"\n+ f\"{CONTENT_TYPES}. Got '{content_type}'.\"\n+ )\n+ raise InferenceError(content_type_error_message)\n+\n+ try:\n+ raw_predictions = self._model.predict(data)\n+ except MlflowException as e:\n+ raise InferenceError(e.message)\n+ except Exception:\n+ error_message = (\n+ \"Encountered an unexpected error while evaluating the model. 
Verify\"\n+ \" that the serialized input Dataframe is compatible with the model for\"\n+ \" inference.\"\n+ )\n+ raise InferenceError(error_message)\n+\n+ result = StringIO()\n+ predictions_to_json(raw_predictions, result)\n+ return Response(content=result.getvalue(), media_type=\"application/json\")\n+\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(self._settings)\n self._model = mlflow.pyfunc.load_model(model_uri)\n+ self._input_schema = self._model.metadata.get_input_schema()\n \n self.ready = True\n return self.ready\n", "issue": "Support MLflow current protocol\nAs a follow-up to #167, it would be interesting to explore adding a custom endpoint to the `mlserver-mlflow` runtime which supports [MLflow's existing API](https://www.mlflow.org/docs/latest/models.html#deploy-mlflow-models). This would help reduce friction on user adoption of MLSever, as well as a temporary stopgap for users while they adopt the V2 protocol.\n", "before_files": [{"content": "import mlflow\n\nfrom mlserver.types import InferenceRequest, InferenceResponse\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import get_decoded_or_raw\n\nfrom .encoding import to_outputs\n\n\nclass MLflowRuntime(MLModel):\n \"\"\"\n Implementation of the MLModel interface to load and serve `scikit-learn`\n models persisted with `joblib`.\n \"\"\"\n\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(self._settings)\n self._model = mlflow.pyfunc.load_model(model_uri)\n\n self.ready = True\n return self.ready\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n decoded_payload = get_decoded_or_raw(payload)\n\n # TODO: Can `output` be a dictionary of tensors?\n model_output = self._model.predict(decoded_payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=to_outputs(model_output),\n )\n", "path": "runtimes/mlflow/mlserver_mlflow/runtime.py"}], "after_files": [{"content": "import mlflow\n\nfrom io import StringIO\nfrom fastapi import Request, Response\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.pyfunc.scoring_server import (\n CONTENT_TYPES,\n CONTENT_TYPE_CSV,\n CONTENT_TYPE_JSON,\n CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n CONTENT_TYPE_JSON_RECORDS_ORIENTED,\n CONTENT_TYPE_JSON_SPLIT_NUMPY,\n parse_csv_input,\n infer_and_parse_json_input,\n parse_json_input,\n parse_split_oriented_json_input_to_numpy,\n predictions_to_json,\n)\n\nfrom mlserver.types import InferenceRequest, InferenceResponse\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import get_decoded_or_raw\nfrom mlserver.handlers import custom_handler\nfrom mlserver.errors import InferenceError\n\nfrom .encoding import to_outputs\n\n\nclass MLflowRuntime(MLModel):\n \"\"\"\n Implementation of the MLModel interface to load and serve `scikit-learn`\n models persisted with `joblib`.\n \"\"\"\n\n # TODO: Decouple from REST\n @custom_handler(rest_path=\"/invocations\")\n async def invocations(self, request: Request) -> Response:\n \"\"\"\n This custom handler is meant to mimic the behaviour of the existing\n scoring server in MLflow.\n For details about its implementation, please consult the original\n implementation in the MLflow repository:\n\n https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py\n \"\"\"\n content_type = request.headers.get(\"content-type\", None)\n raw_data 
= await request.body()\n as_str = raw_data.decode(\"utf-8\")\n\n if content_type == CONTENT_TYPE_CSV:\n csv_input = StringIO(as_str)\n data = parse_csv_input(csv_input=csv_input)\n elif content_type == CONTENT_TYPE_JSON:\n data = infer_and_parse_json_input(as_str, self._input_schema)\n elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:\n data = parse_json_input(\n json_input=StringIO(as_str),\n orient=\"split\",\n schema=self._input_schema,\n )\n elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:\n data = parse_json_input(\n json_input=StringIO(as_str),\n orient=\"records\",\n schema=self._input_schema,\n )\n elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:\n data = parse_split_oriented_json_input_to_numpy(as_str)\n else:\n content_type_error_message = (\n \"This predictor only supports the following content types, \"\n f\"{CONTENT_TYPES}. Got '{content_type}'.\"\n )\n raise InferenceError(content_type_error_message)\n\n try:\n raw_predictions = self._model.predict(data)\n except MlflowException as e:\n raise InferenceError(e.message)\n except Exception:\n error_message = (\n \"Encountered an unexpected error while evaluating the model. Verify\"\n \" that the serialized input Dataframe is compatible with the model for\"\n \" inference.\"\n )\n raise InferenceError(error_message)\n\n result = StringIO()\n predictions_to_json(raw_predictions, result)\n return Response(content=result.getvalue(), media_type=\"application/json\")\n\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(self._settings)\n self._model = mlflow.pyfunc.load_model(model_uri)\n self._input_schema = self._model.metadata.get_input_schema()\n\n self.ready = True\n return self.ready\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n decoded_payload = get_decoded_or_raw(payload)\n\n # TODO: Can `output` be a dictionary of tensors?\n model_output = self._model.predict(decoded_payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=to_outputs(model_output),\n )\n", "path": "runtimes/mlflow/mlserver_mlflow/runtime.py"}]} | 655 | 873 |
gh_patches_debug_20942 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-3873 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enhance `ContrastiveLoss` to avoid warning
Calling `ContrastiveLoss` will show a warning message:
```
To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
```
Simple code to reproduce this issue:
```
from monai.losses import ContrastiveLoss
import torch
inp = torch.randn([2, 10])
target = torch.randn([2, 10])
loss = ContrastiveLoss(batch_size=2)
loss(inp, target)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/losses/contrastive.py`
Content:
```
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import torch
13 from torch.nn import functional as F
14 from torch.nn.modules.loss import _Loss
15
16 from monai.utils import deprecated_arg
17
18
19 class ContrastiveLoss(_Loss):
20
21 """
22 Compute the Contrastive loss defined in:
23
24 Chen, Ting, et al. "A simple framework for contrastive learning of visual representations." International
25 conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)
26
27 Adapted from:
28 https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5
29
30 """
31
32 @deprecated_arg(name="reduction", since="0.8", msg_suffix="`reduction` is no longer supported.")
33 def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction="sum") -> None:
34 """
35 Args:
36 temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.
37 batch_size: The number of samples.
38
39 Raises:
40 ValueError: When an input of dimension length > 2 is passed
41 ValueError: When input and target are of different shapes
42
43 .. deprecated:: 0.8.0
44
45 `reduction` is no longer supported.
46
47 """
48 super().__init__()
49
50 self.batch_size = batch_size
51 self.temperature = temperature
52
53 def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
54 """
55 Args:
56 input: the shape should be B[F].
57 target: the shape should be B[F].
58 """
59 if len(target.shape) > 2 or len(input.shape) > 2:
60 raise ValueError(
61 f"Either target or input has dimensions greater than 2 where target "
62 f"shape is ({target.shape}) and input shape is ({input.shape})"
63 )
64
65 if target.shape != input.shape:
66 raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
67
68 temperature_tensor = torch.tensor(self.temperature).to(input.device)
69
70 norm_i = F.normalize(input, dim=1)
71 norm_j = F.normalize(target, dim=1)
72
73 negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
74 negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)
75 negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)
76
77 repr = torch.cat([norm_i, norm_j], dim=0)
78 sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
79 sim_ij = torch.diag(sim_matrix, self.batch_size)
80 sim_ji = torch.diag(sim_matrix, -self.batch_size)
81
82 positives = torch.cat([sim_ij, sim_ji], dim=0)
83 nominator = torch.exp(positives / temperature_tensor)
84 denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)
85
86 loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
87
88 return torch.sum(loss_partial) / (2 * self.batch_size)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py
--- a/monai/losses/contrastive.py
+++ b/monai/losses/contrastive.py
@@ -65,14 +65,13 @@
if target.shape != input.shape:
raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
- temperature_tensor = torch.tensor(self.temperature).to(input.device)
+ temperature_tensor = torch.as_tensor(self.temperature).to(input.device)
norm_i = F.normalize(input, dim=1)
norm_j = F.normalize(target, dim=1)
negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
- negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)
- negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)
+ negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)
repr = torch.cat([norm_i, norm_j], dim=0)
sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
| {"golden_diff": "diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py\n--- a/monai/losses/contrastive.py\n+++ b/monai/losses/contrastive.py\n@@ -65,14 +65,13 @@\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n \n- temperature_tensor = torch.tensor(self.temperature).to(input.device)\n+ temperature_tensor = torch.as_tensor(self.temperature).to(input.device)\n \n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n \n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n- negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n- negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n+ negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)\n \n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n", "issue": "Enhance `ContrastiveLoss` to avoid warning\nCall ContrastiveLoss will see a warning message:\r\n```\r\nTo copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\r\n```\r\n\r\nSimple code to reproduce this issue:\r\n```\r\nfrom monai.losses import ContrastiveLoss\r\nimport torch\r\n\r\ninp = torch.randn([2, 10])\r\ntarget = torch.randn([2, 10])\r\nloss = ContrastiveLoss(batch_size=2)\r\nloss(inp, target)\r\n```\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import deprecated_arg\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n batch_size: The number of samples.\n\n Raises:\n ValueError: When an input of dimension length > 2 is passed\n ValueError: When input and target are of different shapes\n\n .. 
deprecated:: 0.8.0\n\n `reduction` is no longer supported.\n\n \"\"\"\n super().__init__()\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n return torch.sum(loss_partial) / (2 * self.batch_size)\n", "path": "monai/losses/contrastive.py"}], "after_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import deprecated_arg\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n batch_size: The number of samples.\n\n Raises:\n ValueError: When an input of dimension length > 2 is passed\n ValueError: When input and target are of different shapes\n\n .. 
deprecated:: 0.8.0\n\n `reduction` is no longer supported.\n\n \"\"\"\n super().__init__()\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.as_tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n return torch.sum(loss_partial) / (2 * self.batch_size)\n", "path": "monai/losses/contrastive.py"}]} | 1,414 | 270 |
gh_patches_debug_13438 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3307 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider vetco is broken
During the global build at 2021-09-01-14-42-16, the spider **vetco** failed with **0 features** and **24644 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/vetco_clinic.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from scrapy.selector import Selector
9
10
11 class VetcoSpider(scrapy.Spider):
12 name = "vetco"
13 item_attributes = {'brand': "vetcoclinics"}
14 allowed_domains = ["vetcoclinics.com"]
15 start_urls = (
16 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',
17 )
18
19 def start_requests(self):
20 with open('./locations/searchable_points/us_zcta.csv') as points:
21 next(points) # Ignore the header
22 for point in points:
23 row = point.split(',')
24 zip = row[0].strip().strip('"')
25
26 url = f"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}"
27
28 yield scrapy.http.Request(
29 url,
30 self.parse,
31 method='GET'
32 )
33
34 def parse(self, response):
35 jsonresponse = json.loads(response.body_as_unicode())
36 if jsonresponse is not None:
37 clinics = jsonresponse.get('clinics')
38 if clinics:
39 for stores in clinics:
40 body = stores['label']
41 address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
42 if len(address) == 3:
43 addr_full, city_state_postal, phone = [item.split(",") for item in address]
44 city, state_postal = [item.split(",") for item in city_state_postal]
45 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
46
47
48 else:
49 addr_full, city_state_postal = [item.split(",") for item in address]
50 city, state_postal = [item.split(",") for item in city_state_postal]
51 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
52
53 properties = {
54 'ref': addr_full[0].strip(),
55 'addr_full': addr_full[0].strip(),
56 'city': city[0].strip(),
57 'state': state,
58 'postcode': postal,
59 'lat': float(stores["point"]["lat"]),
60 'lon': float(stores["point"]["long"]),
61 'website': response.url
62 }
63
64 yield GeojsonPointItem(**properties)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py
--- a/locations/spiders/vetco_clinic.py
+++ b/locations/spiders/vetco_clinic.py
@@ -38,7 +38,7 @@
if clinics:
for stores in clinics:
body = stores['label']
- address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
+ address = Selector(text=body).xpath('//address/text()').extract()
if len(address) == 3:
addr_full, city_state_postal, phone = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
| {"golden_diff": "diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py\n--- a/locations/spiders/vetco_clinic.py\n+++ b/locations/spiders/vetco_clinic.py\n@@ -38,7 +38,7 @@\n if clinics:\n for stores in clinics:\n body = stores['label']\n- address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n+ address = Selector(text=body).xpath('//address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n", "issue": "Spider vetco is broken\nDuring the global build at 2021-09-01-14-42-16, spider **vetco** failed with **0 features** and **24644 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom scrapy.selector import Selector\n\n\nclass VetcoSpider(scrapy.Spider):\n name = \"vetco\"\n item_attributes = {'brand': \"vetcoclinics\"}\n allowed_domains = [\"vetcoclinics.com\"]\n start_urls = (\n 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',\n )\n\n def start_requests(self):\n with open('./locations/searchable_points/us_zcta.csv') as points:\n next(points) # Ignore the header\n for point in points:\n row = point.split(',')\n zip = row[0].strip().strip('\"')\n\n url = f\"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}\"\n\n yield scrapy.http.Request(\n url,\n self.parse,\n method='GET'\n )\n\n def parse(self, response):\n jsonresponse = json.loads(response.body_as_unicode())\n if jsonresponse is not None:\n clinics = jsonresponse.get('clinics')\n if clinics:\n for stores in clinics:\n body = stores['label']\n address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n\n else:\n addr_full, city_state_postal = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n properties = {\n 'ref': addr_full[0].strip(),\n 'addr_full': addr_full[0].strip(),\n 'city': city[0].strip(),\n 'state': state,\n 'postcode': postal,\n 'lat': float(stores[\"point\"][\"lat\"]),\n 'lon': float(stores[\"point\"][\"long\"]),\n 'website': response.url\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/vetco_clinic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom scrapy.selector import Selector\n\n\nclass VetcoSpider(scrapy.Spider):\n name = \"vetco\"\n item_attributes = {'brand': \"vetcoclinics\"}\n allowed_domains = [\"vetcoclinics.com\"]\n start_urls = (\n 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',\n )\n\n def start_requests(self):\n with 
open('./locations/searchable_points/us_zcta.csv') as points:\n next(points) # Ignore the header\n for point in points:\n row = point.split(',')\n zip = row[0].strip().strip('\"')\n\n url = f\"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}\"\n\n yield scrapy.http.Request(\n url,\n self.parse,\n method='GET'\n )\n\n def parse(self, response):\n jsonresponse = json.loads(response.body_as_unicode())\n if jsonresponse is not None:\n clinics = jsonresponse.get('clinics')\n if clinics:\n for stores in clinics:\n body = stores['label']\n address = Selector(text=body).xpath('//address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n\n else:\n addr_full, city_state_postal = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n properties = {\n 'ref': addr_full[0].strip(),\n 'addr_full': addr_full[0].strip(),\n 'city': city[0].strip(),\n 'state': state,\n 'postcode': postal,\n 'lat': float(stores[\"point\"][\"lat\"]),\n 'lon': float(stores[\"point\"][\"long\"]),\n 'website': response.url\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/vetco_clinic.py"}]} | 1,108 | 173 |
gh_patches_debug_51335 | rasdani/github-patches | git_diff | beetbox__beets-1650 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plexupdate plugin crashed
Every time after import plexupdate plugin crashed with this error:
```
Traceback (most recent call last):
File "/usr/local/bin/beet", line 9, in <module>
load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1163, in main
_raw_main(args)
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1155, in _raw_main
plugins.send('cli_exit', lib=lib)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 458, in send
result = handler(**arguments)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 123, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 87, in update
config['plex']['library_name'].get())
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 41, in update_plex
section_key = get_music_section(host, port, token, library_name)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 31, in get_music_section
tree = ET.fromstring(r.raw)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1300, in XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1640, in feed
self._parser.Parse(data, 0)
TypeError: must be string or read-only buffer, not HTTPResponse
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/plexupdate.py`
Content:
```
1 """Updates an Plex library whenever the beets library is changed.
2
3 Plex Home users enter the Plex Token to enable updating.
4 Put something like the following in your config.yaml to configure:
5 plex:
6 host: localhost
7 port: 32400
8 token: token
9 """
10 from __future__ import (division, absolute_import, print_function,
11 unicode_literals)
12
13 import requests
14 from urlparse import urljoin
15 from urllib import urlencode
16 import xml.etree.ElementTree as ET
17 from beets import config
18 from beets.plugins import BeetsPlugin
19
20
21 def get_music_section(host, port, token, library_name):
22 """Getting the section key for the music library in Plex.
23 """
24 api_endpoint = append_token('library/sections', token)
25 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
26
27 # Sends request.
28 r = requests.get(url)
29
30 # Parse xml tree and extract music section key.
31 tree = ET.fromstring(r.text)
32 for child in tree.findall('Directory'):
33 if child.get('title') == library_name:
34 return child.get('key')
35
36
37 def update_plex(host, port, token, library_name):
38 """Sends request to the Plex api to start a library refresh.
39 """
40 # Getting section key and build url.
41 section_key = get_music_section(host, port, token, library_name)
42 api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
43 api_endpoint = append_token(api_endpoint, token)
44 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
45
46 # Sends request and returns requests object.
47 r = requests.get(url)
48 return r
49
50
51 def append_token(url, token):
52 """Appends the Plex Home token to the api call if required.
53 """
54 if token:
55 url += '?' + urlencode({'X-Plex-Token': token})
56 return url
57
58
59 class PlexUpdate(BeetsPlugin):
60 def __init__(self):
61 super(PlexUpdate, self).__init__()
62
63 # Adding defaults.
64 config['plex'].add({
65 u'host': u'localhost',
66 u'port': 32400,
67 u'token': u'',
68 u'library_name': u'Music'})
69
70 self.register_listener('database_change', self.listen_for_db_change)
71
72 def listen_for_db_change(self, lib, model):
73 """Listens for beets db change and register the update for the end"""
74 self.register_listener('cli_exit', self.update)
75
76 def update(self, lib):
77 """When the client exists try to send refresh request to Plex server.
78 """
79 self._log.info('Updating Plex library...')
80
81 # Try to send update request.
82 try:
83 update_plex(
84 config['plex']['host'].get(),
85 config['plex']['port'].get(),
86 config['plex']['token'].get(),
87 config['plex']['library_name'].get())
88 self._log.info('... started.')
89
90 except requests.exceptions.RequestException:
91 self._log.warning('Update failed.')
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py
--- a/beetsplug/plexupdate.py
+++ b/beetsplug/plexupdate.py
@@ -28,7 +28,7 @@
r = requests.get(url)
# Parse xml tree and extract music section key.
- tree = ET.fromstring(r.text)
+ tree = ET.fromstring(r.content)
for child in tree.findall('Directory'):
if child.get('title') == library_name:
return child.get('key')
| {"golden_diff": "diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py\n--- a/beetsplug/plexupdate.py\n+++ b/beetsplug/plexupdate.py\n@@ -28,7 +28,7 @@\n r = requests.get(url)\n \n # Parse xml tree and extract music section key.\n- tree = ET.fromstring(r.text)\n+ tree = ET.fromstring(r.content)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n", "issue": "plexupdate plugin crashed\nEvery time after import plexupdate plugin crashed with this error:\n\n```\nTraceback (most recent call last):\n File \"/usr/local/bin/beet\", line 9, in <module>\n load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1163, in main\n _raw_main(args)\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1155, in _raw_main\n plugins.send('cli_exit', lib=lib)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 458, in send\n result = handler(**arguments)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 123, in wrapper\n return func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 87, in update\n config['plex']['library_name'].get())\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 41, in update_plex\n section_key = get_music_section(host, port, token, library_name)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 31, in get_music_section\n tree = ET.fromstring(r.raw)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1300, in XML\n parser.feed(text)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1640, in feed\n self._parser.Parse(data, 0)\nTypeError: must be string or read-only buffer, not HTTPResponse\n```\n\n", "before_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n\n\ndef update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n 
url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u'',\n u'library_name': u'Music'})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get(),\n config['plex']['library_name'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}], "after_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.content)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n\n\ndef update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n url += '?' 
+ urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u'',\n u'library_name': u'Music'})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get(),\n config['plex']['library_name'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}]} | 1,528 | 118 |
gh_patches_debug_34669 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-125 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy
We decided that PlasmaPy will only be supported for:
- Python version > 3.6
- Astropy version > 2.0
- NumPy version > 1.13
However, when I try to run:
```ShellSession
python setup.py install
```
from the command line with Python 3.5, I get a `SyntaxError` for syntax that is new in version 3.6.
When I try to run
```Python
import plasmapy
```
in Python 3.6 with Astropy 1.3.1, I get an exception since one of the constants imported from Astropy was renamed in 2.0.
We should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.
Thank you!
Nick
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/__init__.py`
Content:
```
1 from ._metadata import (
2 name as __name__,
3 version as __version__,
4 description as __doc__,
5 author as __author__,
6 )
7
8 from .classes import Plasma
9 from . import classes
10 from . import constants
11 from . import atomic
12 from . import math
13 from . import physics
14 from . import utils
15
16 import sys
17 import warnings
18
19 if sys.version_info[:2] < (3, 6): # coveralls: ignore
20 warnings.warn("PlasmaPy does not support Python 3.5 and below")
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -5,16 +5,81 @@
author as __author__,
)
-from .classes import Plasma
-from . import classes
-from . import constants
-from . import atomic
-from . import math
-from . import physics
-from . import utils
-
import sys
import warnings
-if sys.version_info[:2] < (3, 6): # coveralls: ignore
+__minimum_python_version__ = '3.6'
+__minimum_numpy_version__ = '1.13.0'
+__minimum_astropy_version__ = '2.0.0'
+
+
+def _split_version(version):
+ return tuple(int(ver) for ver in version.split('.'))
+
+
+def _min_required_version(required, current): # coveralls: ignore
+ """ Return `True` if the current version meets the required minimum
+ version and `False` if not/ if not installed.
+
+ Right now `required` and `current` are just '.' separated strings
+ but it would be good to make this more general and accept modules.
+ """
+ return _split_version(current) >= _split_version(required)
+
+
+def _check_numpy_version(): # coveralls: ignore
+ """ Make sure numpy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ np_ver = None
+
+ try:
+ from numpy import __version__ as np_ver
+ required_version = _min_required_version(__minimum_numpy_version__,
+ np_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Numpy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_numpy_version__, np_ver)
+ raise ImportError(ver_error)
+
+
+def _check_astropy_version(): # coveralls: ignore
+ """ Make sure astropy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ ap_ver = None
+
+ try:
+ from astropy import __version__ as ap_ver
+ required_version = _min_required_version(__minimum_astropy_version__,
+ ap_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Astropy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_astropy_version__, ap_ver)
+ raise ImportError(ver_error)
+
+
+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore
warnings.warn("PlasmaPy does not support Python 3.5 and below")
+
+_check_numpy_version()
+_check_astropy_version()
+
+try:
+ from .classes import Plasma
+ from . import classes
+ from . import constants
+ from . import atomic
+ from . import math
+ from . import physics
+ from . import utils
+except Exception:
+ raise ImportError("Unable to load PlasmaPy subpackages.")
| {"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -5,16 +5,81 @@\n author as __author__,\n )\n \n-from .classes import Plasma\n-from . import classes\n-from . import constants\n-from . import atomic\n-from . import math\n-from . import physics\n-from . import utils\n-\n import sys\n import warnings\n \n-if sys.version_info[:2] < (3, 6): # coveralls: ignore\n+__minimum_python_version__ = '3.6'\n+__minimum_numpy_version__ = '1.13.0'\n+__minimum_astropy_version__ = '2.0.0'\n+\n+\n+def _split_version(version):\n+ return tuple(int(ver) for ver in version.split('.'))\n+\n+\n+def _min_required_version(required, current): # coveralls: ignore\n+ \"\"\" Return `True` if the current version meets the required minimum\n+ version and `False` if not/ if not installed.\n+\n+ Right now `required` and `current` are just '.' separated strings\n+ but it would be good to make this more general and accept modules.\n+ \"\"\"\n+ return _split_version(current) >= _split_version(required)\n+\n+\n+def _check_numpy_version(): # coveralls: ignore\n+ \"\"\" Make sure numpy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ np_ver = None\n+\n+ try:\n+ from numpy import __version__ as np_ver\n+ required_version = _min_required_version(__minimum_numpy_version__,\n+ np_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Numpy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_numpy_version__, np_ver)\n+ raise ImportError(ver_error)\n+\n+\n+def _check_astropy_version(): # coveralls: ignore\n+ \"\"\" Make sure astropy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ ap_ver = None\n+\n+ try:\n+ from astropy import __version__ as ap_ver\n+ required_version = _min_required_version(__minimum_astropy_version__,\n+ ap_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Astropy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_astropy_version__, ap_ver)\n+ raise ImportError(ver_error)\n+\n+\n+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n+\n+_check_numpy_version()\n+_check_astropy_version()\n+\n+try:\n+ from .classes import Plasma\n+ from . import classes\n+ from . import constants\n+ from . import atomic\n+ from . import math\n+ from . import physics\n+ from . import utils\n+except Exception:\n+ raise ImportError(\"Unable to load PlasmaPy subpackages.\")\n", "issue": "Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy\nWe decided that PlasmaPy will only be supported for:\r\n- Python version > 3.6\r\n- Astropy version > 2.0\r\n- NumPy version > 1.13\r\n\r\nHowever, when I try to run:\r\n```ShellSession\r\npython setup.py install\r\n```\r\nfrom the command line with Python 3.5 then I get a `SyntaxError` for syntax that is new in version 3.6.\r\n\r\nWhen I try to run\r\n```Python\r\nimport plasmapy\r\n```\r\nin Python 3.6 with Astropy 1.3.1, then I get an exception since one of the constants imported from Astropy was renamed in 2.0.\r\n\r\nWe should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. 
We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.\r\n\r\nThank you!\r\nNick\n", "before_files": [{"content": "from ._metadata import (\n name as __name__,\n version as __version__,\n description as __doc__,\n author as __author__,\n)\n\nfrom .classes import Plasma\nfrom . import classes\nfrom . import constants\nfrom . import atomic\nfrom . import math\nfrom . import physics\nfrom . import utils\n\nimport sys\nimport warnings\n\nif sys.version_info[:2] < (3, 6): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n", "path": "plasmapy/__init__.py"}], "after_files": [{"content": "from ._metadata import (\n name as __name__,\n version as __version__,\n description as __doc__,\n author as __author__,\n)\n\nimport sys\nimport warnings\n\n__minimum_python_version__ = '3.6'\n__minimum_numpy_version__ = '1.13.0'\n__minimum_astropy_version__ = '2.0.0'\n\n\ndef _split_version(version):\n return tuple(int(ver) for ver in version.split('.'))\n\n\ndef _min_required_version(required, current): # coveralls: ignore\n \"\"\" Return `True` if the current version meets the required minimum\n version and `False` if not/ if not installed.\n\n Right now `required` and `current` are just '.' separated strings\n but it would be good to make this more general and accept modules.\n \"\"\"\n return _split_version(current) >= _split_version(required)\n\n\ndef _check_numpy_version(): # coveralls: ignore\n \"\"\" Make sure numpy in installed and meets the minimum version requirements\n \"\"\"\n required_version = False\n np_ver = None\n\n try:\n from numpy import __version__ as np_ver\n required_version = _min_required_version(__minimum_numpy_version__,\n np_ver)\n except ImportError:\n pass\n\n if not required_version:\n ver_error = (\"Numpy {} or above is required for PlasmaPy. The \"\n \"currently installed version is {}\"\n ).format(__minimum_numpy_version__, np_ver)\n raise ImportError(ver_error)\n\n\ndef _check_astropy_version(): # coveralls: ignore\n \"\"\" Make sure astropy in installed and meets the minimum version requirements\n \"\"\"\n required_version = False\n ap_ver = None\n\n try:\n from astropy import __version__ as ap_ver\n required_version = _min_required_version(__minimum_astropy_version__,\n ap_ver)\n except ImportError:\n pass\n\n if not required_version:\n ver_error = (\"Astropy {} or above is required for PlasmaPy. The \"\n \"currently installed version is {}\"\n ).format(__minimum_astropy_version__, ap_ver)\n raise ImportError(ver_error)\n\n\nif (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n\n_check_numpy_version()\n_check_astropy_version()\n\ntry:\n from .classes import Plasma\n from . import classes\n from . import constants\n from . import atomic\n from . import math\n from . import physics\n from . import utils\nexcept Exception:\n raise ImportError(\"Unable to load PlasmaPy subpackages.\")\n", "path": "plasmapy/__init__.py"}]} | 656 | 720 |
gh_patches_debug_4229 | rasdani/github-patches | git_diff | twisted__twisted-11816 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
twisted.web.pages.errorPage docstring has a typo
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.
Should be:
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/twisted/web/pages.py`
Content:
```
1 # -*- test-case-name: twisted.web.test.test_pages -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5 """
6 Utility implementations of L{IResource}.
7 """
8
9 __all__ = (
10 "errorPage",
11 "notFound",
12 "forbidden",
13 )
14
15 from typing import cast
16
17 from twisted.web import http
18 from twisted.web.iweb import IRenderable, IRequest
19 from twisted.web.resource import IResource, Resource
20 from twisted.web.template import renderElement, tags
21
22
23 class _ErrorPage(Resource):
24 """
25 L{_ErrorPage} is a resource that responds to all requests with a particular
26 (parameterized) HTTP status code and an HTML body containing some
27 descriptive text. This is useful for rendering simple error pages.
28
29 @see: L{twisted.web.pages.errorPage}
30
31 @ivar _code: An integer HTTP status code which will be used for the
32 response.
33
34 @ivar _brief: A short string which will be included in the response body as
35 the page title.
36
37 @ivar _detail: A longer string which will be included in the response body.
38 """
39
40 def __init__(self, code: int, brief: str, detail: str) -> None:
41 super().__init__()
42 self._code: int = code
43 self._brief: str = brief
44 self._detail: str = detail
45
46 def render(self, request: IRequest) -> object:
47 """
48 Respond to all requests with the given HTTP status code and an HTML
49 document containing the explanatory strings.
50 """
51 request.setResponseCode(self._code)
52 request.setHeader(b"content-type", b"text/html; charset=utf-8")
53 return renderElement(
54 request,
55 # cast because the type annotations here seem off; Tag isn't an
56 # IRenderable but also probably should be? See
57 # https://github.com/twisted/twisted/issues/4982
58 cast(
59 IRenderable,
60 tags.html(
61 tags.head(tags.title(f"{self._code} - {self._brief}")),
62 tags.body(tags.h1(self._brief), tags.p(self._detail)),
63 ),
64 ),
65 )
66
67 def getChild(self, path: bytes, request: IRequest) -> Resource:
68 """
69 Handle all requests for which L{_ErrorPage} lacks a child by returning
70 this error page.
71
72 @param path: A path segment.
73
74 @param request: HTTP request
75 """
76 return self
77
78
79 def errorPage(code: int, brief: str, detail: str) -> IResource:
80 """
81 Build a resource that responds to all requests with a particular HTTP
82 status code and an HTML body containing some descriptive text. This is
83 useful for rendering simple error pages.
84
85 The resource dynamically handles all paths below it. Use
86 L{IResource.putChild()} override specific path.
87
88 @param code: An integer HTTP status code which will be used for the
89 response.
90
91 @param brief: A short string which will be included in the response
92 body as the page title.
93
94 @param detail: A longer string which will be included in the
95 response body.
96
97 @returns: An L{IResource}
98 """
99 return _ErrorPage(code, brief, detail)
100
101
102 def notFound(
103 brief: str = "No Such Resource",
104 message: str = "Sorry. No luck finding that resource.",
105 ) -> IResource:
106 """
107 Generate an L{IResource} with a 404 Not Found status code.
108
109 @see: L{twisted.web.pages.errorPage}
110
111 @param brief: A short string displayed as the page title.
112
113 @param brief: A longer string displayed in the page body.
114
115 @returns: An L{IResource}
116 """
117 return _ErrorPage(http.NOT_FOUND, brief, message)
118
119
120 def forbidden(
121 brief: str = "Forbidden Resource", message: str = "Sorry, resource is forbidden."
122 ) -> IResource:
123 """
124 Generate an L{IResource} with a 403 Forbidden status code.
125
126 @see: L{twisted.web.pages.errorPage}
127
128 @param brief: A short string displayed as the page title.
129
130 @param brief: A longer string displayed in the page body.
131
132 @returns: An L{IResource}
133 """
134 return _ErrorPage(http.FORBIDDEN, brief, message)
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py
--- a/src/twisted/web/pages.py
+++ b/src/twisted/web/pages.py
@@ -83,7 +83,7 @@
useful for rendering simple error pages.
The resource dynamically handles all paths below it. Use
- L{IResource.putChild()} override specific path.
+ L{IResource.putChild()} to override a specific path.
@param code: An integer HTTP status code which will be used for the
response.
| {"golden_diff": "diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py\n--- a/src/twisted/web/pages.py\n+++ b/src/twisted/web/pages.py\n@@ -83,7 +83,7 @@\n useful for rendering simple error pages.\n \n The resource dynamically handles all paths below it. Use\n- L{IResource.putChild()} override specific path.\n+ L{IResource.putChild()} to override a specific path.\n \n @param code: An integer HTTP status code which will be used for the\n response.\n", "issue": "twisted.web.pages.errorPage docstring has a typo\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.\r\n\r\nShould be:\r\n\r\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.\n", "before_files": [{"content": "# -*- test-case-name: twisted.web.test.test_pages -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUtility implementations of L{IResource}.\n\"\"\"\n\n__all__ = (\n \"errorPage\",\n \"notFound\",\n \"forbidden\",\n)\n\nfrom typing import cast\n\nfrom twisted.web import http\nfrom twisted.web.iweb import IRenderable, IRequest\nfrom twisted.web.resource import IResource, Resource\nfrom twisted.web.template import renderElement, tags\n\n\nclass _ErrorPage(Resource):\n \"\"\"\n L{_ErrorPage} is a resource that responds to all requests with a particular\n (parameterized) HTTP status code and an HTML body containing some\n descriptive text. This is useful for rendering simple error pages.\n\n @see: L{twisted.web.pages.errorPage}\n\n @ivar _code: An integer HTTP status code which will be used for the\n response.\n\n @ivar _brief: A short string which will be included in the response body as\n the page title.\n\n @ivar _detail: A longer string which will be included in the response body.\n \"\"\"\n\n def __init__(self, code: int, brief: str, detail: str) -> None:\n super().__init__()\n self._code: int = code\n self._brief: str = brief\n self._detail: str = detail\n\n def render(self, request: IRequest) -> object:\n \"\"\"\n Respond to all requests with the given HTTP status code and an HTML\n document containing the explanatory strings.\n \"\"\"\n request.setResponseCode(self._code)\n request.setHeader(b\"content-type\", b\"text/html; charset=utf-8\")\n return renderElement(\n request,\n # cast because the type annotations here seem off; Tag isn't an\n # IRenderable but also probably should be? See\n # https://github.com/twisted/twisted/issues/4982\n cast(\n IRenderable,\n tags.html(\n tags.head(tags.title(f\"{self._code} - {self._brief}\")),\n tags.body(tags.h1(self._brief), tags.p(self._detail)),\n ),\n ),\n )\n\n def getChild(self, path: bytes, request: IRequest) -> Resource:\n \"\"\"\n Handle all requests for which L{_ErrorPage} lacks a child by returning\n this error page.\n\n @param path: A path segment.\n\n @param request: HTTP request\n \"\"\"\n return self\n\n\ndef errorPage(code: int, brief: str, detail: str) -> IResource:\n \"\"\"\n Build a resource that responds to all requests with a particular HTTP\n status code and an HTML body containing some descriptive text. This is\n useful for rendering simple error pages.\n\n The resource dynamically handles all paths below it. 
Use\n L{IResource.putChild()} override specific path.\n\n @param code: An integer HTTP status code which will be used for the\n response.\n\n @param brief: A short string which will be included in the response\n body as the page title.\n\n @param detail: A longer string which will be included in the\n response body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(code, brief, detail)\n\n\ndef notFound(\n brief: str = \"No Such Resource\",\n message: str = \"Sorry. No luck finding that resource.\",\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 404 Not Found status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.NOT_FOUND, brief, message)\n\n\ndef forbidden(\n brief: str = \"Forbidden Resource\", message: str = \"Sorry, resource is forbidden.\"\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 403 Forbidden status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.FORBIDDEN, brief, message)\n", "path": "src/twisted/web/pages.py"}], "after_files": [{"content": "# -*- test-case-name: twisted.web.test.test_pages -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUtility implementations of L{IResource}.\n\"\"\"\n\n__all__ = (\n \"errorPage\",\n \"notFound\",\n \"forbidden\",\n)\n\nfrom typing import cast\n\nfrom twisted.web import http\nfrom twisted.web.iweb import IRenderable, IRequest\nfrom twisted.web.resource import IResource, Resource\nfrom twisted.web.template import renderElement, tags\n\n\nclass _ErrorPage(Resource):\n \"\"\"\n L{_ErrorPage} is a resource that responds to all requests with a particular\n (parameterized) HTTP status code and an HTML body containing some\n descriptive text. This is useful for rendering simple error pages.\n\n @see: L{twisted.web.pages.errorPage}\n\n @ivar _code: An integer HTTP status code which will be used for the\n response.\n\n @ivar _brief: A short string which will be included in the response body as\n the page title.\n\n @ivar _detail: A longer string which will be included in the response body.\n \"\"\"\n\n def __init__(self, code: int, brief: str, detail: str) -> None:\n super().__init__()\n self._code: int = code\n self._brief: str = brief\n self._detail: str = detail\n\n def render(self, request: IRequest) -> object:\n \"\"\"\n Respond to all requests with the given HTTP status code and an HTML\n document containing the explanatory strings.\n \"\"\"\n request.setResponseCode(self._code)\n request.setHeader(b\"content-type\", b\"text/html; charset=utf-8\")\n return renderElement(\n request,\n # cast because the type annotations here seem off; Tag isn't an\n # IRenderable but also probably should be? 
See\n # https://github.com/twisted/twisted/issues/4982\n cast(\n IRenderable,\n tags.html(\n tags.head(tags.title(f\"{self._code} - {self._brief}\")),\n tags.body(tags.h1(self._brief), tags.p(self._detail)),\n ),\n ),\n )\n\n def getChild(self, path: bytes, request: IRequest) -> Resource:\n \"\"\"\n Handle all requests for which L{_ErrorPage} lacks a child by returning\n this error page.\n\n @param path: A path segment.\n\n @param request: HTTP request\n \"\"\"\n return self\n\n\ndef errorPage(code: int, brief: str, detail: str) -> IResource:\n \"\"\"\n Build a resource that responds to all requests with a particular HTTP\n status code and an HTML body containing some descriptive text. This is\n useful for rendering simple error pages.\n\n The resource dynamically handles all paths below it. Use\n L{IResource.putChild()} to override a specific path.\n\n @param code: An integer HTTP status code which will be used for the\n response.\n\n @param brief: A short string which will be included in the response\n body as the page title.\n\n @param detail: A longer string which will be included in the\n response body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(code, brief, detail)\n\n\ndef notFound(\n brief: str = \"No Such Resource\",\n message: str = \"Sorry. No luck finding that resource.\",\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 404 Not Found status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.NOT_FOUND, brief, message)\n\n\ndef forbidden(\n brief: str = \"Forbidden Resource\", message: str = \"Sorry, resource is forbidden.\"\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 403 Forbidden status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.FORBIDDEN, brief, message)\n", "path": "src/twisted/web/pages.py"}]} | 1,605 | 122 |
gh_patches_debug_38939 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-3363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clean up AWS Batch job definition list
### Problem or idea
The Batch job definition section contains 100+ pages of jobs (~2500 items). They haven't been cleaned up properly during the deploy process for a while.
### Solution or next step
Clean up stale items and make sure the job deregistering script takes care of old job definitions in the right way.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `infrastructure/delete_batch_job_queue.py`
Content:
```
1 import os
2 from time import sleep
3
4 import boto3
5
6 AWS_REGION = os.environ["AWS_REGION"]
7 AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
8
9 batch = boto3.client("batch", region_name=AWS_REGION)
10
11 # First disable each job queue.
12 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
13 try:
14 batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
15 except Exception as e:
16 # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
17 pass
18
19 # Then wait for each one to be disabled so it can be deleted.
20 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
21 while True:
22 job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])
23 if "jobQueues" in job_queues:
24 job_queue = job_queues["jobQueues"][0]
25 if job_queue["state"] == "DISABLED" and job_queue["status"] != "UPDATING":
26 break
27 else:
28 print(f"Unexpected response while describing job queue {batch_queue_name}.")
29 break
30
31 sleep(3)
32
33 batch.delete_job_queue(jobQueue=batch_queue_name)
34
```
Path: `infrastructure/deregister_batch_job_definitions.py`
Content:
```
1 import os
2
3 import boto3
4
5 AWS_REGION = os.environ["AWS_REGION"]
6
7 batch = boto3.client("batch", region_name=AWS_REGION)
8
9 # TODO: stop repeating this construction everywhere. Just set it once somewhere.
10 JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
11
12 job_definition_files = os.listdir("batch-job-templates")
13
14 job_definition_list = list(
15 {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
16 )
17
18 # Have to go one by one because providing a list of job names doesn't work:
19 # https://github.com/boto/boto3/issues/2908
20 for job_definition in job_definition_list:
21 job_definitions = batch.describe_job_definitions(
22 jobDefinitionName=job_definition, status="ACTIVE"
23 )
24 # There can be multiple revisions per job definition. We want them all gone.
25 for job_definition_revision in job_definitions["jobDefinitions"]:
26 batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py
--- a/infrastructure/delete_batch_job_queue.py
+++ b/infrastructure/delete_batch_job_queue.py
@@ -2,19 +2,22 @@
from time import sleep
import boto3
+from botocore.exceptions import ClientError
-AWS_REGION = os.environ["AWS_REGION"]
AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# First disable each job queue.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
try:
batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
- except Exception as e:
+ except ClientError as e:
# If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
- pass
+ if str(e).endswith(" does not exist."):
+ pass
+ else:
+ raise e
# Then wait for each one to be disabled so it can be deleted.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
diff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py
--- a/infrastructure/deregister_batch_job_definitions.py
+++ b/infrastructure/deregister_batch_job_definitions.py
@@ -2,25 +2,36 @@
import boto3
-AWS_REGION = os.environ["AWS_REGION"]
-
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# TODO: stop repeating this construction everywhere. Just set it once somewhere.
JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
-job_definition_files = os.listdir("batch-job-templates")
-
-job_definition_list = list(
- {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
+job_names = (
+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(".")[0]
+ for batch_job_template in os.listdir("batch-job-templates")
)
+nextToken = ""
# Have to go one by one because providing a list of job names doesn't work:
# https://github.com/boto/boto3/issues/2908
-for job_definition in job_definition_list:
- job_definitions = batch.describe_job_definitions(
- jobDefinitionName=job_definition, status="ACTIVE"
- )
- # There can be multiple revisions per job definition. We want them all gone.
- for job_definition_revision in job_definitions["jobDefinitions"]:
- batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
+for job_name in sorted(job_names):
+ while True:
+ data = {
+ "jobDefinitionName": job_name,
+ "maxResults": 100,
+ "status": "ACTIVE",
+ }
+ if nextToken:
+ data["nextToken"] = nextToken
+
+ response = batch.describe_job_definitions(**data)
+ nextToken = response.get("nextToken", "")
+
+ job_definitions = response.get("jobDefinitions")
+ if not job_definitions:
+ break
+
+ # There can be multiple revisions per job definition. We want them all gone.
+ for job_definition in job_definitions:
+ batch.deregister_job_definition(jobDefinition=job_definition["jobDefinitionArn"])
| {"golden_diff": "diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py\n--- a/infrastructure/delete_batch_job_queue.py\n+++ b/infrastructure/delete_batch_job_queue.py\n@@ -2,19 +2,22 @@\n from time import sleep\n \n import boto3\n+from botocore.exceptions import ClientError\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n AWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n \n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # First disable each job queue.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n- except Exception as e:\n+ except ClientError as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n- pass\n+ if str(e).endswith(\" does not exist.\"):\n+ pass\n+ else:\n+ raise e\n \n # Then wait for each one to be disabled so it can be deleted.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\ndiff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py\n--- a/infrastructure/deregister_batch_job_definitions.py\n+++ b/infrastructure/deregister_batch_job_definitions.py\n@@ -2,25 +2,36 @@\n \n import boto3\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n-\n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # TODO: stop repeating this construction everywhere. Just set it once somewhere.\n JOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n \n-job_definition_files = os.listdir(\"batch-job-templates\")\n-\n-job_definition_list = list(\n- {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n+job_names = (\n+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(\".\")[0]\n+ for batch_job_template in os.listdir(\"batch-job-templates\")\n )\n+nextToken = \"\"\n \n # Have to go one by one because providing a list of job names doesn't work:\n # https://github.com/boto/boto3/issues/2908\n-for job_definition in job_definition_list:\n- job_definitions = batch.describe_job_definitions(\n- jobDefinitionName=job_definition, status=\"ACTIVE\"\n- )\n- # There can be multiple revisions per job definition. We want them all gone.\n- for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n- batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n+for job_name in sorted(job_names):\n+ while True:\n+ data = {\n+ \"jobDefinitionName\": job_name,\n+ \"maxResults\": 100,\n+ \"status\": \"ACTIVE\",\n+ }\n+ if nextToken:\n+ data[\"nextToken\"] = nextToken\n+\n+ response = batch.describe_job_definitions(**data)\n+ nextToken = response.get(\"nextToken\", \"\")\n+\n+ job_definitions = response.get(\"jobDefinitions\")\n+ if not job_definitions:\n+ break\n+\n+ # There can be multiple revisions per job definition. We want them all gone.\n+ for job_definition in job_definitions:\n+ batch.deregister_job_definition(jobDefinition=job_definition[\"jobDefinitionArn\"])\n", "issue": "Clean up AWS Batch job definition list\n### Problem or idea\r\n\r\nThe Batch job definition section contains 100+ pages of jobs (~2500 items). 
They haven't been cleaned up properly during deploy process for a while.\r\n\r\n\r\n### Solution or next step\r\n\r\nClean up stale items, make sure job deregistering script takes care of old job definitions in a right way.\r\n\n", "before_files": [{"content": "import os\nfrom time import sleep\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\nAWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# First disable each job queue.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n except Exception as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n pass\n\n# Then wait for each one to be disabled so it can be deleted.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n while True:\n job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])\n if \"jobQueues\" in job_queues:\n job_queue = job_queues[\"jobQueues\"][0]\n if job_queue[\"state\"] == \"DISABLED\" and job_queue[\"status\"] != \"UPDATING\":\n break\n else:\n print(f\"Unexpected response while describing job queue {batch_queue_name}.\")\n break\n\n sleep(3)\n\n batch.delete_job_queue(jobQueue=batch_queue_name)\n", "path": "infrastructure/delete_batch_job_queue.py"}, {"content": "import os\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# TODO: stop repeating this construction everywhere. Just set it once somewhere.\nJOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n\njob_definition_files = os.listdir(\"batch-job-templates\")\n\njob_definition_list = list(\n {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n)\n\n# Have to go one by one because providing a list of job names doesn't work:\n# https://github.com/boto/boto3/issues/2908\nfor job_definition in job_definition_list:\n job_definitions = batch.describe_job_definitions(\n jobDefinitionName=job_definition, status=\"ACTIVE\"\n )\n # There can be multiple revisions per job definition. 
We want them all gone.\n for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n", "path": "infrastructure/deregister_batch_job_definitions.py"}], "after_files": [{"content": "import os\nfrom time import sleep\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nAWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n\nbatch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n\n# First disable each job queue.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n except ClientError as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n if str(e).endswith(\" does not exist.\"):\n pass\n else:\n raise e\n\n# Then wait for each one to be disabled so it can be deleted.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n while True:\n job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])\n if \"jobQueues\" in job_queues:\n job_queue = job_queues[\"jobQueues\"][0]\n if job_queue[\"state\"] == \"DISABLED\" and job_queue[\"status\"] != \"UPDATING\":\n break\n else:\n print(f\"Unexpected response while describing job queue {batch_queue_name}.\")\n break\n\n sleep(3)\n\n batch.delete_job_queue(jobQueue=batch_queue_name)\n", "path": "infrastructure/delete_batch_job_queue.py"}, {"content": "import os\n\nimport boto3\n\nbatch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n\n# TODO: stop repeating this construction everywhere. Just set it once somewhere.\nJOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n\njob_names = (\n JOB_DEFINITION_PREFIX + batch_job_template.upper().split(\".\")[0]\n for batch_job_template in os.listdir(\"batch-job-templates\")\n)\nnextToken = \"\"\n\n# Have to go one by one because providing a list of job names doesn't work:\n# https://github.com/boto/boto3/issues/2908\nfor job_name in sorted(job_names):\n while True:\n data = {\n \"jobDefinitionName\": job_name,\n \"maxResults\": 100,\n \"status\": \"ACTIVE\",\n }\n if nextToken:\n data[\"nextToken\"] = nextToken\n\n response = batch.describe_job_definitions(**data)\n nextToken = response.get(\"nextToken\", \"\")\n\n job_definitions = response.get(\"jobDefinitions\")\n if not job_definitions:\n break\n\n # There can be multiple revisions per job definition. We want them all gone.\n for job_definition in job_definitions:\n batch.deregister_job_definition(jobDefinition=job_definition[\"jobDefinitionArn\"])\n", "path": "infrastructure/deregister_batch_job_definitions.py"}]} | 958 | 786 |
gh_patches_debug_10043 | rasdani/github-patches | git_diff | nautobot__nautobot-877 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API
### Proposed Functionality
Before the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.
### Use Cases
As Patti the Platform Admin,
I want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,
So that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc.
One option is to create an "Admin" dropdown in the navigation bar which contains "Users (no change)," "Social Auth (drop 'Python')," and "System" sections. We may need one additional section called "plugins" for when plugins have created entries in Django Admin.
I will know this is done when it is possible to:
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/admin.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.admin import site as admin_site
3 from taggit.models import Tag
4
5
6 # Override default AdminSite attributes so we can avoid creating and
7 # registering our own class
8 admin_site.site_header = "Nautobot Administration"
9 admin_site.site_title = "Nautobot"
10 admin_site.index_template = "admin/nautobot_index.html"
11
12 # Unregister the unused stock Tag model provided by django-taggit
13 admin_site.unregister(Tag)
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py
--- a/nautobot/core/admin.py
+++ b/nautobot/core/admin.py
@@ -1,5 +1,6 @@
from django.conf import settings
from django.contrib.admin import site as admin_site
+from social_django.models import Association, Nonce, UserSocialAuth
from taggit.models import Tag
@@ -11,3 +12,8 @@
# Unregister the unused stock Tag model provided by django-taggit
admin_site.unregister(Tag)
+
+# Unregister SocialAuth from Django admin menu
+admin_site.unregister(Association)
+admin_site.unregister(Nonce)
+admin_site.unregister(UserSocialAuth)
| {"golden_diff": "diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py\n--- a/nautobot/core/admin.py\n+++ b/nautobot/core/admin.py\n@@ -1,5 +1,6 @@\n from django.conf import settings\n from django.contrib.admin import site as admin_site\n+from social_django.models import Association, Nonce, UserSocialAuth\n from taggit.models import Tag\n \n \n@@ -11,3 +12,8 @@\n \n # Unregister the unused stock Tag model provided by django-taggit\n admin_site.unregister(Tag)\n+\n+# Unregister SocialAuth from Django admin menu\n+admin_site.unregister(Association)\n+admin_site.unregister(Nonce)\n+admin_site.unregister(UserSocialAuth)\n", "issue": "Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API\n### Proposed Functionality \r\n\r\nBefore the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.\r\n\r\n### Use Cases\r\n\r\nAs Patti the Platform Admin,\r\nI want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,\r\nSo that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc. \r\n\r\nOne option is to create an \"Admin\" dropdown in the navigation bar which contains \"Users (no change),\" \"Social Auth (drop 'Python'),\" and \"System\" sections. We may need one additional section called \"plugins\" for when plugins have created entries in Django Admin.\r\n\r\nI will know this is done when it is possible to:\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.admin import site as admin_site\nfrom taggit.models import Tag\n\n\n# Override default AdminSite attributes so we can avoid creating and\n# registering our own class\nadmin_site.site_header = \"Nautobot Administration\"\nadmin_site.site_title = \"Nautobot\"\nadmin_site.index_template = \"admin/nautobot_index.html\"\n\n# Unregister the unused stock Tag model provided by django-taggit\nadmin_site.unregister(Tag)\n", "path": "nautobot/core/admin.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.admin import site as admin_site\nfrom social_django.models import Association, Nonce, UserSocialAuth\nfrom taggit.models import Tag\n\n\n# Override default AdminSite attributes so we can avoid creating and\n# registering our own class\nadmin_site.site_header = \"Nautobot Administration\"\nadmin_site.site_title = \"Nautobot\"\nadmin_site.index_template = \"admin/nautobot_index.html\"\n\n# Unregister the unused stock Tag model provided by django-taggit\nadmin_site.unregister(Tag)\n\n# Unregister SocialAuth from Django admin menu\nadmin_site.unregister(Association)\nadmin_site.unregister(Nonce)\nadmin_site.unregister(UserSocialAuth)\n", "path": "nautobot/core/admin.py"}]} | 680 | 153 |
gh_patches_debug_4919 | rasdani/github-patches | git_diff | bokeh__bokeh-1361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot style minor ticks
Axis objects do not have minor tick properties.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/axes.py`
Content:
```
1 from __future__ import absolute_import
2
3 from ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include
4 from ..mixins import LineProps, TextProps
5 from ..enums import Location
6
7 from .renderers import GuideRenderer
8 from .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker
9 from .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter
10
11 class Axis(GuideRenderer):
12 location = Either(Enum('auto'), Enum(Location))
13 bounds = Either(Enum('auto'), Tuple(Float, Float))
14
15 x_range_name = String('default')
16 y_range_name = String('default')
17
18 ticker = Instance(Ticker)
19 formatter = Instance(TickFormatter)
20
21 axis_label = String
22 axis_label_standoff = Int
23 axis_label_props = Include(TextProps)
24
25 major_label_standoff = Int
26 major_label_orientation = Either(Enum("horizontal", "vertical"), Float)
27 major_label_props = Include(TextProps)
28
29 axis_props = Include(LineProps)
30 major_tick_props = Include(LineProps)
31
32 major_tick_in = Int
33 major_tick_out = Int
34
35 class ContinuousAxis(Axis):
36 pass
37
38 class LinearAxis(ContinuousAxis):
39 def __init__(self, ticker=None, formatter=None, **kwargs):
40 if ticker is None:
41 ticker = BasicTicker()
42 if formatter is None:
43 formatter = BasicTickFormatter()
44 super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
45
46 class LogAxis(ContinuousAxis):
47 def __init__(self, ticker=None, formatter=None, **kwargs):
48 if ticker is None:
49 ticker = LogTicker(num_minor_ticks=10)
50 if formatter is None:
51 formatter = LogTickFormatter()
52 super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
53
54 class CategoricalAxis(Axis):
55 def __init__(self, ticker=None, formatter=None, **kwargs):
56 if ticker is None:
57 ticker = CategoricalTicker()
58 if formatter is None:
59 formatter = CategoricalTickFormatter()
60 super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
61
62 class DatetimeAxis(LinearAxis):
63 axis_label = String("date")
64 scale = String("time")
65 num_labels = Int(8)
66 char_width = Int(10)
67 fill_ratio = Float(0.3)
68
69 def __init__(self, ticker=None, formatter=None, **kwargs):
70 if ticker is None:
71 ticker = DatetimeTicker()
72 if formatter is None:
73 formatter = DatetimeTickFormatter()
74 super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py
--- a/bokeh/models/axes.py
+++ b/bokeh/models/axes.py
@@ -27,11 +27,15 @@
major_label_props = Include(TextProps)
axis_props = Include(LineProps)
- major_tick_props = Include(LineProps)
+ major_tick_props = Include(LineProps)
major_tick_in = Int
major_tick_out = Int
+ minor_tick_props = Include(LineProps)
+ minor_tick_in = Int
+ minor_tick_out = Int
+
class ContinuousAxis(Axis):
pass
| {"golden_diff": "diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py\n--- a/bokeh/models/axes.py\n+++ b/bokeh/models/axes.py\n@@ -27,11 +27,15 @@\n major_label_props = Include(TextProps)\n \n axis_props = Include(LineProps)\n- major_tick_props = Include(LineProps)\n \n+ major_tick_props = Include(LineProps)\n major_tick_in = Int\n major_tick_out = Int\n \n+ minor_tick_props = Include(LineProps)\n+ minor_tick_in = Int\n+ minor_tick_out = Int\n+\n class ContinuousAxis(Axis):\n pass\n", "issue": "Cannot style minor ticks\nAxis objects do not have minor tick properties.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include\nfrom ..mixins import LineProps, TextProps\nfrom ..enums import Location\n\nfrom .renderers import GuideRenderer\nfrom .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker\nfrom .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter\n\nclass Axis(GuideRenderer):\n location = Either(Enum('auto'), Enum(Location))\n bounds = Either(Enum('auto'), Tuple(Float, Float))\n\n x_range_name = String('default')\n y_range_name = String('default')\n\n ticker = Instance(Ticker)\n formatter = Instance(TickFormatter)\n\n axis_label = String\n axis_label_standoff = Int\n axis_label_props = Include(TextProps)\n\n major_label_standoff = Int\n major_label_orientation = Either(Enum(\"horizontal\", \"vertical\"), Float)\n major_label_props = Include(TextProps)\n\n axis_props = Include(LineProps)\n major_tick_props = Include(LineProps)\n\n major_tick_in = Int\n major_tick_out = Int\n\nclass ContinuousAxis(Axis):\n pass\n\nclass LinearAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = BasicTicker()\n if formatter is None:\n formatter = BasicTickFormatter()\n super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass LogAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = LogTicker(num_minor_ticks=10)\n if formatter is None:\n formatter = LogTickFormatter()\n super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass CategoricalAxis(Axis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = CategoricalTicker()\n if formatter is None:\n formatter = CategoricalTickFormatter()\n super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass DatetimeAxis(LinearAxis):\n axis_label = String(\"date\")\n scale = String(\"time\")\n num_labels = Int(8)\n char_width = Int(10)\n fill_ratio = Float(0.3)\n\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = DatetimeTicker()\n if formatter is None:\n formatter = DatetimeTickFormatter()\n super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n", "path": "bokeh/models/axes.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include\nfrom ..mixins import LineProps, TextProps\nfrom ..enums import Location\n\nfrom .renderers import GuideRenderer\nfrom .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker\nfrom .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, 
DatetimeTickFormatter\n\nclass Axis(GuideRenderer):\n location = Either(Enum('auto'), Enum(Location))\n bounds = Either(Enum('auto'), Tuple(Float, Float))\n\n x_range_name = String('default')\n y_range_name = String('default')\n\n ticker = Instance(Ticker)\n formatter = Instance(TickFormatter)\n\n axis_label = String\n axis_label_standoff = Int\n axis_label_props = Include(TextProps)\n\n major_label_standoff = Int\n major_label_orientation = Either(Enum(\"horizontal\", \"vertical\"), Float)\n major_label_props = Include(TextProps)\n\n axis_props = Include(LineProps)\n\n major_tick_props = Include(LineProps)\n major_tick_in = Int\n major_tick_out = Int\n\n minor_tick_props = Include(LineProps)\n minor_tick_in = Int\n minor_tick_out = Int\n\nclass ContinuousAxis(Axis):\n pass\n\nclass LinearAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = BasicTicker()\n if formatter is None:\n formatter = BasicTickFormatter()\n super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass LogAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = LogTicker(num_minor_ticks=10)\n if formatter is None:\n formatter = LogTickFormatter()\n super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass CategoricalAxis(Axis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = CategoricalTicker()\n if formatter is None:\n formatter = CategoricalTickFormatter()\n super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass DatetimeAxis(LinearAxis):\n axis_label = String(\"date\")\n scale = String(\"time\")\n num_labels = Int(8)\n char_width = Int(10)\n fill_ratio = Float(0.3)\n\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = DatetimeTicker()\n if formatter is None:\n formatter = DatetimeTickFormatter()\n super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n", "path": "bokeh/models/axes.py"}]} | 1,017 | 143 |
gh_patches_debug_14570 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
localization: various problems
# Bug
## Description
This issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**.
They should be fixed in a pull request right after the 0.4.4 release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/forms.py`
Content:
```
1 from flask_babel import gettext
2 from flask_wtf import FlaskForm
3 from wtforms import PasswordField
4 from wtforms.validators import InputRequired, Regexp, Length
5
6 from db import Source
7
8
9 class LoginForm(FlaskForm):
10 codename = PasswordField('codename', validators=[
11 InputRequired(message=gettext('This field is required.')),
12 Length(1, Source.MAX_CODENAME_LEN,
13 message=gettext('Field must be between 1 and '
14 '{max_codename_len} characters long. '.format(
15 max_codename_len=Source.MAX_CODENAME_LEN))),
16 # Make sure to allow dashes since some words in the wordlist have them
17 Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
18 ])
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py
--- a/securedrop/source_app/forms.py
+++ b/securedrop/source_app/forms.py
@@ -11,7 +11,7 @@
InputRequired(message=gettext('This field is required.')),
Length(1, Source.MAX_CODENAME_LEN,
message=gettext('Field must be between 1 and '
- '{max_codename_len} characters long. '.format(
+ '{max_codename_len} characters long.'.format(
max_codename_len=Source.MAX_CODENAME_LEN))),
# Make sure to allow dashes since some words in the wordlist have them
Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
| {"golden_diff": "diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py\n--- a/securedrop/source_app/forms.py\n+++ b/securedrop/source_app/forms.py\n@@ -11,7 +11,7 @@\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n- '{max_codename_len} characters long. '.format(\n+ '{max_codename_len} characters long.'.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n", "issue": "localization: various problems\n# Bug\r\n\r\n## Description\r\n\r\nThis issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**\r\n\r\nThey should be fixed in a pull request right after the 0.4.4 release.\n", "before_files": [{"content": "from flask_babel import gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField\nfrom wtforms.validators import InputRequired, Regexp, Length\n\nfrom db import Source\n\n\nclass LoginForm(FlaskForm):\n codename = PasswordField('codename', validators=[\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n '{max_codename_len} characters long. '.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n ])\n", "path": "securedrop/source_app/forms.py"}], "after_files": [{"content": "from flask_babel import gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField\nfrom wtforms.validators import InputRequired, Regexp, Length\n\nfrom db import Source\n\n\nclass LoginForm(FlaskForm):\n codename = PasswordField('codename', validators=[\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n '{max_codename_len} characters long.'.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n ])\n", "path": "securedrop/source_app/forms.py"}]} | 513 | 169 |
gh_patches_debug_41609 | rasdani/github-patches | git_diff | getnikola__nikola-1292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
render fails if the theme has a code.css
The `conf.py` says:
```
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
```
I've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e), but then I can't render my site:
```
(blog)tin@morochita:~/lab/blog$ nikola build
Scanning posts.....done!
ERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.
(blog)tin@morochita:~/lab/blog$
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/copy_assets.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 import codecs
28 import os
29
30 from nikola.plugin_categories import Task
31 from nikola import utils
32
33
34 class CopyAssets(Task):
35 """Copy theme assets into output."""
36
37 name = "copy_assets"
38
39 def gen_tasks(self):
40 """Create tasks to copy the assets of the whole theme chain.
41
42 If a file is present on two themes, use the version
43 from the "youngest" theme.
44 """
45
46 kw = {
47 "themes": self.site.THEMES,
48 "output_folder": self.site.config['OUTPUT_FOLDER'],
49 "filters": self.site.config['FILTERS'],
50 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
51 "code.css_selectors": 'pre.code',
52 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
53 }
54 has_code_css = False
55 tasks = {}
56 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
57
58 yield self.group_task()
59
60 for theme_name in kw['themes']:
61 src = os.path.join(utils.get_theme_path(theme_name), 'assets')
62 dst = os.path.join(kw['output_folder'], 'assets')
63 for task in utils.copy_tree(src, dst):
64 if task['name'] in tasks:
65 continue
66 has_code_css = task['targets'][0] == code_css_path
67 tasks[task['name']] = task
68 task['uptodate'] = [utils.config_changed(kw)]
69 task['basename'] = self.name
70 yield utils.apply_filters(task, kw['filters'])
71
72 if not has_code_css: # Generate it
73
74 def create_code_css():
75 from pygments.formatters import get_formatter_by_name
76 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
77 utils.makedirs(os.path.dirname(code_css_path))
78 with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
79 outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
80 outf.write(kw["code.css_close"])
81
82 task = {
83 'basename': self.name,
84 'name': code_css_path,
85 'targets': [code_css_path],
86 'uptodate': [utils.config_changed(kw)],
87 'actions': [(create_code_css, [])],
88 'clean': True,
89 }
90 yield utils.apply_filters(task, kw['filters'])
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py
--- a/nikola/plugins/task/copy_assets.py
+++ b/nikola/plugins/task/copy_assets.py
@@ -45,15 +45,21 @@
kw = {
"themes": self.site.THEMES,
+ "files_folders": self.site.config['FILES_FOLDERS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
"code.css_selectors": 'pre.code',
+ "code.css_head": '/* code.css file generated by Nikola */\n',
"code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
}
- has_code_css = False
tasks = {}
code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
+ code_css_input = utils.get_asset_path('assets/css/code.css',
+ themes=kw['themes'],
+ files_folders=kw['files_folders'])
+
+ kw["code.css_input"] = code_css_input
yield self.group_task()
@@ -63,27 +69,35 @@
for task in utils.copy_tree(src, dst):
if task['name'] in tasks:
continue
- has_code_css = task['targets'][0] == code_css_path
tasks[task['name']] = task
task['uptodate'] = [utils.config_changed(kw)]
task['basename'] = self.name
+ if code_css_input:
+ task['file_dep'] = [code_css_input]
yield utils.apply_filters(task, kw['filters'])
- if not has_code_css: # Generate it
-
+ # Check whether or not there is a code.css file around.
+ if not code_css_input:
def create_code_css():
from pygments.formatters import get_formatter_by_name
formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
utils.makedirs(os.path.dirname(code_css_path))
with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
+ outf.write(kw["code.css_head"])
outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
outf.write(kw["code.css_close"])
+ if os.path.exists(code_css_path):
+ with codecs.open(code_css_path, 'r', 'utf-8') as fh:
+ testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
+ else:
+ testcontents = False
+
task = {
'basename': self.name,
'name': code_css_path,
'targets': [code_css_path],
- 'uptodate': [utils.config_changed(kw)],
+ 'uptodate': [utils.config_changed(kw), testcontents],
'actions': [(create_code_css, [])],
'clean': True,
}
| {"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -45,15 +45,21 @@\n \n kw = {\n \"themes\": self.site.THEMES,\n+ \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n+ \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n- has_code_css = False\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n+ code_css_input = utils.get_asset_path('assets/css/code.css',\n+ themes=kw['themes'],\n+ files_folders=kw['files_folders'])\n+\n+ kw[\"code.css_input\"] = code_css_input\n \n yield self.group_task()\n \n@@ -63,27 +69,35 @@\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n- has_code_css = task['targets'][0] == code_css_path\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n+ if code_css_input:\n+ task['file_dep'] = [code_css_input]\n yield utils.apply_filters(task, kw['filters'])\n \n- if not has_code_css: # Generate it\n-\n+ # Check whether or not there is a code.css file around.\n+ if not code_css_input:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n+ outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n \n+ if os.path.exists(code_css_path):\n+ with codecs.open(code_css_path, 'r', 'utf-8') as fh:\n+ testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n+ else:\n+ testcontents = False\n+\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n- 'uptodate': [utils.config_changed(kw)],\n+ 'uptodate': [utils.config_changed(kw), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n", "issue": "render fails if the theme has a code.css \nThe `conf.py` says: \n\n```\n# Color scheme to be used for code blocks. 
If your theme provides\n# \"assets/css/code.css\" this is ignored.\n```\n\nI've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e) but then I can't render my site\n\n```\n(blog)tin@morochita:~/lab/blog$ nikola build\nScanning posts.....done!\nERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.\n(blog)tin@morochita:~/lab/blog$ \n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport codecs\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n\n kw = {\n \"themes\": self.site.THEMES,\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n has_code_css = False\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n\n yield self.group_task()\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n has_code_css = task['targets'][0] == code_css_path\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n yield utils.apply_filters(task, kw['filters'])\n\n if not has_code_css: # Generate it\n\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': 
[code_css_path],\n 'uptodate': [utils.config_changed(kw)],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport codecs\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n\n kw = {\n \"themes\": self.site.THEMES,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'])\n\n kw[\"code.css_input\"] = code_css_input\n\n yield self.group_task()\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n if code_css_input:\n task['file_dep'] = [code_css_input]\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if 
os.path.exists(code_css_path):\n with codecs.open(code_css_path, 'r', 'utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]} | 1,425 | 689 |
gh_patches_debug_34527 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1694 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: source 'City of Karlsruhe' stopped working
### I Have A Problem With:
A specific source
### What's Your Problem
Release 1.44.0:
Due to changes on the website, the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.
I will start troubleshooting and add my findings here.
### Source (if relevant)
karlsruhe_de
### Logs
_No response_
### Relevant Configuration
_No response_
### Checklist Source Error
- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [x] Checked that the website of your service provider is still working
- [x] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py`
Content:
```
1 from datetime import datetime
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "City of Karlsruhe"
8 DESCRIPTION = "Source for City of Karlsruhe."
9 URL = "https://www.karlsruhe.de/"
10 TEST_CASES = {
11 "Östliche Rheinbrückenstraße 1": {
12 "street": "Östliche Rheinbrückenstraße",
13 "hnr": 1,
14 },
15 "Habichtweg 4": {"street": "Habichtweg", "hnr": 4},
16 "Machstraße 5": {"street": "Machstraße", "hnr": 5},
17 "Bernsteinstraße 10 ladeort 1": {
18 "street": "Bernsteinstraße",
19 "hnr": 10,
20 "ladeort": 1,
21 },
22 "Bernsteinstraße 10 ladeort 2": {
23 "street": "Bernsteinstraße",
24 "hnr": 10,
25 "ladeort": 2,
26 },
27 }
28
29
30 ICON_MAP = {
31 "Restmüll": "mdi:trash-can",
32 "Bioabfall": "mdi:leaf",
33 "Papier": "mdi:package-variant",
34 "Wertstoff": "mdi:recycle",
35 "Sperrmüllabholung": "mdi:wardrobe",
36 }
37
38
39 API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
40
41
42 class Source:
43 def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):
44 self._street: str = street
45 self._hnr: str | int = hnr
46 self._ladeort: int | None = ladeort
47 self.ics = ICS()
48
49 def fetch(self):
50 now = datetime.now()
51 error = None
52 for year in (now.year, now.year + 1, now.year - 1):
53 try:
54 return self.get_data(API_URL.format(year=year))
55 except Exception as e:
56 error = e
57 raise error
58
59 def get_data(self, url):
60 data = {
61 "strasse_n": self._street,
62 "hausnr": self._hnr,
63 "ical": "+iCalendar",
64 "ladeort": self._ladeort,
65 }
66 params = {"hausnr": self._hnr}
67
68 r = requests.post(url, data=data, params=params)
69 dates = self.ics.convert(r.text)
70
71 entries = []
72 for d in dates:
73 date, waste_type = d
74 waste_type = waste_type.split(",")[0]
75 icon = ICON_MAP.get(waste_type)
76 entries.append(Collection(date=date, t=waste_type, icon=icon))
77
78 return entries
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
@@ -1,9 +1,17 @@
from datetime import datetime
import requests
+import urllib3
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
+# With verify=True the POST fails due to a SSLCertVerificationError.
+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
+# These two lines areused to suppress the InsecureRequestWarning when using verify=False
+urllib3.disable_warnings()
+
TITLE = "City of Karlsruhe"
DESCRIPTION = "Source for City of Karlsruhe."
URL = "https://www.karlsruhe.de/"
@@ -36,7 +44,7 @@
}
-API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
+API_URL = "https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php"
class Source:
@@ -50,10 +58,11 @@
now = datetime.now()
error = None
for year in (now.year, now.year + 1, now.year - 1):
- try:
- return self.get_data(API_URL.format(year=year))
- except Exception as e:
- error = e
+ for i in (4, 6):
+ try:
+ return self.get_data(API_URL.format(year=year, i=i))
+ except Exception as e:
+ error = e
raise error
def get_data(self, url):
@@ -65,7 +74,7 @@
}
params = {"hausnr": self._hnr}
- r = requests.post(url, data=data, params=params)
+ r = requests.post(url, data=data, params=params, verify=False)
dates = self.ics.convert(r.text)
entries = []
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n@@ -1,9 +1,17 @@\n from datetime import datetime\n \n import requests\n+import urllib3\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n from waste_collection_schedule.service.ICS import ICS\n \n+# With verify=True the POST fails due to a SSLCertVerificationError.\n+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n+# These two lines areused to suppress the InsecureRequestWarning when using verify=False\n+urllib3.disable_warnings()\n+\n TITLE = \"City of Karlsruhe\"\n DESCRIPTION = \"Source for City of Karlsruhe.\"\n URL = \"https://www.karlsruhe.de/\"\n@@ -36,7 +44,7 @@\n }\n \n \n-API_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n+API_URL = \"https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n \n \n class Source:\n@@ -50,10 +58,11 @@\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n- try:\n- return self.get_data(API_URL.format(year=year))\n- except Exception as e:\n- error = e\n+ for i in (4, 6):\n+ try:\n+ return self.get_data(API_URL.format(year=year, i=i))\n+ except Exception as e:\n+ error = e\n raise error\n \n def get_data(self, url):\n@@ -65,7 +74,7 @@\n }\n params = {\"hausnr\": self._hnr}\n \n- r = requests.post(url, data=data, params=params)\n+ r = requests.post(url, data=data, params=params, verify=False)\n dates = self.ics.convert(r.text)\n \n entries = []\n", "issue": "[Bug]: source 'City of Karlsruhe' stopped working\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nRelease 1.44.0:\r\nDue to changes on the website the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.\r\nI start troubleshooting and add my findings here.\n\n### Source (if relevant)\n\nkarlsruhe_de\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [x] Checked that the website of your service provider is still working\n- [x] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom waste_collection_schedule 
import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"City of Karlsruhe\"\nDESCRIPTION = \"Source for City of Karlsruhe.\"\nURL = \"https://www.karlsruhe.de/\"\nTEST_CASES = {\n \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe 1\": {\n \"street\": \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe\",\n \"hnr\": 1,\n },\n \"Habichtweg 4\": {\"street\": \"Habichtweg\", \"hnr\": 4},\n \"Machstra\u00dfe 5\": {\"street\": \"Machstra\u00dfe\", \"hnr\": 5},\n \"Bernsteinstra\u00dfe 10 ladeort 1\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 1,\n },\n \"Bernsteinstra\u00dfe 10 ladeort 2\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 2,\n },\n}\n\n\nICON_MAP = {\n \"Restm\u00fcll\": \"mdi:trash-can\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Wertstoff\": \"mdi:recycle\",\n \"Sperrm\u00fcllabholung\": \"mdi:wardrobe\",\n}\n\n\nAPI_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n\n\nclass Source:\n def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):\n self._street: str = street\n self._hnr: str | int = hnr\n self._ladeort: int | None = ladeort\n self.ics = ICS()\n\n def fetch(self):\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n try:\n return self.get_data(API_URL.format(year=year))\n except Exception as e:\n error = e\n raise error\n\n def get_data(self, url):\n data = {\n \"strasse_n\": self._street,\n \"hausnr\": self._hnr,\n \"ical\": \"+iCalendar\",\n \"ladeort\": self._ladeort,\n }\n params = {\"hausnr\": self._hnr}\n\n r = requests.post(url, data=data, params=params)\n dates = self.ics.convert(r.text)\n\n entries = []\n for d in dates:\n date, waste_type = d\n waste_type = waste_type.split(\",\")[0]\n icon = ICON_MAP.get(waste_type)\n entries.append(Collection(date=date, t=waste_type, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py"}], "after_files": [{"content": "from datetime import datetime\n\nimport requests\nimport urllib3\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\n# With verify=True the POST fails due to a SSLCertVerificationError.\n# Using verify=False works, but is not ideal. 
The following links may provide a better way of dealing with this:\n# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n# These two lines areused to suppress the InsecureRequestWarning when using verify=False\nurllib3.disable_warnings()\n\nTITLE = \"City of Karlsruhe\"\nDESCRIPTION = \"Source for City of Karlsruhe.\"\nURL = \"https://www.karlsruhe.de/\"\nTEST_CASES = {\n \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe 1\": {\n \"street\": \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe\",\n \"hnr\": 1,\n },\n \"Habichtweg 4\": {\"street\": \"Habichtweg\", \"hnr\": 4},\n \"Machstra\u00dfe 5\": {\"street\": \"Machstra\u00dfe\", \"hnr\": 5},\n \"Bernsteinstra\u00dfe 10 ladeort 1\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 1,\n },\n \"Bernsteinstra\u00dfe 10 ladeort 2\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 2,\n },\n}\n\n\nICON_MAP = {\n \"Restm\u00fcll\": \"mdi:trash-can\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Wertstoff\": \"mdi:recycle\",\n \"Sperrm\u00fcllabholung\": \"mdi:wardrobe\",\n}\n\n\nAPI_URL = \"https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n\n\nclass Source:\n def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):\n self._street: str = street\n self._hnr: str | int = hnr\n self._ladeort: int | None = ladeort\n self.ics = ICS()\n\n def fetch(self):\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n for i in (4, 6):\n try:\n return self.get_data(API_URL.format(year=year, i=i))\n except Exception as e:\n error = e\n raise error\n\n def get_data(self, url):\n data = {\n \"strasse_n\": self._street,\n \"hausnr\": self._hnr,\n \"ical\": \"+iCalendar\",\n \"ladeort\": self._ladeort,\n }\n params = {\"hausnr\": self._hnr}\n\n r = requests.post(url, data=data, params=params, verify=False)\n dates = self.ics.convert(r.text)\n\n entries = []\n for d in dates:\n date, waste_type = d\n waste_type = waste_type.split(\",\")[0]\n icon = ICON_MAP.get(waste_type)\n entries.append(Collection(date=date, t=waste_type, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py"}]} | 1,364 | 546 |
gh_patches_debug_28800 | rasdani/github-patches | git_diff | quantumlib__Cirq-1674 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve error message if on_each gets a list
When you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**.
Maybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.
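For illustration, a minimal reproduction of the report might look like this (the qubit construction is an assumption; the issue does not show it):

```python
import cirq

# Hypothetical qubits for the sake of the example.
q0, q1 = cirq.LineQubit.range(2)

cirq.H.on_each(q0, q1)    # works: yields one H operation per qubit
cirq.H.on_each([q0, q1])  # reported to fail: "Gate was called with type different than Qid"
```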
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/ops/gate_features.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Marker classes for indicating which additional features gates support.
16
17 For example: some gates are reversible, some have known matrices, etc.
18 """
19
20 import abc
21
22 from cirq.ops import op_tree, raw_types
23
24
25 class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
26 """Indicates operations should be equal under some qubit permutations."""
27
28 def qubit_index_to_equivalence_group_key(self, index: int) -> int:
29 """Returns a key that differs between non-interchangeable qubits."""
30 return 0
31
32
33 class SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
34 """A gate that must be applied to exactly one qubit."""
35 def num_qubits(self) -> int:
36 return 1
37
38 def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
39 """Returns a list of operations apply this gate to each of the targets.
40
41 Args:
42 *targets: The qubits to apply this gate to.
43
44 Returns:
45 Operations applying this gate to the target qubits.
46
47 Raises:
48 ValueError if targets are not instances of Qid.
49 """
50 return [self.on(target) for target in targets]
51
52
53 class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
54 """A gate that must be applied to exactly two qubits."""
55 def num_qubits(self) -> int:
56 return 2
57
58
59 class ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
60 """A gate that must be applied to exactly three qubits."""
61 def num_qubits(self) -> int:
62 return 3
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py
--- a/cirq/ops/gate_features.py
+++ b/cirq/ops/gate_features.py
@@ -18,8 +18,10 @@
"""
import abc
+import collections
+from typing import Union, Iterable, Any, List
-from cirq.ops import op_tree, raw_types
+from cirq.ops import raw_types
class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
@@ -35,7 +37,8 @@
def num_qubits(self) -> int:
return 1
- def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
+ def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]
+ ) -> List[raw_types.Operation]:
"""Returns a list of operations apply this gate to each of the targets.
Args:
@@ -45,9 +48,20 @@
Operations applying this gate to the target qubits.
Raises:
- ValueError if targets are not instances of Qid.
+ ValueError if targets are not instances of Qid or List[Qid].
"""
- return [self.on(target) for target in targets]
+ operations = [] # type: List[raw_types.Operation]
+ for target in targets:
+ if isinstance(target,
+ collections.Iterable) and not isinstance(target, str):
+ operations.extend(self.on_each(*target))
+ elif isinstance(target, raw_types.Qid):
+ operations.append(self.on(target))
+ else:
+ raise ValueError(
+ 'Gate was called with type different than Qid. Type: {}'.
+ format(type(target)))
+ return operations
class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
| {"golden_diff": "diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py\n--- a/cirq/ops/gate_features.py\n+++ b/cirq/ops/gate_features.py\n@@ -18,8 +18,10 @@\n \"\"\"\n \n import abc\n+import collections\n+from typing import Union, Iterable, Any, List\n \n-from cirq.ops import op_tree, raw_types\n+from cirq.ops import raw_types\n \n \n class InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n@@ -35,7 +37,8 @@\n def num_qubits(self) -> int:\n return 1\n \n- def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n+ def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]\n+ ) -> List[raw_types.Operation]:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n \n Args:\n@@ -45,9 +48,20 @@\n Operations applying this gate to the target qubits.\n \n Raises:\n- ValueError if targets are not instances of Qid.\n+ ValueError if targets are not instances of Qid or List[Qid].\n \"\"\"\n- return [self.on(target) for target in targets]\n+ operations = [] # type: List[raw_types.Operation]\n+ for target in targets:\n+ if isinstance(target,\n+ collections.Iterable) and not isinstance(target, str):\n+ operations.extend(self.on_each(*target))\n+ elif isinstance(target, raw_types.Qid):\n+ operations.append(self.on(target))\n+ else:\n+ raise ValueError(\n+ 'Gate was called with type different than Qid. Type: {}'.\n+ format(type(target)))\n+ return operations\n \n \n class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n", "issue": "Improve error message if on_each gets a list\nWhen you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**. \r\n\r\nMaybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Marker classes for indicating which additional features gates support.\n\nFor example: some gates are reversible, some have known matrices, etc.\n\"\"\"\n\nimport abc\n\nfrom cirq.ops import op_tree, raw_types\n\n\nclass InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n \"\"\"Indicates operations should be equal under some qubit permutations.\"\"\"\n\n def qubit_index_to_equivalence_group_key(self, index: int) -> int:\n \"\"\"Returns a key that differs between non-interchangeable qubits.\"\"\"\n return 0\n\n\nclass SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly one qubit.\"\"\"\n def num_qubits(self) -> int:\n return 1\n\n def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n\n Args:\n *targets: The qubits to apply this gate to.\n\n Returns:\n Operations applying this gate to the target qubits.\n\n Raises:\n ValueError if targets are not instances of Qid.\n 
\"\"\"\n return [self.on(target) for target in targets]\n\n\nclass TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly two qubits.\"\"\"\n def num_qubits(self) -> int:\n return 2\n\n\nclass ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly three qubits.\"\"\"\n def num_qubits(self) -> int:\n return 3\n", "path": "cirq/ops/gate_features.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Marker classes for indicating which additional features gates support.\n\nFor example: some gates are reversible, some have known matrices, etc.\n\"\"\"\n\nimport abc\nimport collections\nfrom typing import Union, Iterable, Any, List\n\nfrom cirq.ops import raw_types\n\n\nclass InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n \"\"\"Indicates operations should be equal under some qubit permutations.\"\"\"\n\n def qubit_index_to_equivalence_group_key(self, index: int) -> int:\n \"\"\"Returns a key that differs between non-interchangeable qubits.\"\"\"\n return 0\n\n\nclass SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly one qubit.\"\"\"\n def num_qubits(self) -> int:\n return 1\n\n def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]\n ) -> List[raw_types.Operation]:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n\n Args:\n *targets: The qubits to apply this gate to.\n\n Returns:\n Operations applying this gate to the target qubits.\n\n Raises:\n ValueError if targets are not instances of Qid or List[Qid].\n \"\"\"\n operations = [] # type: List[raw_types.Operation]\n for target in targets:\n if isinstance(target,\n collections.Iterable) and not isinstance(target, str):\n operations.extend(self.on_each(*target))\n elif isinstance(target, raw_types.Qid):\n operations.append(self.on(target))\n else:\n raise ValueError(\n 'Gate was called with type different than Qid. Type: {}'.\n format(type(target)))\n return operations\n\n\nclass TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly two qubits.\"\"\"\n def num_qubits(self) -> int:\n return 2\n\n\nclass ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly three qubits.\"\"\"\n def num_qubits(self) -> int:\n return 3\n", "path": "cirq/ops/gate_features.py"}]} | 984 | 411 |
gh_patches_debug_39718 | rasdani/github-patches | git_diff | prowler-cloud__prowler-2291 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where no backups exist
### Steps to Reproduce
The mentioned checks are triggered even if no backups are present or configured.
### Expected behavior
When the check can't find a resource ID (it actually says "No Backups"), the check shouldn't trigger
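Concretely, the expectation could be expressed roughly as follows (a sketch reusing the `backup_client` attribute names from the check files below; not necessarily the fix the project will choose):

```python
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.backup.backup_client import backup_client


class backup_plans_exist(Check):
    def execute(self):
        findings = []
        # Only emit a finding when AWS Backup is actually in use, so accounts
        # or regions without any backups stay silent instead of failing.
        if backup_client.backup_plans:
            report = Check_Report_AWS(self.metadata())
            report.status = "PASS"
            report.status_extended = (
                f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
            )
            findings.append(report)
        return findings
```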
### Actual Result with Screenshots or Logs

### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Workstation
### OS used
WSL2 under Windows 11
### Prowler version
Prowler 3.4.0 (it is the latest version, yay!)
### Pip version
pip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)
### Context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py`
Content:
```
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_plans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
17 report.resource_arn = backup_client.backup_plans[0].arn
18 report.resource_id = backup_client.backup_plans[0].name
19 report.region = backup_client.backup_plans[0].region
20
21 findings.append(report)
22 return findings
23
```
Path: `prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py`
Content:
```
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_reportplans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Report Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_report_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
17 report.resource_arn = backup_client.backup_report_plans[0].arn
18 report.resource_id = backup_client.backup_report_plans[0].name
19 report.region = backup_client.backup_report_plans[0].region
20
21 findings.append(report)
22 return findings
23
```
Path: `prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py`
Content:
```
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_vaults_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Vault Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_vaults:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup vault exists: { backup_client.backup_vaults[0].name}"
17 report.resource_arn = backup_client.backup_vaults[0].arn
18 report.resource_id = backup_client.backup_vaults[0].name
19 report.region = backup_client.backup_vaults[0].region
20
21 findings.append(report)
22 return findings
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
@@ -9,11 +9,13 @@
report.status = "FAIL"
report.status_extended = "No Backup Plan Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_plans:
report.status = "PASS"
- report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
+ report.status_extended = (
+ f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
+ )
report.resource_arn = backup_client.backup_plans[0].arn
report.resource_id = backup_client.backup_plans[0].name
report.region = backup_client.backup_plans[0].region
diff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
@@ -5,18 +5,20 @@
class backup_reportplans_exist(Check):
def execute(self):
findings = []
- report = Check_Report_AWS(self.metadata())
- report.status = "FAIL"
- report.status_extended = "No Backup Report Plan Exist"
- report.resource_arn = ""
- report.resource_id = "No Backups"
- report.region = backup_client.region
- if backup_client.backup_report_plans:
- report.status = "PASS"
- report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
- report.resource_arn = backup_client.backup_report_plans[0].arn
- report.resource_id = backup_client.backup_report_plans[0].name
- report.region = backup_client.backup_report_plans[0].region
+ # We only check report plans if backup plans exist, reducing noise
+ if backup_client.backup_plans:
+ report = Check_Report_AWS(self.metadata())
+ report.status = "FAIL"
+ report.status_extended = "No Backup Report Plan Exist"
+ report.resource_arn = ""
+ report.resource_id = "Backups"
+ report.region = backup_client.region
+ if backup_client.backup_report_plans:
+ report.status = "PASS"
+ report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
+ report.resource_arn = backup_client.backup_report_plans[0].arn
+ report.resource_id = backup_client.backup_report_plans[0].name
+ report.region = backup_client.backup_report_plans[0].region
- findings.append(report)
+ findings.append(report)
return findings
diff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
@@ -9,7 +9,7 @@
report.status = "FAIL"
report.status_extended = "No Backup Vault Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_vaults:
report.status = "PASS"
| {"golden_diff": "diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n@@ -9,11 +9,13 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n- report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n+ report.status_extended = (\n+ f\"At least one backup plan exists: {backup_client.backup_plans[0].name}\"\n+ )\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\ndiff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n@@ -5,18 +5,20 @@\n class backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n- report = Check_Report_AWS(self.metadata())\n- report.status = \"FAIL\"\n- report.status_extended = \"No Backup Report Plan Exist\"\n- report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n- report.region = backup_client.region\n- if backup_client.backup_report_plans:\n- report.status = \"PASS\"\n- report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n- report.resource_arn = backup_client.backup_report_plans[0].arn\n- report.resource_id = backup_client.backup_report_plans[0].name\n- report.region = backup_client.backup_report_plans[0].region\n+ # We only check report plans if backup plans exist, reducing noise\n+ if backup_client.backup_plans:\n+ report = Check_Report_AWS(self.metadata())\n+ report.status = \"FAIL\"\n+ report.status_extended = \"No Backup Report Plan Exist\"\n+ report.resource_arn = \"\"\n+ report.resource_id = \"Backups\"\n+ report.region = backup_client.region\n+ if backup_client.backup_report_plans:\n+ report.status = \"PASS\"\n+ report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n+ report.resource_arn = backup_client.backup_report_plans[0].arn\n+ report.resource_id = backup_client.backup_report_plans[0].name\n+ report.region = backup_client.backup_report_plans[0].region\n \n- findings.append(report)\n+ findings.append(report)\n return findings\ndiff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n@@ -9,7 +9,7 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = 
backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n", "issue": "[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where not backups exist\n### Steps to Reproduce\n\nThe mentioned checks are triggered even if no backups are present or configured.\n\n### Expected behavior\n\nWhen the check can't find a resource ID (it actually says \"No Backups\"), the check shouldn't trigger\n\n### Actual Result with Screenshots or Logs\n\n\r\n\n\n### How did you install Prowler?\n\nFrom pip package (pip install prowler)\n\n### Environment Resource\n\nWorkstation\n\n### OS used\n\nWSL2 under Windows 11\n\n### Prowler version\n\nProwler 3.4.0 (it is the latest version, yay!)\n\n### Pip version\n\npip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_plans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Report Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_report_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n report.resource_arn = backup_client.backup_report_plans[0].arn\n report.resource_id = backup_client.backup_report_plans[0].name\n report.region = backup_client.backup_report_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_vaults_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup vault exists: { backup_client.backup_vaults[0].name}\"\n report.resource_arn = backup_client.backup_vaults[0].arn\n report.resource_id = backup_client.backup_vaults[0].name\n report.region 
= backup_client.backup_vaults[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py"}], "after_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_plans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n report.status_extended = (\n f\"At least one backup plan exists: {backup_client.backup_plans[0].name}\"\n )\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n # We only check report plans if backup plans exist, reducing noise\n if backup_client.backup_plans:\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Report Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_report_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n report.resource_arn = backup_client.backup_report_plans[0].arn\n report.resource_id = backup_client.backup_report_plans[0].name\n report.region = backup_client.backup_report_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_vaults_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup vault exists: { backup_client.backup_vaults[0].name}\"\n report.resource_arn = backup_client.backup_vaults[0].arn\n report.resource_id = backup_client.backup_vaults[0].name\n report.region = backup_client.backup_vaults[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py"}]} | 1,274 | 915 |
gh_patches_debug_19093 | rasdani/github-patches | git_diff | weecology__retriever-287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
download command should probably fail when specified path does not exist
A dataset can be downloaded to a specific path with the function `download` while specifying the -p argument. For example, `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory my_path, but if my_path does not exist, a file called my_path is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist.
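As an illustration of the fail-fast option (a sketch only; the alternative would be to create the missing directory instead):

```python
import os


def ensure_download_path(path):
    """Raise early if the requested download directory does not exist."""
    if not os.path.isdir(path):
        raise IOError("Download path %r does not exist" % path)
```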
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `engines/download_only.py`
Content:
```
1 import os
2 import platform
3 import shutil
4 import inspect
5 from retriever.lib.engine import filename_from_url
6 from retriever.lib.models import Engine, no_cleanup
7 from retriever import DATA_DIR, HOME_DIR
8
9 class DummyConnection:
10 def cursor(self):
11 pass
12 def commit(self):
13 pass
14 def rollback(self):
15 pass
16 def close(self):
17 pass
18
19 class DummyCursor(DummyConnection):
20 pass
21
22
23 class engine(Engine):
24 """Engine instance for writing data to a CSV file."""
25 name = "Download Only"
26 abbreviation = "download"
27 required_opts = [("path",
28 "File path to copy data files",
29 "./"),
30 ]
31
32 def table_exists(self, dbname, tablename):
33 try:
34 tablename = self.table_name(name=tablename, dbname=dbname)
35 return os.path.exists(tablename)
36 except:
37 return False
38
39 def get_connection(self):
40 """Gets the db connection."""
41 self.get_input()
42 return DummyConnection()
43
44 def final_cleanup(self):
45 data_dir = self.format_data_dir()
46 if hasattr(self, "all_files"):
47 for file_name in self.all_files:
48 file_path, file_name_nopath = os.path.split(file_name)
49 if file_path == DATA_DIR:
50 print ("%s is already in the working directory" % file_name_nopath)
51 print("Keeping existing copy.")
52 else:
53 print("Copying %s from %s" % (file_name_nopath, file_path))
54 shutil.copy(file_name, self.opts['path'])
55 self.all_files = set()
56
57 def auto_create_table(self, table, url=None, filename=None, pk=None):
58 if url and not filename:
59 filename = filename_from_url(url)
60
61 if url and not self.find_file(filename):
62 # If the file doesn't exist, download it
63 self.download_file(url, filename)
64
65 def insert_data_from_url(self, url):
66 filename = filename_from_url(url)
67 find = self.find_file(filename)
68 if not find:
69 self.create_raw_data_dir()
70 self.download_file(url, filename)
71
72 def find_file(self, filename):
73 result = Engine.find_file(self, filename)
74 if not hasattr(self, "all_files"): self.all_files = set()
75 if result: self.all_files.add(result)
76 return result
77
78 def register_files(self, filenames):
79 """Identify a list of files to be moved by the download
80
81 When downloading archives with multiple files the engine needs to be
82 informed of all of the file names so that it can move them.
83
84 """
85 full_filenames = {self.find_file(filename) for filename in filenames}
86 self.all_files = self.all_files.union(full_filenames)
87
88
89 # replace all other methods with a function that does nothing
90 def dummy_method(self, *args, **kwargs):
91 pass
92 methods = inspect.getmembers(engine, predicate=inspect.ismethod)
93 keep_methods = {'table_exists',
94 'get_connection',
95 'final_cleanup',
96 'auto_create_table',
97 'insert_data_from_url',
98 }
99 remove_methods = ['insert_data_from_file']
100 for name, method in methods:
101 if (not name in keep_methods
102 and not 'download' in name
103 and not 'file' in name
104 and not 'dir' in name):
105
106 setattr(engine, name, dummy_method)
107 for name in remove_methods:
108 setattr(engine, name, dummy_method)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/engines/download_only.py b/engines/download_only.py
--- a/engines/download_only.py
+++ b/engines/download_only.py
@@ -51,7 +51,18 @@
print("Keeping existing copy.")
else:
print("Copying %s from %s" % (file_name_nopath, file_path))
- shutil.copy(file_name, self.opts['path'])
+ if os.path.isdir(self.opts['path']):
+ try:
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't copy file to %s" % self.opts['path'])
+ else:
+ try:
+ print("Creating directory %s" % self.opts['path'])
+ os.mkdir(self.opts['path'])
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't create directory %s" % self.opts['path'])
self.all_files = set()
def auto_create_table(self, table, url=None, filename=None, pk=None):
| {"golden_diff": "diff --git a/engines/download_only.py b/engines/download_only.py\n--- a/engines/download_only.py\n+++ b/engines/download_only.py\n@@ -51,7 +51,18 @@\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n- shutil.copy(file_name, self.opts['path'])\n+ if os.path.isdir(self.opts['path']):\n+ try:\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't copy file to %s\" % self.opts['path'])\n+ else:\n+ try:\n+ print(\"Creating directory %s\" % self.opts['path'])\n+ os.mkdir(self.opts['path'])\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't create directory %s\" % self.opts['path'])\n self.all_files = set()\n \n def auto_create_table(self, table, url=None, filename=None, pk=None):\n", "issue": "download command should probably fail when specified path does not exist\nA datsaet can be downloaded to a specific path with the function `download` while specifying the -p argument. For example `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory my_path but if my_path does not exist a file called my_path is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist. \n\n", "before_files": [{"content": "import os\nimport platform\nimport shutil\nimport inspect\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\nclass DummyConnection:\n def cursor(self):\n pass\n def commit(self):\n pass\n def rollback(self):\n pass\n def close(self):\n pass\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n ]\n\n def table_exists(self, dbname, tablename):\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n data_dir = self.format_data_dir()\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n if file_path == DATA_DIR:\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n shutil.copy(file_name, self.opts['path'])\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"): self.all_files = set()\n if result: self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives 
with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = {self.find_file(filename) for filename in filenames}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (not name in keep_methods\n and not 'download' in name\n and not 'file' in name\n and not 'dir' in name):\n\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}], "after_files": [{"content": "import os\nimport platform\nimport shutil\nimport inspect\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\nclass DummyConnection:\n def cursor(self):\n pass\n def commit(self):\n pass\n def rollback(self):\n pass\n def close(self):\n pass\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n ]\n\n def table_exists(self, dbname, tablename):\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n data_dir = self.format_data_dir()\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n if file_path == DATA_DIR:\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n if os.path.isdir(self.opts['path']):\n try:\n shutil.copy(file_name, self.opts['path'])\n except:\n print(\"Couldn't copy file to %s\" % self.opts['path'])\n else:\n try:\n print(\"Creating directory %s\" % self.opts['path'])\n os.mkdir(self.opts['path'])\n shutil.copy(file_name, self.opts['path'])\n except:\n print(\"Couldn't create directory %s\" % self.opts['path'])\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"): self.all_files = set()\n if result: self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = 
{self.find_file(filename) for filename in filenames}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (not name in keep_methods\n and not 'download' in name\n and not 'file' in name\n and not 'dir' in name):\n\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}]} | 1,322 | 231 |
gh_patches_debug_12065 | rasdani/github-patches | git_diff | tinygrad__tinygrad-65 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EOFError: Ran out of input
When running the example, after solving the "Can't import fetch from utils" issue, this error comes up:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tinygrad/utils.py`
Content:
```
1 import numpy as np
2
3 def mask_like(like, mask_inx, mask_value = 1.0):
4 mask = np.zeros_like(like).reshape(-1)
5 mask[mask_inx] = mask_value
6 return mask.reshape(like.shape)
7
8 def layer_init_uniform(*x):
9 ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))
10 return ret.astype(np.float32)
11
12 def fetch(url):
13 import requests, os, hashlib, tempfile
14 fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
15 if os.path.isfile(fp):
16 with open(fp, "rb") as f:
17 dat = f.read()
18 else:
19 print("fetching %s" % url)
20 with open(fp+".tmp", "wb") as f:
21 dat = requests.get(url).content
22 f.write(dat)
23 os.rename(fp+".tmp", fp)
24 return dat
25
26 def fetch_mnist():
27 import gzip
28 parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
29 X_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
30 Y_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"))[8:]
31 X_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
32 Y_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"))[8:]
33 return X_train, Y_train, X_test, Y_test
34
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tinygrad/utils.py b/tinygrad/utils.py
--- a/tinygrad/utils.py
+++ b/tinygrad/utils.py
@@ -1,4 +1,5 @@
import numpy as np
+import os
def mask_like(like, mask_inx, mask_value = 1.0):
mask = np.zeros_like(like).reshape(-1)
@@ -12,7 +13,7 @@
def fetch(url):
import requests, os, hashlib, tempfile
fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
- if os.path.isfile(fp):
+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:
with open(fp, "rb") as f:
dat = f.read()
else:
| {"golden_diff": "diff --git a/tinygrad/utils.py b/tinygrad/utils.py\n--- a/tinygrad/utils.py\n+++ b/tinygrad/utils.py\n@@ -1,4 +1,5 @@\n import numpy as np\n+import os\n \n def mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n@@ -12,7 +13,7 @@\n def fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n- if os.path.isfile(fp):\n+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n", "issue": "EOFError: Ran out of input\nWhen running example and solving \"Can't import fetch from utils\" issue, this one comes up:\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\ndef mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n mask[mask_inx] = mask_value\n return mask.reshape(like.shape)\n\ndef layer_init_uniform(*x):\n ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))\n return ret.astype(np.float32)\n\ndef fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n if os.path.isfile(fp):\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n print(\"fetching %s\" % url)\n with open(fp+\".tmp\", \"wb\") as f:\n dat = requests.get(url).content\n f.write(dat)\n os.rename(fp+\".tmp\", fp)\n return dat\n\ndef fetch_mnist():\n import gzip\n parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()\n X_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\"))[8:]\n X_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\"))[8:]\n return X_train, Y_train, X_test, Y_test\n\n", "path": "tinygrad/utils.py"}], "after_files": [{"content": "import numpy as np\nimport os\n\ndef mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n mask[mask_inx] = mask_value\n return mask.reshape(like.shape)\n\ndef layer_init_uniform(*x):\n ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))\n return ret.astype(np.float32)\n\ndef fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n if os.path.isfile(fp) and os.stat(fp).st_size > 0:\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n print(\"fetching %s\" % url)\n with open(fp+\".tmp\", \"wb\") as f:\n dat = requests.get(url).content\n f.write(dat)\n os.rename(fp+\".tmp\", fp)\n return dat\n\ndef fetch_mnist():\n import gzip\n parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()\n X_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\"))[8:]\n X_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\"))[8:]\n return X_train, Y_train, X_test, Y_test\n\n", "path": "tinygrad/utils.py"}]} | 830 | 176 |
gh_patches_debug_20502 | rasdani/github-patches | git_diff | cloudtools__troposphere-1205 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User
This property was released by AWS on November 9.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html
```
PermissionsBoundary
The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.
Required: No
Type: String
Update requires: No interruption
```
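For illustration, once the property is supported, a template could set it like this (a sketch; the ARN and trust policy are placeholders):

```python
from troposphere.iam import Role

role = Role(
    "ExampleRole",
    AssumeRolePolicyDocument={
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"Service": "ec2.amazonaws.com"},
            "Action": "sts:AssumeRole",
        }]
    },
    PermissionsBoundary="arn:aws:iam::123456789012:policy/example-boundary",
)
```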
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/iam.py`
Content:
```
1 # Copyright (c) 2012-2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import integer, boolean, status
8 from .validators import iam_path, iam_role_name, iam_group_name, iam_user_name
9
10 try:
11 from awacs.aws import Policy
12 policytypes = (dict, Policy)
13 except ImportError:
14 policytypes = dict,
15
16
17 Active = "Active"
18 Inactive = "Inactive"
19
20
21 class AccessKey(AWSObject):
22 resource_type = "AWS::IAM::AccessKey"
23
24 props = {
25 'Serial': (integer, False),
26 'Status': (status, False),
27 'UserName': (basestring, True),
28 }
29
30
31 class PolicyType(AWSObject):
32 resource_type = "AWS::IAM::Policy"
33
34 props = {
35 'Groups': ([basestring], False),
36 'PolicyDocument': (policytypes, True),
37 'PolicyName': (basestring, True),
38 'Roles': ([basestring], False),
39 'Users': ([basestring], False),
40 }
41
42
43 class Policy(AWSProperty):
44 props = {
45 'PolicyDocument': (policytypes, True),
46 'PolicyName': (basestring, True),
47 }
48
49
50 PolicyProperty = Policy
51
52
53 class Group(AWSObject):
54 resource_type = "AWS::IAM::Group"
55
56 props = {
57 'GroupName': (iam_group_name, False),
58 'ManagedPolicyArns': ([basestring], False),
59 'Path': (iam_path, False),
60 'Policies': ([Policy], False),
61 }
62
63
64 class InstanceProfile(AWSObject):
65 resource_type = "AWS::IAM::InstanceProfile"
66
67 props = {
68 'Path': (iam_path, False),
69 'Roles': (list, True),
70 'InstanceProfileName': (basestring, False),
71 }
72
73
74 class Role(AWSObject):
75 resource_type = "AWS::IAM::Role"
76
77 props = {
78 'AssumeRolePolicyDocument': (policytypes, True),
79 'ManagedPolicyArns': ([basestring], False),
80 'MaxSessionDuration': (integer, False),
81 'Path': (iam_path, False),
82 'Policies': ([Policy], False),
83 'RoleName': (iam_role_name, False),
84 }
85
86
87 class ServiceLinkedRole(AWSObject):
88 resource_type = "AWS::IAM::ServiceLinkedRole"
89
90 props = {
91 'AWSServiceName': (basestring, True),
92 'CustomSuffix': (basestring, False),
93 'Description': (basestring, False),
94 }
95
96
97 class LoginProfile(AWSProperty):
98 props = {
99 'Password': (basestring, True),
100 'PasswordResetRequired': (boolean, False),
101 }
102
103
104 class User(AWSObject):
105 resource_type = "AWS::IAM::User"
106
107 props = {
108 'Path': (iam_path, False),
109 'Groups': ([basestring], False),
110 'ManagedPolicyArns': ([basestring], False),
111 'LoginProfile': (LoginProfile, False),
112 'Policies': ([Policy], False),
113 'UserName': (iam_user_name, False),
114 }
115
116
117 class UserToGroupAddition(AWSObject):
118 resource_type = "AWS::IAM::UserToGroupAddition"
119
120 props = {
121 'GroupName': (basestring, True),
122 'Users': (list, True),
123 }
124
125
126 class ManagedPolicy(AWSObject):
127 resource_type = "AWS::IAM::ManagedPolicy"
128
129 props = {
130 'Description': (basestring, False),
131 'Groups': ([basestring], False),
132 'ManagedPolicyName': (basestring, False),
133 'Path': (iam_path, False),
134 'PolicyDocument': (policytypes, True),
135 'Roles': ([basestring], False),
136 'Users': ([basestring], False),
137 }
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/iam.py b/troposphere/iam.py
--- a/troposphere/iam.py
+++ b/troposphere/iam.py
@@ -79,6 +79,7 @@
'ManagedPolicyArns': ([basestring], False),
'MaxSessionDuration': (integer, False),
'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'RoleName': (iam_role_name, False),
}
@@ -105,10 +106,11 @@
resource_type = "AWS::IAM::User"
props = {
- 'Path': (iam_path, False),
'Groups': ([basestring], False),
- 'ManagedPolicyArns': ([basestring], False),
'LoginProfile': (LoginProfile, False),
+ 'ManagedPolicyArns': ([basestring], False),
+ 'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'UserName': (iam_user_name, False),
}
| {"golden_diff": "diff --git a/troposphere/iam.py b/troposphere/iam.py\n--- a/troposphere/iam.py\n+++ b/troposphere/iam.py\n@@ -79,6 +79,7 @@\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n@@ -105,10 +106,11 @@\n resource_type = \"AWS::IAM::User\"\n \n props = {\n- 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n- 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n+ 'ManagedPolicyArns': ([basestring], False),\n+ 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n", "issue": "Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User\nThis property has been released on November 9 by AWS.\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html\r\n```\r\nPermissionsBoundary\r\n\r\n The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.\r\n\r\n Required: No\r\n\r\n Type: String\r\n\r\n Update requires: No interruption\r\n\r\n```\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n 'InstanceProfileName': (basestring, False),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass ServiceLinkedRole(AWSObject):\n resource_type = \"AWS::IAM::ServiceLinkedRole\"\n\n props = {\n 'AWSServiceName': (basestring, True),\n 'CustomSuffix': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n 
resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyName': (basestring, False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}], "after_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n 'InstanceProfileName': (basestring, False),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass ServiceLinkedRole(AWSObject):\n resource_type = \"AWS::IAM::ServiceLinkedRole\"\n\n props = {\n 'AWSServiceName': (basestring, True),\n 'CustomSuffix': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Groups': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n 
props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyName': (basestring, False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}]} | 1,541 | 256 |
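For context, a minimal sketch of how the newly added `PermissionsBoundary` property could be used once the patch above is applied. The `Template`/`Role` usage follows troposphere's standard API; the trust policy and the boundary ARN are placeholder values, not taken from the record.

```python
from troposphere import Template
from troposphere.iam import Role

template = Template()
template.add_resource(
    Role(
        "AppRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"Service": "ec2.amazonaws.com"},
                "Action": "sts:AssumeRole",
            }],
        },
        # Newly supported optional property; the ARN below is a placeholder.
        PermissionsBoundary="arn:aws:iam::123456789012:policy/example-boundary",
    )
)
print(template.to_json())
```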
gh_patches_debug_26528 | rasdani/github-patches | git_diff | ESMCI__cime-1048 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing f19_g16_r01 high resolution river flow grid
Between cime5.2.0-alpha.9 and cime5.2.0-alpha.20 the config_grids file format was changed, and one grid needed for CLM testing was removed. The change to add it back again is as follows...
```
[erik@yslogin4 scripts]$ svn diff ../cime_config/cesm/config_grids.xml
Index: ../cime_config/cesm/config_grids.xml
===================================================================
--- ../cime_config/cesm/config_grids.xml (revision 7095)
+++ ../cime_config/cesm/config_grids.xml (working copy)
@@ -294,6 +294,15 @@
<grid name="ocnice">gx1v6</grid>
</model_grid>
+ <model_grid alias="f19_g16_r01">
+ <grid name="atm">1.9x2.5</grid>
+ <grid name="lnd">1.9x2.5</grid>
+ <grid name="ocnice">gx1v6</grid>
+ <grid name="rof">r01</grid>
+ <mask>gx1v6</mask>
+ </model_grid>
+
+
<model_grid alias="f19_g16_gl4" compset="_CISM">
<grid name="atm">1.9x2.5</grid>
<grid name="lnd">1.9x2.5</grid>
```
@mvertens @jedwards4b @billsacks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/python/CIME/BuildTools/configure.py`
Content:
```
1 #!/usr/bin/env python
2
3 """This script writes CIME build information to a directory.
4
5 The pieces of information that will be written include:
6
7 1. Machine-specific build settings (i.e. the "Macros" file).
8 2. File-specific build settings (i.e. "Depends" files).
9 3. Environment variable loads (i.e. the env_mach_specific files).
10
11 The .env_mach_specific.sh and .env_mach_specific.csh files are specific to a
12 given compiler, MPI library, and DEBUG setting. By default, these will be the
13 machine's default compiler, the machine's default MPI library, and FALSE,
14 respectively. These can be changed by setting the environment variables
15 COMPILER, MPILIB, and DEBUG, respectively.
16 """
17
18 import shutil
19 from CIME.XML.standard_module_setup import *
20 from CIME.utils import expect
21 from CIME.XML.compilers import Compilers
22 from CIME.XML.env_mach_specific import EnvMachSpecific
23
24 logger = logging.getLogger(__name__)
25
26 def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):
27 """Add Macros, Depends, and env_mach_specific files to a directory.
28
29 Arguments:
30 machobj - Machines argument for this machine.
31 output_dir - Directory in which to place output.
32 macros_format - Container containing the string 'Makefile' to produce
33 Makefile Macros output, and/or 'CMake' for CMake output.
34 compiler - String containing the compiler vendor to configure for.
35 mpilib - String containing the MPI implementation to configure for.
36 debug - Boolean specifying whether debugging options are enabled.
37 """
38 # Macros generation.
39 suffixes = {'Makefile': 'make', 'CMake': 'cmake'}
40 macro_maker = Compilers(machobj)
41 for form in macros_format:
42 out_file_name = os.path.join(output_dir,"Macros."+suffixes[form])
43 macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])
44
45 _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)
46 _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,
47 debug, sysos)
48
49 def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):
50 """
51 Copy any system or compiler Depends files if they do not exist in the output directory
52 """
53 for dep in (machine_name, compiler):
54 dfile = os.path.join(machines_dir, "Depends.%s"%dep)
55 outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
56 if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
57 shutil.copyfile(dfile, outputdfile)
58 dfile = os.path.join(machines_dir, "Depends.%s.%s"%(machine_name,compiler))
59 outputdfile = os.path.join(output_dir, "Depends.%s.%s"%(machine_name,compiler))
60 if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
61 shutil.copyfile(dfile, outputdfile)
62
63 def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):
64 """
65 env_mach_specific generation.
66 """
67 ems_path = os.path.join(output_dir, "env_mach_specific.xml")
68 if os.path.exists(ems_path):
69 logger.warn("%s already exists, delete to replace"%ems_path)
70 return
71 ems_file = EnvMachSpecific(output_dir)
72 ems_file.populate(machobj)
73 ems_file.write()
74 for shell in ('sh', 'csh'):
75 ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)
76 shell_path = os.path.join(output_dir, ".env_mach_specific." + shell)
77 with open(shell_path, 'a') as shell_file:
78 if shell == 'sh':
79 shell_file.write("\nexport COMPILER=%s\n" % compiler)
80 shell_file.write("export MPILIB=%s\n" % mpilib)
81 shell_file.write("export DEBUG=%s\n" % repr(debug).upper())
82 shell_file.write("export OS=%s\n" % sysos)
83 else:
84 shell_file.write("\nsetenv COMPILER %s\n" % compiler)
85 shell_file.write("setenv MPILIB %s\n" % mpilib)
86 shell_file.write("setenv DEBUG %s\n" % repr(debug).upper())
87 shell_file.write("setenv OS %s\n" % sysos)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/python/CIME/BuildTools/configure.py b/utils/python/CIME/BuildTools/configure.py
--- a/utils/python/CIME/BuildTools/configure.py
+++ b/utils/python/CIME/BuildTools/configure.py
@@ -49,16 +49,20 @@
def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):
"""
Copy any system or compiler Depends files if they do not exist in the output directory
+ If there is a match for Depends.machine_name.compiler copy that and ignore the others
"""
- for dep in (machine_name, compiler):
- dfile = os.path.join(machines_dir, "Depends.%s"%dep)
- outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
- shutil.copyfile(dfile, outputdfile)
dfile = os.path.join(machines_dir, "Depends.%s.%s"%(machine_name,compiler))
outputdfile = os.path.join(output_dir, "Depends.%s.%s"%(machine_name,compiler))
- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
- shutil.copyfile(dfile, outputdfile)
+ if os.path.isfile(dfile):
+ if not os.path.isfile(outputdfile):
+ shutil.copyfile(dfile, outputdfile)
+ else:
+ for dep in (machine_name, compiler):
+ dfile = os.path.join(machines_dir, "Depends.%s"%dep)
+ outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
+ if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
+ shutil.copyfile(dfile, outputdfile)
+
def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):
"""
| {"golden_diff": "diff --git a/utils/python/CIME/BuildTools/configure.py b/utils/python/CIME/BuildTools/configure.py\n--- a/utils/python/CIME/BuildTools/configure.py\n+++ b/utils/python/CIME/BuildTools/configure.py\n@@ -49,16 +49,20 @@\n def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n+ If there is a match for Depends.machine_name.compiler copy that and ignore the others\n \"\"\"\n- for dep in (machine_name, compiler):\n- dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n- outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n- shutil.copyfile(dfile, outputdfile)\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n- shutil.copyfile(dfile, outputdfile)\n+ if os.path.isfile(dfile):\n+ if not os.path.isfile(outputdfile):\n+ shutil.copyfile(dfile, outputdfile)\n+ else:\n+ for dep in (machine_name, compiler):\n+ dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n+ outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n+ if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n+ shutil.copyfile(dfile, outputdfile)\n+\n \n def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n", "issue": "Missing f19_g16_r01 high resolution river flow grid\nBetween cime5.2.0-alpha.9 and cime5.2.0-alpha.20 the config_grids file format was changed, and one grid needed for CLM testing was removed. The change to add it back again is as follows...\r\n\r\n```\r\n[erik@yslogin4 scripts]$ svn diff ../cime_config/cesm/config_grids.xml \r\nIndex: ../cime_config/cesm/config_grids.xml\r\n===================================================================\r\n--- ../cime_config/cesm/config_grids.xml\t(revision 7095)\r\n+++ ../cime_config/cesm/config_grids.xml\t(working copy)\r\n@@ -294,6 +294,15 @@\r\n <grid name=\"ocnice\">gx1v6</grid>\r\n </model_grid>\r\n \r\n+ <model_grid alias=\"f19_g16_r01\">\r\n+ <grid name=\"atm\">1.9x2.5</grid>\r\n+ <grid name=\"lnd\">1.9x2.5</grid>\r\n+ <grid name=\"ocnice\">gx1v6</grid>\r\n+ <grid name=\"rof\">r01</grid>\r\n+ <mask>gx1v6</mask>\r\n+ </model_grid>\r\n+\r\n+\r\n <model_grid alias=\"f19_g16_gl4\" compset=\"_CISM\">\r\n <grid name=\"atm\">1.9x2.5</grid>\r\n <grid name=\"lnd\">1.9x2.5</grid>\r\n\r\n```\r\n@mvertens @jedwards4b @billsacks\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"This script writes CIME build information to a directory.\n\nThe pieces of information that will be written include:\n\n1. Machine-specific build settings (i.e. the \"Macros\" file).\n2. File-specific build settings (i.e. \"Depends\" files).\n3. Environment variable loads (i.e. the env_mach_specific files).\n\nThe .env_mach_specific.sh and .env_mach_specific.csh files are specific to a\ngiven compiler, MPI library, and DEBUG setting. By default, these will be the\nmachine's default compiler, the machine's default MPI library, and FALSE,\nrespectively. 
These can be changed by setting the environment variables\nCOMPILER, MPILIB, and DEBUG, respectively.\n\"\"\"\n\nimport shutil\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect\nfrom CIME.XML.compilers import Compilers\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\n\nlogger = logging.getLogger(__name__)\n\ndef configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):\n \"\"\"Add Macros, Depends, and env_mach_specific files to a directory.\n\n Arguments:\n machobj - Machines argument for this machine.\n output_dir - Directory in which to place output.\n macros_format - Container containing the string 'Makefile' to produce\n Makefile Macros output, and/or 'CMake' for CMake output.\n compiler - String containing the compiler vendor to configure for.\n mpilib - String containing the MPI implementation to configure for.\n debug - Boolean specifying whether debugging options are enabled.\n \"\"\"\n # Macros generation.\n suffixes = {'Makefile': 'make', 'CMake': 'cmake'}\n macro_maker = Compilers(machobj)\n for form in macros_format:\n out_file_name = os.path.join(output_dir,\"Macros.\"+suffixes[form])\n macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])\n\n _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)\n _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,\n debug, sysos)\n\ndef _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n \"\"\"\n for dep in (machine_name, compiler):\n dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n\ndef _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n env_mach_specific generation.\n \"\"\"\n ems_path = os.path.join(output_dir, \"env_mach_specific.xml\")\n if os.path.exists(ems_path):\n logger.warn(\"%s already exists, delete to replace\"%ems_path)\n return\n ems_file = EnvMachSpecific(output_dir)\n ems_file.populate(machobj)\n ems_file.write()\n for shell in ('sh', 'csh'):\n ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)\n shell_path = os.path.join(output_dir, \".env_mach_specific.\" + shell)\n with open(shell_path, 'a') as shell_file:\n if shell == 'sh':\n shell_file.write(\"\\nexport COMPILER=%s\\n\" % compiler)\n shell_file.write(\"export MPILIB=%s\\n\" % mpilib)\n shell_file.write(\"export DEBUG=%s\\n\" % repr(debug).upper())\n shell_file.write(\"export OS=%s\\n\" % sysos)\n else:\n shell_file.write(\"\\nsetenv COMPILER %s\\n\" % compiler)\n shell_file.write(\"setenv MPILIB %s\\n\" % mpilib)\n shell_file.write(\"setenv DEBUG %s\\n\" % repr(debug).upper())\n shell_file.write(\"setenv OS %s\\n\" % sysos)\n", "path": "utils/python/CIME/BuildTools/configure.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"This script writes CIME build information to a directory.\n\nThe pieces of information that will be written include:\n\n1. Machine-specific build settings (i.e. the \"Macros\" file).\n2. 
File-specific build settings (i.e. \"Depends\" files).\n3. Environment variable loads (i.e. the env_mach_specific files).\n\nThe .env_mach_specific.sh and .env_mach_specific.csh files are specific to a\ngiven compiler, MPI library, and DEBUG setting. By default, these will be the\nmachine's default compiler, the machine's default MPI library, and FALSE,\nrespectively. These can be changed by setting the environment variables\nCOMPILER, MPILIB, and DEBUG, respectively.\n\"\"\"\n\nimport shutil\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect\nfrom CIME.XML.compilers import Compilers\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\n\nlogger = logging.getLogger(__name__)\n\ndef configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):\n \"\"\"Add Macros, Depends, and env_mach_specific files to a directory.\n\n Arguments:\n machobj - Machines argument for this machine.\n output_dir - Directory in which to place output.\n macros_format - Container containing the string 'Makefile' to produce\n Makefile Macros output, and/or 'CMake' for CMake output.\n compiler - String containing the compiler vendor to configure for.\n mpilib - String containing the MPI implementation to configure for.\n debug - Boolean specifying whether debugging options are enabled.\n \"\"\"\n # Macros generation.\n suffixes = {'Makefile': 'make', 'CMake': 'cmake'}\n macro_maker = Compilers(machobj)\n for form in macros_format:\n out_file_name = os.path.join(output_dir,\"Macros.\"+suffixes[form])\n macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])\n\n _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)\n _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,\n debug, sysos)\n\ndef _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n If there is a match for Depends.machine_name.compiler copy that and ignore the others\n \"\"\"\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n if os.path.isfile(dfile):\n if not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n else:\n for dep in (machine_name, compiler):\n dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n\n\ndef _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n env_mach_specific generation.\n \"\"\"\n ems_path = os.path.join(output_dir, \"env_mach_specific.xml\")\n if os.path.exists(ems_path):\n logger.warn(\"%s already exists, delete to replace\"%ems_path)\n return\n ems_file = EnvMachSpecific(output_dir)\n ems_file.populate(machobj)\n ems_file.write()\n for shell in ('sh', 'csh'):\n ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)\n shell_path = os.path.join(output_dir, \".env_mach_specific.\" + shell)\n with open(shell_path, 'a') as shell_file:\n if shell == 'sh':\n shell_file.write(\"\\nexport COMPILER=%s\\n\" % compiler)\n shell_file.write(\"export MPILIB=%s\\n\" % mpilib)\n shell_file.write(\"export DEBUG=%s\\n\" % repr(debug).upper())\n shell_file.write(\"export OS=%s\\n\" % sysos)\n else:\n shell_file.write(\"\\nsetenv COMPILER %s\\n\" % compiler)\n 
shell_file.write(\"setenv MPILIB %s\\n\" % mpilib)\n shell_file.write(\"setenv DEBUG %s\\n\" % repr(debug).upper())\n shell_file.write(\"setenv OS %s\\n\" % sysos)\n", "path": "utils/python/CIME/BuildTools/configure.py"}]} | 1,787 | 427 |
gh_patches_debug_19916 | rasdani/github-patches | git_diff | weecology__retriever-1121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a description field(s) to setup.py
This populates the description on PYPI:
https://packaging.python.org/tutorials/distributing-packages/#description
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Use the following command to install retriever: python setup.py install"""
2 from __future__ import absolute_import
3
4 import os
5 import platform
6
7 from pkg_resources import parse_version
8 from setuptools import setup, find_packages
9
10 current_platform = platform.system().lower()
11 extra_includes = []
12 if current_platform == "windows":
13 extra_includes += ["pypyodbc"]
14
15 if os.path.exists(".git/hooks"): # check if we are in git repo
16 os.system("cp hooks/pre-commit .git/hooks/pre-commit")
17 os.system("chmod +x .git/hooks/pre-commit")
18
19 app_data = "~/.retriever/scripts"
20 if os.path.exists(app_data):
21 os.system("rm -r {}".format(app_data))
22
23 __version__ = 'v2.1.dev'
24 with open(os.path.join("retriever", "_version.py"), "w") as version_file:
25 version_file.write("__version__ = " + "'" + __version__ + "'\n")
26 version_file.close()
27
28
29 def clean_version(v):
30 return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
31
32 includes = [
33 'xlrd',
34 'future',
35 'argcomplete',
36 'pymysql',
37 'psycopg2',
38 'sqlite3',
39 ] + extra_includes
40
41 excludes = [
42 'pyreadline',
43 'doctest',
44 'pickle',
45 'pdb',
46 'pywin', 'pywin.debugger',
47 'pywin.debugger.dbgcon',
48 'pywin.dialogs', 'pywin.dialogs.list',
49 'Tkconstants', 'Tkinter', 'tcl', 'tk'
50 ]
51
52 setup(name='retriever',
53 version=clean_version(__version__),
54 description='Data Retriever',
55 author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
56 author_email='[email protected]',
57 url='https://github.com/weecology/retriever',
58 classifiers=['Intended Audience :: Science/Research',
59 'License :: OSI Approved :: MIT License',
60 'Programming Language :: Python',
61 'Programming Language :: Python :: 2',
62 'Programming Language :: Python :: 3', ],
63 packages=find_packages(
64 exclude=['hooks',
65 'docs',
66 'tests',
67 'scripts',
68 'docker',
69 ".cache"]),
70 entry_points={
71 'console_scripts': [
72 'retriever = retriever.__main__:main',
73 ],
74 },
75 install_requires=[
76 'xlrd',
77 'future',
78 'argcomplete',
79 'tqdm'
80 ],
81 data_files=[('', ['CITATION'])],
82 setup_requires=[],
83 )
84
85 # windows doesn't have bash. No point in using bash-completion
86 if current_platform != "windows":
87 # if platform is OS X use "~/.bash_profile"
88 if current_platform == "darwin":
89 bash_file = "~/.bash_profile"
90 # if platform is Linux use "~/.bashrc
91 elif current_platform == "linux":
92 bash_file = "~/.bashrc"
93 # else write and discard
94 else:
95 bash_file = "/dev/null"
96
97 argcomplete_command = 'eval "$(register-python-argcomplete retriever)"'
98 with open(os.path.expanduser(bash_file), "a+") as bashrc:
99 bashrc.seek(0)
100 # register retriever for arg-completion if not already registered
101 # whenever a new shell is spawned
102 if argcomplete_command not in bashrc.read():
103 bashrc.write(argcomplete_command + "\n")
104 bashrc.close()
105 os.system("activate-global-python-argcomplete")
106 # register for the current shell
107 os.system(argcomplete_command)
108
109 try:
110 from retriever.compile import compile
111 from retriever.lib.repository import check_for_updates
112
113 check_for_updates(False)
114 compile()
115 except:
116 pass
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,7 @@
def clean_version(v):
return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
+
includes = [
'xlrd',
'future',
@@ -52,6 +53,10 @@
setup(name='retriever',
version=clean_version(__version__),
description='Data Retriever',
+ long_description=('The Data Retriever is a package manager for data. '
+ 'It downloads, cleans, and stores publicly available data, '
+ 'so that analysts spend less time cleaning and managing data, '
+ 'and more time analyzing it.'),
author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
author_email='[email protected]',
url='https://github.com/weecology/retriever',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,6 +29,7 @@\n def clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n \n+\n includes = [\n 'xlrd',\n 'future',\n@@ -52,6 +53,10 @@\n setup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n+ long_description=('The Data Retriever is a package manager for data. '\n+ 'It downloads, cleans, and stores publicly available data, '\n+ 'so that analysts spend less time cleaning and managing data, '\n+ 'and more time analyzing it.'),\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n", "issue": "Add a description field(s) to setup.py\nThis populates the description on PYPI:\r\n\r\nhttps://packaging.python.org/tutorials/distributing-packages/#description\n", "before_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.1.dev'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3', ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm'\n ],\n data_files=[('', ['CITATION'])],\n setup_requires=[],\n )\n\n# windows doesn't have bash. 
No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates(False)\n compile()\nexcept:\n pass\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.1.dev'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n long_description=('The Data Retriever is a package manager for data. '\n 'It downloads, cleans, and stores publicly available data, '\n 'so that analysts spend less time cleaning and managing data, '\n 'and more time analyzing it.'),\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3', ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm'\n ],\n data_files=[('', ['CITATION'])],\n setup_requires=[],\n )\n\n# windows doesn't have bash. 
No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates(False)\n compile()\nexcept:\n pass\n", "path": "setup.py"}]} | 1,372 | 217 |
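The fix above hard-codes a `long_description` string directly in `setup()`. A common alternative, sketched below, is to read it from the project README; the README filename, the placeholder version string, and the `long_description_content_type` argument are assumptions for illustration, not part of the record.

```python
from setuptools import setup

# Assumes a README.md exists at the project root (placeholder for this sketch).
with open("README.md", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="retriever",
    version="2.1.0",  # placeholder version
    description="Data Retriever",
    long_description=long_description,
    long_description_content_type="text/markdown",
)
```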
gh_patches_debug_51313 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5128 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
filters.farid missing from skimage.filters documentation
## Description
The `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not in `__all__`? (No time to investigate right now.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/filters/__init__.py`
Content:
```
1 from .lpi_filter import inverse, wiener, LPIFilter2D
2 from ._gaussian import (gaussian, _guess_spatial_dimensions,
3 difference_of_gaussians)
4 from .edges import (sobel, sobel_h, sobel_v,
5 scharr, scharr_h, scharr_v,
6 prewitt, prewitt_h, prewitt_v,
7 roberts, roberts_pos_diag, roberts_neg_diag,
8 laplace,
9 farid, farid_h, farid_v)
10 from ._rank_order import rank_order
11 from ._gabor import gabor_kernel, gabor
12 from .thresholding import (threshold_local, threshold_otsu, threshold_yen,
13 threshold_isodata, threshold_li, threshold_minimum,
14 threshold_mean, threshold_triangle,
15 threshold_niblack, threshold_sauvola,
16 threshold_multiotsu, try_all_threshold,
17 apply_hysteresis_threshold)
18 from .ridges import (meijering, sato, frangi, hessian)
19 from . import rank
20 from ._median import median
21 from ._sparse import correlate_sparse
22 from ._unsharp_mask import unsharp_mask
23 from ._window import window
24
25
26 __all__ = ['inverse',
27 'correlate_sparse',
28 'wiener',
29 'LPIFilter2D',
30 'gaussian',
31 'difference_of_gaussians',
32 'median',
33 'sobel',
34 'sobel_h',
35 'sobel_v',
36 'scharr',
37 'scharr_h',
38 'scharr_v',
39 'prewitt',
40 'prewitt_h',
41 'prewitt_v',
42 'roberts',
43 'roberts_pos_diag',
44 'roberts_neg_diag',
45 'laplace',
46 'rank_order',
47 'gabor_kernel',
48 'gabor',
49 'try_all_threshold',
50 'meijering',
51 'sato',
52 'frangi',
53 'hessian',
54 'threshold_otsu',
55 'threshold_yen',
56 'threshold_isodata',
57 'threshold_li',
58 'threshold_local',
59 'threshold_minimum',
60 'threshold_mean',
61 'threshold_niblack',
62 'threshold_sauvola',
63 'threshold_triangle',
64 'threshold_multiotsu',
65 'apply_hysteresis_threshold',
66 'rank',
67 'unsharp_mask',
68 'window']
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py
--- a/skimage/filters/__init__.py
+++ b/skimage/filters/__init__.py
@@ -43,6 +43,9 @@
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
+ 'farid',
+ 'farid_h',
+ 'farid_v',
'rank_order',
'gabor_kernel',
'gabor',
| {"golden_diff": "diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -43,6 +43,9 @@\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n+ 'farid',\n+ 'farid_h',\n+ 'farid_v',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n", "issue": "filters.farid missing from skimage.filters documentation\n## Description\r\n\r\nThe `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not it `__all__`? (No time to investigate right now.)\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import (gaussian, _guess_spatial_dimensions,\n difference_of_gaussians)\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace,\n farid, farid_h, farid_v)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom .thresholding import (threshold_local, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n threshold_multiotsu, try_all_threshold,\n apply_hysteresis_threshold)\nfrom .ridges import (meijering, sato, frangi, hessian)\nfrom . import rank\nfrom ._median import median\nfrom ._sparse import correlate_sparse\nfrom ._unsharp_mask import unsharp_mask\nfrom ._window import window\n\n\n__all__ = ['inverse',\n 'correlate_sparse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'difference_of_gaussians',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'meijering',\n 'sato',\n 'frangi',\n 'hessian',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_local',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'threshold_multiotsu',\n 'apply_hysteresis_threshold',\n 'rank',\n 'unsharp_mask',\n 'window']\n", "path": "skimage/filters/__init__.py"}], "after_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import (gaussian, _guess_spatial_dimensions,\n difference_of_gaussians)\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace,\n farid, farid_h, farid_v)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom .thresholding import (threshold_local, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n threshold_multiotsu, try_all_threshold,\n apply_hysteresis_threshold)\nfrom .ridges import (meijering, sato, frangi, hessian)\nfrom . 
import rank\nfrom ._median import median\nfrom ._sparse import correlate_sparse\nfrom ._unsharp_mask import unsharp_mask\nfrom ._window import window\n\n\n__all__ = ['inverse',\n 'correlate_sparse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'difference_of_gaussians',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'farid',\n 'farid_h',\n 'farid_v',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'meijering',\n 'sato',\n 'frangi',\n 'hessian',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_local',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'threshold_multiotsu',\n 'apply_hysteresis_threshold',\n 'rank',\n 'unsharp_mask',\n 'window']\n", "path": "skimage/filters/__init__.py"}]} | 978 | 117 |
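A quick, illustrative way to confirm the effect of the patch: once `farid`, `farid_h`, and `farid_v` are listed in `__all__`, they are picked up by star imports and by documentation tooling that enumerates `__all__`. The snippet below simply asserts that the names are exported and callable.

```python
import skimage.filters as filters

for name in ("farid", "farid_h", "farid_v"):
    assert name in filters.__all__, "%s missing from __all__" % name
    assert callable(getattr(filters, name)), "%s is not callable" % name
print("farid, farid_h and farid_v are exported")
```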
gh_patches_debug_43025 | rasdani/github-patches | git_diff | azavea__raster-vision-641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include per-scene metrics in eval.json
It would be useful to see metrics for each scene in addition to metrics averaged over all scenes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/evaluation/classification_evaluation.py`
Content:
```
1 from abc import (ABC, abstractmethod)
2
3 import json
4
5 from rastervision.evaluation import ClassEvaluationItem
6 from rastervision.utils.files import str_to_file
7
8
9 class ClassificationEvaluation(ABC):
10 """Base class for evaluating predictions for tasks that have classes.
11
12 Evaluations can be keyed, for instance, if evaluations happen per class.
13 """
14
15 def __init__(self):
16 self.clear()
17
18 def clear(self):
19 """Clear the Evaluation."""
20 self.class_to_eval_item = {}
21 self.avg_item = None
22
23 def set_class_to_eval_item(self, class_to_eval_item):
24 self.class_to_eval_item = class_to_eval_item
25
26 def get_by_id(self, key):
27 """Gets the evaluation for a particular EvaluationItem key"""
28 return self.class_to_eval_item[key]
29
30 def has_id(self, key):
31 """Answers whether or not the EvaluationItem key is represented"""
32 return key in self.class_to_eval_item
33
34 def to_json(self):
35 json_rep = []
36 for eval_item in self.class_to_eval_item.values():
37 json_rep.append(eval_item.to_json())
38 json_rep.append(self.avg_item.to_json())
39 return json_rep
40
41 def save(self, output_uri):
42 """Save this Evaluation to a file.
43
44 Args:
45 output_uri: string URI for the file to write.
46 """
47 json_str = json.dumps(self.to_json(), indent=4)
48 str_to_file(json_str, output_uri)
49
50 def merge(self, evaluation):
51 """Merge Evaluation for another Scene into this one.
52
53 This is useful for computing the average metrics of a set of scenes.
54 The results of the averaging are stored in this Evaluation.
55
56 Args:
57 evaluation: Evaluation to merge into this one
58 """
59 if len(self.class_to_eval_item) == 0:
60 self.class_to_eval_item = evaluation.class_to_eval_item
61 else:
62 for key, other_eval_item in \
63 evaluation.class_to_eval_item.items():
64 if self.has_id(key):
65 self.get_by_id(key).merge(other_eval_item)
66 else:
67 self.class_to_eval_item[key] = other_eval_item
68
69 self.compute_avg()
70
71 def compute_avg(self):
72 """Compute average metrics over all keys."""
73 self.avg_item = ClassEvaluationItem(class_name='average')
74 for eval_item in self.class_to_eval_item.values():
75 self.avg_item.merge(eval_item)
76
77 @abstractmethod
78 def compute(self, ground_truth_labels, prediction_labels):
79 """Compute metrics for a single scene.
80
81 Args:
82 ground_truth_labels: Ground Truth labels to evaluate against.
83 prediction_labels: The predicted labels to evaluate.
84 """
85 pass
86
```
Path: `rastervision/evaluation/semantic_segmentation_evaluator.py`
Content:
```
1 import logging
2
3 from rastervision.data import ActivateMixin
4 from rastervision.rv_config import RVConfig
5 from rastervision.utils.files import (download_if_needed)
6 from rastervision.evaluation import (ClassificationEvaluator,
7 SemanticSegmentationEvaluation)
8
9 log = logging.getLogger(__name__)
10
11
12 class SemanticSegmentationEvaluator(ClassificationEvaluator):
13 """Evaluates predictions for a set of scenes.
14 """
15
16 def __init__(self, class_map, output_uri):
17 super().__init__(class_map, output_uri)
18
19 def create_evaluation(self):
20 return SemanticSegmentationEvaluation(self.class_map)
21
22 def process(self, scenes, tmp_dir):
23 evaluation = self.create_evaluation()
24 for scene in scenes:
25 log.info('Computing evaluation for scene {}...'.format(scene.id))
26 label_source = scene.ground_truth_label_source
27 label_store = scene.prediction_label_store
28 with ActivateMixin.compose(label_source, label_store):
29 ground_truth = label_source.get_labels()
30 predictions = label_store.get_labels()
31
32 if scene.aoi_polygons:
33 # Filter labels based on AOI.
34 ground_truth = ground_truth.filter_by_aoi(
35 scene.aoi_polygons)
36 predictions = predictions.filter_by_aoi(scene.aoi_polygons)
37 scene_evaluation = self.create_evaluation()
38 scene_evaluation.compute(ground_truth, predictions)
39 evaluation.merge(scene_evaluation)
40
41 if hasattr(label_source, 'source') and hasattr(
42 label_source.source, 'vector_source') and hasattr(
43 label_store, 'vector_output'):
44 tmp_dir = RVConfig.get_tmp_dir().name
45 gt_geojson = label_source.source.vector_source.get_geojson()
46 for vo in label_store.vector_output:
47 pred_geojson = vo['uri']
48 mode = vo['mode']
49 class_id = vo['class_id']
50 pred_geojson_local = download_if_needed(
51 pred_geojson, tmp_dir)
52 scene_evaluation = self.create_evaluation()
53 scene_evaluation.compute_vector(
54 gt_geojson, pred_geojson_local, mode, class_id)
55 evaluation.merge(scene_evaluation)
56
57 evaluation.save(self.output_uri)
58
```
Path: `rastervision/evaluation/classification_evaluator.py`
Content:
```
1 from abc import (abstractmethod)
2 import logging
3
4 from rastervision.evaluation import Evaluator
5 from rastervision.data import ActivateMixin
6
7 log = logging.getLogger(__name__)
8
9
10 class ClassificationEvaluator(Evaluator):
11 """Evaluates predictions for a set of scenes.
12 """
13
14 def __init__(self, class_map, output_uri):
15 self.class_map = class_map
16 self.output_uri = output_uri
17
18 @abstractmethod
19 def create_evaluation(self):
20 pass
21
22 def process(self, scenes, tmp_dir):
23 evaluation = self.create_evaluation()
24 for scene in scenes:
25 log.info('Computing evaluation for scene {}...'.format(scene.id))
26 label_source = scene.ground_truth_label_source
27 label_store = scene.prediction_label_store
28 with ActivateMixin.compose(label_source, label_store):
29 ground_truth = label_source.get_labels()
30 predictions = label_store.get_labels()
31
32 if scene.aoi_polygons:
33 # Filter labels based on AOI.
34 ground_truth = ground_truth.filter_by_aoi(
35 scene.aoi_polygons)
36 predictions = predictions.filter_by_aoi(scene.aoi_polygons)
37 scene_evaluation = self.create_evaluation()
38 scene_evaluation.compute(ground_truth, predictions)
39 evaluation.merge(scene_evaluation)
40
41 evaluation.save(self.output_uri)
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision/evaluation/classification_evaluation.py b/rastervision/evaluation/classification_evaluation.py
--- a/rastervision/evaluation/classification_evaluation.py
+++ b/rastervision/evaluation/classification_evaluation.py
@@ -1,4 +1,5 @@
from abc import (ABC, abstractmethod)
+import copy
import json
@@ -18,6 +19,7 @@
def clear(self):
"""Clear the Evaluation."""
self.class_to_eval_item = {}
+ self.scene_to_eval = {}
self.avg_item = None
def set_class_to_eval_item(self, class_to_eval_item):
@@ -36,6 +38,14 @@
for eval_item in self.class_to_eval_item.values():
json_rep.append(eval_item.to_json())
json_rep.append(self.avg_item.to_json())
+
+ if self.scene_to_eval:
+ json_rep = {'overall': json_rep}
+ scene_to_eval_json = {}
+ for scene_id, eval in self.scene_to_eval.items():
+ scene_to_eval_json[scene_id] = eval.to_json()
+ json_rep['per_scene'] = scene_to_eval_json
+
return json_rep
def save(self, output_uri):
@@ -47,7 +57,7 @@
json_str = json.dumps(self.to_json(), indent=4)
str_to_file(json_str, output_uri)
- def merge(self, evaluation):
+ def merge(self, evaluation, scene_id=None):
"""Merge Evaluation for another Scene into this one.
This is useful for computing the average metrics of a set of scenes.
@@ -68,6 +78,9 @@
self.compute_avg()
+ if scene_id is not None:
+ self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)
+
def compute_avg(self):
"""Compute average metrics over all keys."""
self.avg_item = ClassEvaluationItem(class_name='average')
diff --git a/rastervision/evaluation/classification_evaluator.py b/rastervision/evaluation/classification_evaluator.py
--- a/rastervision/evaluation/classification_evaluator.py
+++ b/rastervision/evaluation/classification_evaluator.py
@@ -36,6 +36,5 @@
predictions = predictions.filter_by_aoi(scene.aoi_polygons)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
- evaluation.merge(scene_evaluation)
-
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
evaluation.save(self.output_uri)
diff --git a/rastervision/evaluation/semantic_segmentation_evaluator.py b/rastervision/evaluation/semantic_segmentation_evaluator.py
--- a/rastervision/evaluation/semantic_segmentation_evaluator.py
+++ b/rastervision/evaluation/semantic_segmentation_evaluator.py
@@ -36,7 +36,7 @@
predictions = predictions.filter_by_aoi(scene.aoi_polygons)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
- evaluation.merge(scene_evaluation)
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
if hasattr(label_source, 'source') and hasattr(
label_source.source, 'vector_source') and hasattr(
@@ -52,6 +52,6 @@
scene_evaluation = self.create_evaluation()
scene_evaluation.compute_vector(
gt_geojson, pred_geojson_local, mode, class_id)
- evaluation.merge(scene_evaluation)
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
evaluation.save(self.output_uri)
| {"golden_diff": "diff --git a/rastervision/evaluation/classification_evaluation.py b/rastervision/evaluation/classification_evaluation.py\n--- a/rastervision/evaluation/classification_evaluation.py\n+++ b/rastervision/evaluation/classification_evaluation.py\n@@ -1,4 +1,5 @@\n from abc import (ABC, abstractmethod)\n+import copy\n \n import json\n \n@@ -18,6 +19,7 @@\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n+ self.scene_to_eval = {}\n self.avg_item = None\n \n def set_class_to_eval_item(self, class_to_eval_item):\n@@ -36,6 +38,14 @@\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n+\n+ if self.scene_to_eval:\n+ json_rep = {'overall': json_rep}\n+ scene_to_eval_json = {}\n+ for scene_id, eval in self.scene_to_eval.items():\n+ scene_to_eval_json[scene_id] = eval.to_json()\n+ json_rep['per_scene'] = scene_to_eval_json\n+\n return json_rep\n \n def save(self, output_uri):\n@@ -47,7 +57,7 @@\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n \n- def merge(self, evaluation):\n+ def merge(self, evaluation, scene_id=None):\n \"\"\"Merge Evaluation for another Scene into this one.\n \n This is useful for computing the average metrics of a set of scenes.\n@@ -68,6 +78,9 @@\n \n self.compute_avg()\n \n+ if scene_id is not None:\n+ self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)\n+\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\ndiff --git a/rastervision/evaluation/classification_evaluator.py b/rastervision/evaluation/classification_evaluator.py\n--- a/rastervision/evaluation/classification_evaluator.py\n+++ b/rastervision/evaluation/classification_evaluator.py\n@@ -36,6 +36,5 @@\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n- evaluation.merge(scene_evaluation)\n-\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n evaluation.save(self.output_uri)\ndiff --git a/rastervision/evaluation/semantic_segmentation_evaluator.py b/rastervision/evaluation/semantic_segmentation_evaluator.py\n--- a/rastervision/evaluation/semantic_segmentation_evaluator.py\n+++ b/rastervision/evaluation/semantic_segmentation_evaluator.py\n@@ -36,7 +36,7 @@\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n- evaluation.merge(scene_evaluation)\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n \n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n@@ -52,6 +52,6 @@\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n- evaluation.merge(scene_evaluation)\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n \n evaluation.save(self.output_uri)\n", "issue": "Include per-scene metrics in eval.json\nIt would be useful to see metrics for each scene in addition to metrics averaged over all scenes. 
\n", "before_files": [{"content": "from abc import (ABC, abstractmethod)\n\nimport json\n\nfrom rastervision.evaluation import ClassEvaluationItem\nfrom rastervision.utils.files import str_to_file\n\n\nclass ClassificationEvaluation(ABC):\n \"\"\"Base class for evaluating predictions for tasks that have classes.\n\n Evaluations can be keyed, for instance, if evaluations happen per class.\n \"\"\"\n\n def __init__(self):\n self.clear()\n\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n self.avg_item = None\n\n def set_class_to_eval_item(self, class_to_eval_item):\n self.class_to_eval_item = class_to_eval_item\n\n def get_by_id(self, key):\n \"\"\"Gets the evaluation for a particular EvaluationItem key\"\"\"\n return self.class_to_eval_item[key]\n\n def has_id(self, key):\n \"\"\"Answers whether or not the EvaluationItem key is represented\"\"\"\n return key in self.class_to_eval_item\n\n def to_json(self):\n json_rep = []\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n return json_rep\n\n def save(self, output_uri):\n \"\"\"Save this Evaluation to a file.\n\n Args:\n output_uri: string URI for the file to write.\n \"\"\"\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n\n def merge(self, evaluation):\n \"\"\"Merge Evaluation for another Scene into this one.\n\n This is useful for computing the average metrics of a set of scenes.\n The results of the averaging are stored in this Evaluation.\n\n Args:\n evaluation: Evaluation to merge into this one\n \"\"\"\n if len(self.class_to_eval_item) == 0:\n self.class_to_eval_item = evaluation.class_to_eval_item\n else:\n for key, other_eval_item in \\\n evaluation.class_to_eval_item.items():\n if self.has_id(key):\n self.get_by_id(key).merge(other_eval_item)\n else:\n self.class_to_eval_item[key] = other_eval_item\n\n self.compute_avg()\n\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\n for eval_item in self.class_to_eval_item.values():\n self.avg_item.merge(eval_item)\n\n @abstractmethod\n def compute(self, ground_truth_labels, prediction_labels):\n \"\"\"Compute metrics for a single scene.\n\n Args:\n ground_truth_labels: Ground Truth labels to evaluate against.\n prediction_labels: The predicted labels to evaluate.\n \"\"\"\n pass\n", "path": "rastervision/evaluation/classification_evaluation.py"}, {"content": "import logging\n\nfrom rastervision.data import ActivateMixin\nfrom rastervision.rv_config import RVConfig\nfrom rastervision.utils.files import (download_if_needed)\nfrom rastervision.evaluation import (ClassificationEvaluator,\n SemanticSegmentationEvaluation)\n\nlog = logging.getLogger(__name__)\n\n\nclass SemanticSegmentationEvaluator(ClassificationEvaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n super().__init__(class_map, output_uri)\n\n def create_evaluation(self):\n return SemanticSegmentationEvaluation(self.class_map)\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = 
label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation)\n\n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n label_store, 'vector_output'):\n tmp_dir = RVConfig.get_tmp_dir().name\n gt_geojson = label_source.source.vector_source.get_geojson()\n for vo in label_store.vector_output:\n pred_geojson = vo['uri']\n mode = vo['mode']\n class_id = vo['class_id']\n pred_geojson_local = download_if_needed(\n pred_geojson, tmp_dir)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n evaluation.merge(scene_evaluation)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/semantic_segmentation_evaluator.py"}, {"content": "from abc import (abstractmethod)\nimport logging\n\nfrom rastervision.evaluation import Evaluator\nfrom rastervision.data import ActivateMixin\n\nlog = logging.getLogger(__name__)\n\n\nclass ClassificationEvaluator(Evaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n self.class_map = class_map\n self.output_uri = output_uri\n\n @abstractmethod\n def create_evaluation(self):\n pass\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/classification_evaluator.py"}], "after_files": [{"content": "from abc import (ABC, abstractmethod)\nimport copy\n\nimport json\n\nfrom rastervision.evaluation import ClassEvaluationItem\nfrom rastervision.utils.files import str_to_file\n\n\nclass ClassificationEvaluation(ABC):\n \"\"\"Base class for evaluating predictions for tasks that have classes.\n\n Evaluations can be keyed, for instance, if evaluations happen per class.\n \"\"\"\n\n def __init__(self):\n self.clear()\n\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n self.scene_to_eval = {}\n self.avg_item = None\n\n def set_class_to_eval_item(self, class_to_eval_item):\n self.class_to_eval_item = class_to_eval_item\n\n def get_by_id(self, key):\n \"\"\"Gets the evaluation for a particular EvaluationItem key\"\"\"\n return self.class_to_eval_item[key]\n\n def has_id(self, key):\n \"\"\"Answers whether or not the EvaluationItem key is represented\"\"\"\n return key in self.class_to_eval_item\n\n def to_json(self):\n json_rep = []\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n\n if self.scene_to_eval:\n json_rep = {'overall': 
json_rep}\n scene_to_eval_json = {}\n for scene_id, eval in self.scene_to_eval.items():\n scene_to_eval_json[scene_id] = eval.to_json()\n json_rep['per_scene'] = scene_to_eval_json\n\n return json_rep\n\n def save(self, output_uri):\n \"\"\"Save this Evaluation to a file.\n\n Args:\n output_uri: string URI for the file to write.\n \"\"\"\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n\n def merge(self, evaluation, scene_id=None):\n \"\"\"Merge Evaluation for another Scene into this one.\n\n This is useful for computing the average metrics of a set of scenes.\n The results of the averaging are stored in this Evaluation.\n\n Args:\n evaluation: Evaluation to merge into this one\n \"\"\"\n if len(self.class_to_eval_item) == 0:\n self.class_to_eval_item = evaluation.class_to_eval_item\n else:\n for key, other_eval_item in \\\n evaluation.class_to_eval_item.items():\n if self.has_id(key):\n self.get_by_id(key).merge(other_eval_item)\n else:\n self.class_to_eval_item[key] = other_eval_item\n\n self.compute_avg()\n\n if scene_id is not None:\n self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)\n\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\n for eval_item in self.class_to_eval_item.values():\n self.avg_item.merge(eval_item)\n\n @abstractmethod\n def compute(self, ground_truth_labels, prediction_labels):\n \"\"\"Compute metrics for a single scene.\n\n Args:\n ground_truth_labels: Ground Truth labels to evaluate against.\n prediction_labels: The predicted labels to evaluate.\n \"\"\"\n pass\n", "path": "rastervision/evaluation/classification_evaluation.py"}, {"content": "import logging\n\nfrom rastervision.data import ActivateMixin\nfrom rastervision.rv_config import RVConfig\nfrom rastervision.utils.files import (download_if_needed)\nfrom rastervision.evaluation import (ClassificationEvaluator,\n SemanticSegmentationEvaluation)\n\nlog = logging.getLogger(__name__)\n\n\nclass SemanticSegmentationEvaluator(ClassificationEvaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n super().__init__(class_map, output_uri)\n\n def create_evaluation(self):\n return SemanticSegmentationEvaluation(self.class_map)\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation, scene_id=scene.id)\n\n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n label_store, 'vector_output'):\n tmp_dir = RVConfig.get_tmp_dir().name\n gt_geojson = label_source.source.vector_source.get_geojson()\n for vo in label_store.vector_output:\n pred_geojson = vo['uri']\n mode = vo['mode']\n class_id = vo['class_id']\n pred_geojson_local = download_if_needed(\n pred_geojson, tmp_dir)\n scene_evaluation = self.create_evaluation()\n 
scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n evaluation.merge(scene_evaluation, scene_id=scene.id)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/semantic_segmentation_evaluator.py"}, {"content": "from abc import (abstractmethod)\nimport logging\n\nfrom rastervision.evaluation import Evaluator\nfrom rastervision.data import ActivateMixin\n\nlog = logging.getLogger(__name__)\n\n\nclass ClassificationEvaluator(Evaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n self.class_map = class_map\n self.output_uri = output_uri\n\n @abstractmethod\n def create_evaluation(self):\n pass\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation, scene_id=scene.id)\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/classification_evaluator.py"}]} | 1,990 | 778 |
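The patch above threads each scene's ID through `merge(..., scene_id=scene.id)`, keeps a deep copy of every per-scene result in `scene_to_eval`, and has `to_json()` emit an `overall` section plus a `per_scene` map whenever per-scene results exist. A minimal, self-contained sketch of that bookkeeping pattern (the class name and metric values below are illustrative stand-ins, not the actual rastervision classes):

```python
import copy
import json


class EvalAggregator:
    """Illustrative stand-in for the patched ClassificationEvaluation:
    one merged (overall) result plus an optional per-scene breakdown."""

    def __init__(self):
        self.overall = {}          # class_name -> running metric value
        self.scene_to_eval = {}    # scene_id -> deep copy of that scene's result

    def merge(self, scene_result, scene_id=None):
        for class_name, value in scene_result.items():
            self.overall[class_name] = self.overall.get(class_name, 0.0) + value
        if scene_id is not None:
            # Deep copy, as in the patch, so later merges cannot mutate history.
            self.scene_to_eval[scene_id] = copy.deepcopy(scene_result)

    def to_json(self):
        json_rep = self.overall
        if self.scene_to_eval:
            # Same shape change as the patch: wrap the overall result and
            # add a per_scene section keyed by scene ID.
            json_rep = {'overall': json_rep, 'per_scene': self.scene_to_eval}
        return json_rep


agg = EvalAggregator()
agg.merge({'building': 0.9}, scene_id='scene-1')
agg.merge({'building': 0.7}, scene_id='scene-2')
print(json.dumps(agg.to_json(), indent=2))
```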
gh_patches_debug_23538 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The first worker may crash in ALLREDUCE mode
When the worker is the only running worker pod, `_get_peer_set` will get an empty peer set.
Then `consensus_init_kwargs` will set "known_addr_list" to an empty list.
This will cause an error in ftlib.
```
[2020-03-30 06:16:07,202] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus
[2020-03-30 06:16:09,206] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()
Setting Bind Address as 11.233.87.89
log file: /tmp/memberlist.log
[2020-03-30 06:16:21,713] [WARNING] [communicator.py:37:__init__] Retry building consensus...
[2020-03-30 06:16:21,713] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus
[2020-03-30 06:16:21,714] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/elasticdl/elasticdl/python/worker/main.py", line 76, in <module>
main()
File "/elasticdl/elasticdl/python/worker/main.py", line 70, in main
set_parallelism=True,
File "/elasticdl/elasticdl/python/worker/worker.py", line 122, in __init__
self._init_from_args(args)
File "/elasticdl/elasticdl/python/worker/worker.py", line 159, in _init_from_args
if self._distribution_strategy == DistributionStrategy.ALLREDUCE
File "/elasticdl/elasticdl/python/collective_ops/communicator.py", line 39, in __init__
known_addr_list=list(self._get_peer_set(service_name))
File "/usr/local/lib/python3.6/dist-packages/ftlib/impl.py", line 137, in manual_join
return self.consensus.manual_join(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py", line 85, in manual_join
self.joined = self._join(known_addr_list, wait_time=wait_time)
File "/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py", line 92, in _join
assert addr_list_len >= 1
AssertionError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/collective_ops/communicator.py`
Content:
```
1 import socket
2
3 from elasticdl.python.common.constants import CollectiveCommunicatorStatus
4 from elasticdl.python.common.log_utils import default_logger as logger
5
6 try:
7 from ftlib import BasicFTLib
8 from ftlib.ftlib_status import FTAllReduceStatus
9
10 _FTLIB_INSTALLED = True
11 except ImportError:
12 BasicFTLib = object
13 FTAllReduceStatus = object
14 _FTLIB_INSTALLED = False
15
16
17 _SUPPORTED_ALLREDUCE_OPS = ["MEAN"]
18 _FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (
19 "FTLib is not installed. Default to succeeded for testing purposes"
20 )
21
22
23 class CollectiveCommunicator(object):
24 def __init__(self, service_name=None):
25 if _FTLIB_INSTALLED:
26 self._ftlib = BasicFTLib(
27 consensus="gossip",
28 commlib="pytorch",
29 consensus_init_kwargs={
30 "known_addr_list": list(self._get_peer_set(service_name)),
31 "custom_bind_addr": socket.gethostbyname(
32 socket.gethostname()
33 ),
34 },
35 )
36 while not self._ftlib.consensus_joined():
37 logger.warning("Retry building consensus...")
38 self._ftlib.manual_join(
39 known_addr_list=list(self._get_peer_set(service_name))
40 )
41 else:
42 logger.warning(
43 "FTLib is not installed. The CollectiveCommunicator "
44 "may not work as expected"
45 )
46 self._ftlib = None
47
48 def allreduce(self, data, op="MEAN"):
49 if data is None:
50 logger.error("Data is required for allreduce operation")
51 return CollectiveCommunicatorStatus.FAILED, data
52 if op not in _SUPPORTED_ALLREDUCE_OPS:
53 logger.error(
54 "%s is not in list of supported allreduce operations: %s"
55 % (op, _SUPPORTED_ALLREDUCE_OPS)
56 )
57 return CollectiveCommunicatorStatus.FAILED, data
58 if self._ftlib is not None:
59 res = self._ftlib.wait_gradients_ready(data)
60 if res == FTAllReduceStatus.SUCCESS:
61 return CollectiveCommunicatorStatus.SUCCEEDED, data
62 else:
63 return CollectiveCommunicatorStatus.FAILED, data
64 else:
65 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
66 return CollectiveCommunicatorStatus.SUCCEEDED, data
67
68 def broadcast(self, data, src_rank):
69 if self._ftlib is not None:
70 res = self._ftlib.broadcast(data, src_rank)
71 if res == FTAllReduceStatus.SUCCESS:
72 return CollectiveCommunicatorStatus.SUCCEEDED, data
73 else:
74 return CollectiveCommunicatorStatus.FAILED, data
75 else:
76 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
77 return CollectiveCommunicatorStatus.SUCCEEDED, data
78
79 def barrier(self):
80 if self._ftlib is not None:
81 res = self._ftlib.barrier()
82 if res == FTAllReduceStatus.SUCCESS:
83 return CollectiveCommunicatorStatus.SUCCEEDED
84 else:
85 return CollectiveCommunicatorStatus.FAILED
86 else:
87 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
88 return CollectiveCommunicatorStatus.SUCCEEDED
89
90 def is_initialized(self):
91 """This will be `False` under three occasions:
92 * New workers report joining in
93 * Collective-communication operations fail or time out
94 * Liveness probe fails for existing workers
95 """
96 if self._ftlib is not None:
97 return self._ftlib.initialized
98 else:
99 return True
100
101 def _get_peer_set(self, svc_name):
102 if svc_name is None:
103 return None
104 my_ip = socket.gethostbyname(socket.gethostname())
105 temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)
106 peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}
107 return peer_set
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/collective_ops/communicator.py b/elasticdl/python/collective_ops/communicator.py
--- a/elasticdl/python/collective_ops/communicator.py
+++ b/elasticdl/python/collective_ops/communicator.py
@@ -23,17 +23,18 @@
class CollectiveCommunicator(object):
def __init__(self, service_name=None):
if _FTLIB_INSTALLED:
+ peer_list = list(self._get_peer_set(service_name))
self._ftlib = BasicFTLib(
consensus="gossip",
commlib="pytorch",
consensus_init_kwargs={
- "known_addr_list": list(self._get_peer_set(service_name)),
+ "known_addr_list": peer_list,
"custom_bind_addr": socket.gethostbyname(
socket.gethostname()
),
},
)
- while not self._ftlib.consensus_joined():
+ while peer_list and not self._ftlib.consensus_joined():
logger.warning("Retry building consensus...")
self._ftlib.manual_join(
known_addr_list=list(self._get_peer_set(service_name))
| {"golden_diff": "diff --git a/elasticdl/python/collective_ops/communicator.py b/elasticdl/python/collective_ops/communicator.py\n--- a/elasticdl/python/collective_ops/communicator.py\n+++ b/elasticdl/python/collective_ops/communicator.py\n@@ -23,17 +23,18 @@\n class CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n+ peer_list = list(self._get_peer_set(service_name))\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n- \"known_addr_list\": list(self._get_peer_set(service_name)),\n+ \"known_addr_list\": peer_list,\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n- while not self._ftlib.consensus_joined():\n+ while peer_list and not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n", "issue": "The first worker may crash in ALLREDUCE mode \nWhen the worker is the only running worker pod, `_get_peer_set` will get an empty peer set.\r\nThen consensus_init_kwars will set \"known_addr_list\" as empty.\r\nThis will cause an error in ftlib.\r\n\r\n\r\n```\r\n[2020-03-30 06:16:07,202] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus\r\n[2020-03-30 06:16:09,206] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()\r\nSetting Bind Address as 11.233.87.89\r\nlog file: /tmp/memberlist.log\r\n[2020-03-30 06:16:21,713] [WARNING] [communicator.py:37:__init__] Retry building consensus...\r\n[2020-03-30 06:16:21,713] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus\r\n[2020-03-30 06:16:21,714] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/elasticdl/elasticdl/python/worker/main.py\", line 76, in <module>\r\n main()\r\n File \"/elasticdl/elasticdl/python/worker/main.py\", line 70, in main\r\n set_parallelism=True,\r\n File \"/elasticdl/elasticdl/python/worker/worker.py\", line 122, in __init__\r\n self._init_from_args(args)\r\n File \"/elasticdl/elasticdl/python/worker/worker.py\", line 159, in _init_from_args\r\n if self._distribution_strategy == DistributionStrategy.ALLREDUCE\r\n File \"/elasticdl/elasticdl/python/collective_ops/communicator.py\", line 39, in __init__\r\n known_addr_list=list(self._get_peer_set(service_name))\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/impl.py\", line 137, in manual_join\r\n return self.consensus.manual_join(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py\", line 85, in manual_join\r\n self.joined = self._join(known_addr_list, wait_time=wait_time)\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py\", line 92, in _join\r\n assert addr_list_len >= 1\r\nAssertionError\r\n```\n", "before_files": [{"content": "import socket\n\nfrom elasticdl.python.common.constants import CollectiveCommunicatorStatus\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\ntry:\n from ftlib import BasicFTLib\n from ftlib.ftlib_status import FTAllReduceStatus\n\n _FTLIB_INSTALLED = True\nexcept ImportError:\n BasicFTLib = object\n FTAllReduceStatus = object\n _FTLIB_INSTALLED = 
False\n\n\n_SUPPORTED_ALLREDUCE_OPS = [\"MEAN\"]\n_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (\n \"FTLib is not installed. Default to succeeded for testing purposes\"\n)\n\n\nclass CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n \"known_addr_list\": list(self._get_peer_set(service_name)),\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n while not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n )\n else:\n logger.warning(\n \"FTLib is not installed. The CollectiveCommunicator \"\n \"may not work as expected\"\n )\n self._ftlib = None\n\n def allreduce(self, data, op=\"MEAN\"):\n if data is None:\n logger.error(\"Data is required for allreduce operation\")\n return CollectiveCommunicatorStatus.FAILED, data\n if op not in _SUPPORTED_ALLREDUCE_OPS:\n logger.error(\n \"%s is not in list of supported allreduce operations: %s\"\n % (op, _SUPPORTED_ALLREDUCE_OPS)\n )\n return CollectiveCommunicatorStatus.FAILED, data\n if self._ftlib is not None:\n res = self._ftlib.wait_gradients_ready(data)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def broadcast(self, data, src_rank):\n if self._ftlib is not None:\n res = self._ftlib.broadcast(data, src_rank)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def barrier(self):\n if self._ftlib is not None:\n res = self._ftlib.barrier()\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED\n else:\n return CollectiveCommunicatorStatus.FAILED\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED\n\n def is_initialized(self):\n \"\"\"This will be `False` under three occasions:\n * New workers report joining in\n * Collective-communication operations fail or time out\n * Liveness probe fails for existing workers\n \"\"\"\n if self._ftlib is not None:\n return self._ftlib.initialized\n else:\n return True\n\n def _get_peer_set(self, svc_name):\n if svc_name is None:\n return None\n my_ip = socket.gethostbyname(socket.gethostname())\n temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)\n peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}\n return peer_set\n", "path": "elasticdl/python/collective_ops/communicator.py"}], "after_files": [{"content": "import socket\n\nfrom elasticdl.python.common.constants import CollectiveCommunicatorStatus\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\ntry:\n from ftlib import BasicFTLib\n from ftlib.ftlib_status import FTAllReduceStatus\n\n _FTLIB_INSTALLED = True\nexcept ImportError:\n BasicFTLib = object\n FTAllReduceStatus = object\n _FTLIB_INSTALLED = False\n\n\n_SUPPORTED_ALLREDUCE_OPS = [\"MEAN\"]\n_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (\n \"FTLib is not installed. 
Default to succeeded for testing purposes\"\n)\n\n\nclass CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n peer_list = list(self._get_peer_set(service_name))\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n \"known_addr_list\": peer_list,\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n while peer_list and not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n )\n else:\n logger.warning(\n \"FTLib is not installed. The CollectiveCommunicator \"\n \"may not work as expected\"\n )\n self._ftlib = None\n\n def allreduce(self, data, op=\"MEAN\"):\n if data is None:\n logger.error(\"Data is required for allreduce operation\")\n return CollectiveCommunicatorStatus.FAILED, data\n if op not in _SUPPORTED_ALLREDUCE_OPS:\n logger.error(\n \"%s is not in list of supported allreduce operations: %s\"\n % (op, _SUPPORTED_ALLREDUCE_OPS)\n )\n return CollectiveCommunicatorStatus.FAILED, data\n if self._ftlib is not None:\n res = self._ftlib.wait_gradients_ready(data)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def broadcast(self, data, src_rank):\n if self._ftlib is not None:\n res = self._ftlib.broadcast(data, src_rank)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def barrier(self):\n if self._ftlib is not None:\n res = self._ftlib.barrier()\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED\n else:\n return CollectiveCommunicatorStatus.FAILED\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED\n\n def is_initialized(self):\n \"\"\"This will be `False` under three occasions:\n * New workers report joining in\n * Collective-communication operations fail or time out\n * Liveness probe fails for existing workers\n \"\"\"\n if self._ftlib is not None:\n return self._ftlib.initialized\n else:\n return True\n\n def _get_peer_set(self, svc_name):\n if svc_name is None:\n return None\n my_ip = socket.gethostbyname(socket.gethostname())\n temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)\n peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}\n return peer_set\n", "path": "elasticdl/python/collective_ops/communicator.py"}]} | 2,041 | 248 |
gh_patches_debug_53980 | rasdani/github-patches | git_diff | scikit-hep__pyhf-2135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Matplotlib broken in Pyodide demo in docs
In the docs https://pyhf.readthedocs.io/en/v0.7.0/, the Pyodide example is broken for me:
```pytb
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
Cell In[1], line 3
1 import piplite
2 await piplite.install(["pyhf==0.7.0"])
----> 3 get_ipython().run_line_magic('matplotlib', 'inline')
4 import pyhf
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:2369, in InteractiveShell.run_line_magic(self, magic_name, line, _stack_depth)
2367 kwargs['local_ns'] = self.get_local_scope(stack_depth)
2368 with self.builtin_trap:
-> 2369 result = fn(*args, **kwargs)
2371 # The code below prevents the output from being displayed
2372 # when using magics with decodator @output_can_be_silenced
2373 # when the last Python token in the expression is a ';'.
2374 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
File /lib/python3.10/site-packages/IPython/core/magics/pylab.py:99, in PylabMagics.matplotlib(self, line)
97 print("Available matplotlib backends: %s" % backends_list)
98 else:
---> 99 gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)
100 self._show_matplotlib_backend(args.gui, backend)
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:3540, in InteractiveShell.enable_matplotlib(self, gui)
3519 def enable_matplotlib(self, gui=None):
3520 """Enable interactive matplotlib and inline figure support.
3521
3522 This takes the following steps:
(...)
3538 display figures inline.
3539 """
-> 3540 from matplotlib_inline.backend_inline import configure_inline_support
3542 from IPython.core import pylabtools as pt
3543 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
File /lib/python3.10/site-packages/matplotlib_inline/__init__.py:1
----> 1 from . import backend_inline, config # noqa
2 __version__ = "0.1.6" # noqa
File /lib/python3.10/site-packages/matplotlib_inline/backend_inline.py:6
1 """A matplotlib backend for publishing figures via display_data"""
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the BSD 3-Clause License.
----> 6 import matplotlib
7 from matplotlib import colors
8 from matplotlib.backends import backend_agg
ModuleNotFoundError: The module 'matplotlib' is included in the Pyodide distribution, but it is not installed.
You can install it by calling:
await micropip.install("matplotlib") in Python, or
await pyodide.loadPackage("matplotlib") in JavaScript
See https://pyodide.org/en/stable/usage/loading-packages.html for more details.
```
It used to work previously, though I cannot say for sure when it last worked for me. Running on macOS (ARM), I tried Firefox and Chrome (resulting in the above), while Safari seems stuck executing the import commands provided.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/generate_jupyterlite_iframe.py`
Content:
```
1 import urllib.parse
2
3
4 def main():
5 code = """\
6 import piplite
7 await piplite.install(["pyhf==0.7.0"])
8 %matplotlib inline
9 import pyhf\
10 """
11
12 parsed_url = urllib.parse.quote(code)
13 url_base = "https://jupyterlite.github.io/demo/repl/index.html"
14 jupyterlite_options = "?kernel=python&toolbar=1&code="
15 jupyterlite_url = url_base + jupyterlite_options + parsed_url
16
17 print(f"# jupyterlite URL:\n{jupyterlite_url}")
18
19 jupyterlite_iframe_rst = f"""\
20 <iframe
21 src="{jupyterlite_url}"
22 width="100%"
23 height="500px"
24 ></iframe>\
25 """
26 print(f"\n# RST for iframe for jupyterlite.rst:\n{jupyterlite_iframe_rst}")
27
28
29 if __name__ == "__main__":
30 raise SystemExit(main())
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py
--- a/docs/generate_jupyterlite_iframe.py
+++ b/docs/generate_jupyterlite_iframe.py
@@ -4,7 +4,7 @@
def main():
code = """\
import piplite
-await piplite.install(["pyhf==0.7.0"])
+await piplite.install(["pyhf==0.7.0", "matplotlib>=3.0.0"])
%matplotlib inline
import pyhf\
"""
| {"golden_diff": "diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py\n--- a/docs/generate_jupyterlite_iframe.py\n+++ b/docs/generate_jupyterlite_iframe.py\n@@ -4,7 +4,7 @@\n def main():\n code = \"\"\"\\\n import piplite\n-await piplite.install([\"pyhf==0.7.0\"])\n+await piplite.install([\"pyhf==0.7.0\", \"matplotlib>=3.0.0\"])\n %matplotlib inline\n import pyhf\\\n \"\"\"\n", "issue": "Matplotlib broken in Pyodide demo in docs\nIn the docs https://pyhf.readthedocs.io/en/v0.7.0/, the Pyodide example is broken for me:\r\n```pytb\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\nCell In[1], line 3\r\n 1 import piplite\r\n 2 await piplite.install([\"pyhf==0.7.0\"])\r\n----> 3 get_ipython().run_line_magic('matplotlib', 'inline')\r\n 4 import pyhf\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/interactiveshell.py:2369, in InteractiveShell.run_line_magic(self, magic_name, line, _stack_depth)\r\n 2367 kwargs['local_ns'] = self.get_local_scope(stack_depth)\r\n 2368 with self.builtin_trap:\r\n-> 2369 result = fn(*args, **kwargs)\r\n 2371 # The code below prevents the output from being displayed\r\n 2372 # when using magics with decodator @output_can_be_silenced\r\n 2373 # when the last Python token in the expression is a ';'.\r\n 2374 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/magics/pylab.py:99, in PylabMagics.matplotlib(self, line)\r\n 97 print(\"Available matplotlib backends: %s\" % backends_list)\r\n 98 else:\r\n---> 99 gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)\r\n 100 self._show_matplotlib_backend(args.gui, backend)\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/interactiveshell.py:3540, in InteractiveShell.enable_matplotlib(self, gui)\r\n 3519 def enable_matplotlib(self, gui=None):\r\n 3520 \"\"\"Enable interactive matplotlib and inline figure support.\r\n 3521 \r\n 3522 This takes the following steps:\r\n (...)\r\n 3538 display figures inline.\r\n 3539 \"\"\"\r\n-> 3540 from matplotlib_inline.backend_inline import configure_inline_support\r\n 3542 from IPython.core import pylabtools as pt\r\n 3543 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)\r\n\r\nFile /lib/python3.10/site-packages/matplotlib_inline/__init__.py:1\r\n----> 1 from . import backend_inline, config # noqa\r\n 2 __version__ = \"0.1.6\" # noqa\r\n\r\nFile /lib/python3.10/site-packages/matplotlib_inline/backend_inline.py:6\r\n 1 \"\"\"A matplotlib backend for publishing figures via display_data\"\"\"\r\n 3 # Copyright (c) IPython Development Team.\r\n 4 # Distributed under the terms of the BSD 3-Clause License.\r\n----> 6 import matplotlib\r\n 7 from matplotlib import colors\r\n 8 from matplotlib.backends import backend_agg\r\n\r\nModuleNotFoundError: The module 'matplotlib' is included in the Pyodide distribution, but it is not installed.\r\nYou can install it by calling:\r\n await micropip.install(\"matplotlib\") in Python, or\r\n await pyodide.loadPackage(\"matplotlib\") in JavaScript\r\nSee https://pyodide.org/en/stable/usage/loading-packages.html for more details.\r\n```\r\nIt used to work previously, though I can not say for sure when it last worked for me. 
Running on MacOS (ARM), tried Firefox and Chrome (resulting in the above), while Safari seems stuck executing the import commands provided.\n", "before_files": [{"content": "import urllib.parse\n\n\ndef main():\n code = \"\"\"\\\nimport piplite\nawait piplite.install([\"pyhf==0.7.0\"])\n%matplotlib inline\nimport pyhf\\\n\"\"\"\n\n parsed_url = urllib.parse.quote(code)\n url_base = \"https://jupyterlite.github.io/demo/repl/index.html\"\n jupyterlite_options = \"?kernel=python&toolbar=1&code=\"\n jupyterlite_url = url_base + jupyterlite_options + parsed_url\n\n print(f\"# jupyterlite URL:\\n{jupyterlite_url}\")\n\n jupyterlite_iframe_rst = f\"\"\"\\\n <iframe\n src=\"{jupyterlite_url}\"\n width=\"100%\"\n height=\"500px\"\n ></iframe>\\\n\"\"\"\n print(f\"\\n# RST for iframe for jupyterlite.rst:\\n{jupyterlite_iframe_rst}\")\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "docs/generate_jupyterlite_iframe.py"}], "after_files": [{"content": "import urllib.parse\n\n\ndef main():\n code = \"\"\"\\\nimport piplite\nawait piplite.install([\"pyhf==0.7.0\", \"matplotlib>=3.0.0\"])\n%matplotlib inline\nimport pyhf\\\n\"\"\"\n\n parsed_url = urllib.parse.quote(code)\n url_base = \"https://jupyterlite.github.io/demo/repl/index.html\"\n jupyterlite_options = \"?kernel=python&toolbar=1&code=\"\n jupyterlite_url = url_base + jupyterlite_options + parsed_url\n\n print(f\"# jupyterlite URL:\\n{jupyterlite_url}\")\n\n jupyterlite_iframe_rst = f\"\"\"\\\n <iframe\n src=\"{jupyterlite_url}\"\n width=\"100%\"\n height=\"500px\"\n ></iframe>\\\n\"\"\"\n print(f\"\\n# RST for iframe for jupyterlite.rst:\\n{jupyterlite_iframe_rst}\")\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "docs/generate_jupyterlite_iframe.py"}]} | 1,352 | 120 |
gh_patches_debug_20110 | rasdani/github-patches | git_diff | pytorch__ignite-2639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Code formatting issue with latest flake8
https://github.com/pytorch/ignite/runs/7781175697?check_suite_focus=true#step:11:84
```
Collecting flake8
Downloading flake8-5.0.4-py2.py3-none-any.whl (61 kB)
+ flake8 ignite tests examples --config setup.cfg
ignite/metrics/psnr.py:12:121: E501 line too long (121 > 120 characters)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/psnr.py`
Content:
```
1 from typing import Callable, Sequence, Union
2
3 import torch
4
5 from ignite.exceptions import NotComputableError
6 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
7
8 __all__ = ["PSNR"]
9
10
11 class PSNR(Metric):
12 r"""Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
13
14 .. math::
15 \text{PSNR}(I, J) = 10 * \log_{10}\left(\frac{ MAX_{I}^2 }{ \text{ MSE } }\right)
16
17 where :math:`\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.
18
19 - `y_pred` and `y` **must** have (batch_size, ...) shape.
20 - `y_pred` and `y` **must** have same dtype and same shape.
21
22 Args:
23 data_range: The data range of the target image (distance between minimum
24 and maximum possible values).
25 For other data types, please set the data range, otherwise an exception will be raised.
26 output_transform: A callable that is used to transform the Engine’s
27 process_function’s output into the form expected by the metric.
28 device: specifies which device updates are accumulated on.
29 Setting the metric’s device to be the same as your update arguments ensures
30 the update method is non-blocking. By default, CPU.
31
32 Examples:
33 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
34 The output of the engine's ``process_function`` needs to be in format of
35 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
36
37 For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
38
39 .. include:: defaults.rst
40 :start-after: :orphan:
41
42 .. testcode::
43
44 psnr = PSNR(data_range=1.0)
45 psnr.attach(default_evaluator, 'psnr')
46 preds = torch.rand([4, 3, 16, 16])
47 target = preds * 0.75
48 state = default_evaluator.run([[preds, target]])
49 print(state.metrics['psnr'])
50
51 .. testoutput::
52
53 16.8671405...
54
55 This metric by default accepts Grayscale or RGB images. But if you have YCbCr or YUV images, only
56 Y channel is needed for computing PSNR. And, this can be done with ``output_transform``. For instance,
57
58 .. testcode::
59
60 def get_y_channel(output):
61 y_pred, y = output
62 # y_pred and y are (B, 3, H, W) and YCbCr or YUV images
63 # let's select y channel
64 return y_pred[:, 0, ...], y[:, 0, ...]
65
66 psnr = PSNR(data_range=219, output_transform=get_y_channel)
67 psnr.attach(default_evaluator, 'psnr')
68 preds = 219 * torch.rand([4, 3, 16, 16])
69 target = preds * 0.75
70 state = default_evaluator.run([[preds, target]])
71 print(state.metrics['psnr'])
72
73 .. testoutput::
74
75 16.7027966...
76
77 .. versionadded:: 0.4.3
78 """
79
80 def __init__(
81 self,
82 data_range: Union[int, float],
83 output_transform: Callable = lambda x: x,
84 device: Union[str, torch.device] = torch.device("cpu"),
85 ):
86 super().__init__(output_transform=output_transform, device=device)
87 self.data_range = data_range
88
89 def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:
90 y_pred, y = output
91 if y_pred.dtype != y.dtype:
92 raise TypeError(
93 f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
94 )
95
96 if y_pred.shape != y.shape:
97 raise ValueError(
98 f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
99 )
100
101 @reinit__is_reduced
102 def reset(self) -> None:
103 self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)
104 self._num_examples = 0
105
106 @reinit__is_reduced
107 def update(self, output: Sequence[torch.Tensor]) -> None:
108 self._check_shape_dtype(output)
109 y_pred, y = output[0].detach(), output[1].detach()
110
111 dim = tuple(range(1, y.ndim))
112 mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)
113 self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range ** 2 / (mse_error + 1e-10))).to(
114 device=self._device
115 )
116 self._num_examples += y.shape[0]
117
118 @sync_all_reduce("_sum_of_batchwise_psnr", "_num_examples")
119 def compute(self) -> torch.Tensor:
120 if self._num_examples == 0:
121 raise NotComputableError("PSNR must have at least one example before it can be computed.")
122 return self._sum_of_batchwise_psnr / self._num_examples
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/metrics/psnr.py b/ignite/metrics/psnr.py
--- a/ignite/metrics/psnr.py
+++ b/ignite/metrics/psnr.py
@@ -9,7 +9,8 @@
class PSNR(Metric):
- r"""Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
+ r"""Computes average
+ `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
.. math::
\text{PSNR}(I, J) = 10 * \log_{10}\left(\frac{ MAX_{I}^2 }{ \text{ MSE } }\right)
@@ -34,7 +35,8 @@
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
- For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
+ For more information on how metric works with :class:`~ignite.engine.engine.Engine`,
+ visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
| {"golden_diff": "diff --git a/ignite/metrics/psnr.py b/ignite/metrics/psnr.py\n--- a/ignite/metrics/psnr.py\n+++ b/ignite/metrics/psnr.py\n@@ -9,7 +9,8 @@\n \n \n class PSNR(Metric):\n- r\"\"\"Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n+ r\"\"\"Computes average\n+ `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n \n .. math::\n \\text{PSNR}(I, J) = 10 * \\log_{10}\\left(\\frac{ MAX_{I}^2 }{ \\text{ MSE } }\\right)\n@@ -34,7 +35,8 @@\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n \n- For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n+ For more information on how metric works with :class:`~ignite.engine.engine.Engine`,\n+ visit :ref:`attach-engine`.\n \n .. include:: defaults.rst\n :start-after: :orphan:\n", "issue": "Code formatting issue with latest flake8\n\r\nhttps://github.com/pytorch/ignite/runs/7781175697?check_suite_focus=true#step:11:84\r\n\r\n```\r\nCollecting flake8\r\n Downloading flake8-5.0.4-py2.py3-none-any.whl (61 kB)\r\n\r\n+ flake8 ignite tests examples --config setup.cfg\r\nignite/metrics/psnr.py:12:121: E501 line too long (121 > 120 characters)\r\n```\n", "before_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"PSNR\"]\n\n\nclass PSNR(Metric):\n r\"\"\"Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n\n .. math::\n \\text{PSNR}(I, J) = 10 * \\log_{10}\\left(\\frac{ MAX_{I}^2 }{ \\text{ MSE } }\\right)\n\n where :math:`\\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.\n\n - `y_pred` and `y` **must** have (batch_size, ...) shape.\n - `y_pred` and `y` **must** have same dtype and same shape.\n\n Args:\n data_range: The data range of the target image (distance between minimum\n and maximum possible values).\n For other data types, please set the data range, otherwise an exception will be raised.\n output_transform: A callable that is used to transform the Engine\u2019s\n process_function\u2019s output into the form expected by the metric.\n device: specifies which device updates are accumulated on.\n Setting the metric\u2019s device to be the same as your update arguments ensures\n the update method is non-blocking. By default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n psnr = PSNR(data_range=1.0)\n psnr.attach(default_evaluator, 'psnr')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.8671405...\n\n This metric by default accepts Grayscale or RGB images. But if you have YCbCr or YUV images, only\n Y channel is needed for computing PSNR. And, this can be done with ``output_transform``. For instance,\n\n .. 
testcode::\n\n def get_y_channel(output):\n y_pred, y = output\n # y_pred and y are (B, 3, H, W) and YCbCr or YUV images\n # let's select y channel\n return y_pred[:, 0, ...], y[:, 0, ...]\n\n psnr = PSNR(data_range=219, output_transform=get_y_channel)\n psnr.attach(default_evaluator, 'psnr')\n preds = 219 * torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.7027966...\n\n .. versionadded:: 0.4.3\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super().__init__(output_transform=output_transform, device=device)\n self.data_range = data_range\n\n def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n self._check_shape_dtype(output)\n y_pred, y = output[0].detach(), output[1].detach()\n\n dim = tuple(range(1, y.ndim))\n mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)\n self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range ** 2 / (mse_error + 1e-10))).to(\n device=self._device\n )\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_batchwise_psnr\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"PSNR must have at least one example before it can be computed.\")\n return self._sum_of_batchwise_psnr / self._num_examples\n", "path": "ignite/metrics/psnr.py"}], "after_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"PSNR\"]\n\n\nclass PSNR(Metric):\n r\"\"\"Computes average\n `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n\n .. math::\n \\text{PSNR}(I, J) = 10 * \\log_{10}\\left(\\frac{ MAX_{I}^2 }{ \\text{ MSE } }\\right)\n\n where :math:`\\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.\n\n - `y_pred` and `y` **must** have (batch_size, ...) shape.\n - `y_pred` and `y` **must** have same dtype and same shape.\n\n Args:\n data_range: The data range of the target image (distance between minimum\n and maximum possible values).\n For other data types, please set the data range, otherwise an exception will be raised.\n output_transform: A callable that is used to transform the Engine\u2019s\n process_function\u2019s output into the form expected by the metric.\n device: specifies which device updates are accumulated on.\n Setting the metric\u2019s device to be the same as your update arguments ensures\n the update method is non-blocking. 
By default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`,\n visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n psnr = PSNR(data_range=1.0)\n psnr.attach(default_evaluator, 'psnr')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.8671405...\n\n This metric by default accepts Grayscale or RGB images. But if you have YCbCr or YUV images, only\n Y channel is needed for computing PSNR. And, this can be done with ``output_transform``. For instance,\n\n .. testcode::\n\n def get_y_channel(output):\n y_pred, y = output\n # y_pred and y are (B, 3, H, W) and YCbCr or YUV images\n # let's select y channel\n return y_pred[:, 0, ...], y[:, 0, ...]\n\n psnr = PSNR(data_range=219, output_transform=get_y_channel)\n psnr.attach(default_evaluator, 'psnr')\n preds = 219 * torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.7027966...\n\n .. versionadded:: 0.4.3\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super().__init__(output_transform=output_transform, device=device)\n self.data_range = data_range\n\n def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n self._check_shape_dtype(output)\n y_pred, y = output[0].detach(), output[1].detach()\n\n dim = tuple(range(1, y.ndim))\n mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)\n self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range ** 2 / (mse_error + 1e-10))).to(\n device=self._device\n )\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_batchwise_psnr\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"PSNR must have at least one example before it can be computed.\")\n return self._sum_of_batchwise_psnr / self._num_examples\n", "path": "ignite/metrics/psnr.py"}]} | 1,906 | 305 |
gh_patches_debug_36049 | rasdani/github-patches | git_diff | mozilla__pontoon-2716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pretranslated Fluent string has the ID translated or modified
It happened for at least two strings.
```
remember-pw-link = Remember your password? Sign in
```
Became
```
Remember-pw-link = Ricordi la password? Accedi
```
No clue why it changed to uppercase.
On the other hand, for
```
plan-price-interval-year =
{ $intervalCount ->
[one] { $amount } all’anno
*[other] { $amount } ogni { $intervalCount } anni
}
.title =
{ $intervalCount ->
[one] { $amount } all’anno
*[other] { $amount } ogni { $intervalCount } anni
}
```
The ID was translated to `piano-prezzo-intervallo-anno` (but the translation was otherwise good).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/pretranslation/pretranslate.py`
Content:
```
1 import operator
2
3 from fluent.syntax import FluentSerializer
4 from functools import reduce
5
6 from django.db.models import CharField, Value as V
7 from django.db.models.functions import Concat
8
9 from pontoon.base.models import User, TranslatedResource
10 from pontoon.machinery.utils import (
11 get_google_translate_data,
12 get_translation_memory_data,
13 )
14
15 from pontoon.base.templatetags.helpers import (
16 as_simple_translation,
17 is_single_input_ftl_string,
18 get_reconstructed_message,
19 )
20
21
22 serializer = FluentSerializer()
23
24
25 def get_translations(entity, locale):
26 """
27 Get pretranslations for the entity-locale pair
28
29 :arg Entity entity: the Entity object
30 :arg Locale locale: the Locale object
31
32 :returns: a list of tuple with:
33 - a pretranslation of the entity
34 - plural form
35 - user - tm_user/gt_user
36 """
37 tm_user = User.objects.get(email="[email protected]")
38 gt_user = User.objects.get(email="[email protected]")
39
40 strings = []
41 plural_forms = range(0, locale.nplurals or 1)
42
43 entity_string = (
44 as_simple_translation(entity.string)
45 if is_single_input_ftl_string(entity.string)
46 else entity.string
47 )
48
49 # Try to get matches from translation_memory
50 tm_response = get_translation_memory_data(
51 text=entity_string,
52 locale=locale,
53 )
54
55 tm_response = [t for t in tm_response if int(t["quality"]) == 100]
56
57 if tm_response:
58 if entity.string_plural == "":
59 translation = tm_response[0]["target"]
60
61 if entity.string != entity_string:
62 translation = serializer.serialize_entry(
63 get_reconstructed_message(entity.string, translation)
64 )
65
66 strings = [(translation, None, tm_user)]
67 else:
68 for plural_form in plural_forms:
69 strings.append((tm_response[0]["target"], plural_form, tm_user))
70
71 # Else fetch from google translate
72 elif locale.google_translate_code:
73 gt_response = get_google_translate_data(
74 text=entity.string,
75 locale=locale,
76 )
77
78 if gt_response["status"]:
79 if entity.string_plural == "":
80 strings = [(gt_response["translation"], None, gt_user)]
81 else:
82 for plural_form in plural_forms:
83 strings.append((gt_response["translation"], plural_form, gt_user))
84 return strings
85
86
87 def update_changed_instances(tr_filter, tr_dict, translations):
88 """
89 Update the latest activity and stats for changed Locales, ProjectLocales
90 & TranslatedResources
91 """
92 tr_filter = tuple(tr_filter)
93 # Combine all generated filters with an OK operator.
94 # `operator.ior` is the '|' Python operator, which turns into a logical OR
95 # when used between django ORM query objects.
96 tr_query = reduce(operator.ior, tr_filter)
97
98 translatedresources = TranslatedResource.objects.filter(tr_query).annotate(
99 locale_resource=Concat(
100 "locale_id", V("-"), "resource_id", output_field=CharField()
101 )
102 )
103
104 translatedresources.update_stats()
105
106 for tr in translatedresources:
107 index = tr_dict[tr.locale_resource]
108 translation = translations[index]
109 translation.update_latest_translation()
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py
--- a/pontoon/pretranslation/pretranslate.py
+++ b/pontoon/pretranslation/pretranslate.py
@@ -18,6 +18,7 @@
get_reconstructed_message,
)
+UNTRANSLATABLE_KEY = "AIzaSyDX3R5Y1kxh_8lJ4OAO"
serializer = FluentSerializer()
@@ -40,7 +41,7 @@
strings = []
plural_forms = range(0, locale.nplurals or 1)
- entity_string = (
+ tm_input = (
as_simple_translation(entity.string)
if is_single_input_ftl_string(entity.string)
else entity.string
@@ -48,7 +49,7 @@
# Try to get matches from translation_memory
tm_response = get_translation_memory_data(
- text=entity_string,
+ text=tm_input,
locale=locale,
)
@@ -58,7 +59,7 @@
if entity.string_plural == "":
translation = tm_response[0]["target"]
- if entity.string != entity_string:
+ if entity.string != tm_input:
translation = serializer.serialize_entry(
get_reconstructed_message(entity.string, translation)
)
@@ -70,12 +71,23 @@
# Else fetch from google translate
elif locale.google_translate_code:
+ gt_input = (
+ entity.string.replace(entity.key, UNTRANSLATABLE_KEY, 1)
+ if entity.resource.format == "ftl"
+ else entity.string
+ )
+
gt_response = get_google_translate_data(
- text=entity.string,
+ text=gt_input,
locale=locale,
)
if gt_response["status"]:
+ if entity.string != gt_input:
+ gt_response["translation"] = gt_response["translation"].replace(
+ UNTRANSLATABLE_KEY, entity.key
+ )
+
if entity.string_plural == "":
strings = [(gt_response["translation"], None, gt_user)]
else:
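
The patch above masks the Fluent message ID with a sentinel string (`UNTRANSLATABLE_KEY`) before the text is sent to Google Translate, then swaps the original key back into the returned translation, so the engine can no longer translate or re-case the ID. Below is a minimal, self-contained sketch of that idea; `fake_translate` is a stand-in for the real machine-translation call and is not part of Pontoon.

```python
UNTRANSLATABLE_KEY = "AIzaSyDX3R5Y1kxh_8lJ4OAO"  # sentinel an MT engine will leave untouched


def pretranslate_ftl(string, key, translate):
    # Hide the message ID so the engine cannot translate or capitalize it.
    masked = string.replace(key, UNTRANSLATABLE_KEY, 1)
    translated = translate(masked)
    # Restore the original ID in the engine's output.
    return translated.replace(UNTRANSLATABLE_KEY, key)


def fake_translate(text):
    # Stand-in for the Google Translate call; a real engine might also re-case the ID.
    return text.replace("Remember your password? Sign in", "Ricordi la password? Accedi")


print(pretranslate_ftl("remember-pw-link = Remember your password? Sign in",
                       "remember-pw-link", fake_translate))
# -> remember-pw-link = Ricordi la password? Accedi
```
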
| {"golden_diff": "diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py\n--- a/pontoon/pretranslation/pretranslate.py\n+++ b/pontoon/pretranslation/pretranslate.py\n@@ -18,6 +18,7 @@\n get_reconstructed_message,\n )\n \n+UNTRANSLATABLE_KEY = \"AIzaSyDX3R5Y1kxh_8lJ4OAO\"\n \n serializer = FluentSerializer()\n \n@@ -40,7 +41,7 @@\n strings = []\n plural_forms = range(0, locale.nplurals or 1)\n \n- entity_string = (\n+ tm_input = (\n as_simple_translation(entity.string)\n if is_single_input_ftl_string(entity.string)\n else entity.string\n@@ -48,7 +49,7 @@\n \n # Try to get matches from translation_memory\n tm_response = get_translation_memory_data(\n- text=entity_string,\n+ text=tm_input,\n locale=locale,\n )\n \n@@ -58,7 +59,7 @@\n if entity.string_plural == \"\":\n translation = tm_response[0][\"target\"]\n \n- if entity.string != entity_string:\n+ if entity.string != tm_input:\n translation = serializer.serialize_entry(\n get_reconstructed_message(entity.string, translation)\n )\n@@ -70,12 +71,23 @@\n \n # Else fetch from google translate\n elif locale.google_translate_code:\n+ gt_input = (\n+ entity.string.replace(entity.key, UNTRANSLATABLE_KEY, 1)\n+ if entity.resource.format == \"ftl\"\n+ else entity.string\n+ )\n+\n gt_response = get_google_translate_data(\n- text=entity.string,\n+ text=gt_input,\n locale=locale,\n )\n \n if gt_response[\"status\"]:\n+ if entity.string != gt_input:\n+ gt_response[\"translation\"] = gt_response[\"translation\"].replace(\n+ UNTRANSLATABLE_KEY, entity.key\n+ )\n+\n if entity.string_plural == \"\":\n strings = [(gt_response[\"translation\"], None, gt_user)]\n else:\n", "issue": "Pretranslated Fluent string has the ID translated or modified\nIt happened for at least two strings.\r\n\r\n```\r\nremember-pw-link = Remember your password? Sign in\r\n```\r\n\r\nBecame\r\n\r\n```\r\nRemember-pw-link = Ricordi la password? 
Accedi\r\n```\r\n\r\nNo clue why it changed to uppercase.\r\n\r\nOn the other hand, for \r\n\r\n```\r\nplan-price-interval-year =\r\n { $intervalCount ->\r\n [one] { $amount } all\u2019anno\r\n *[other] { $amount } ogni { $intervalCount } anni\r\n }\r\n .title =\r\n { $intervalCount ->\r\n [one] { $amount } all\u2019anno\r\n *[other] { $amount } ogni { $intervalCount } anni\r\n }\r\n```\r\n\r\nThe id was translated to `piano-prezzo-intervallo-anno`(but the translation was good besides that).\r\n\n", "before_files": [{"content": "import operator\n\nfrom fluent.syntax import FluentSerializer\nfrom functools import reduce\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\n\nfrom pontoon.base.templatetags.helpers import (\n as_simple_translation,\n is_single_input_ftl_string,\n get_reconstructed_message,\n)\n\n\nserializer = FluentSerializer()\n\n\ndef get_translations(entity, locale):\n \"\"\"\n Get pretranslations for the entity-locale pair\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n\n :returns: a list of tuple with:\n - a pretranslation of the entity\n - plural form\n - user - tm_user/gt_user\n \"\"\"\n tm_user = User.objects.get(email=\"[email protected]\")\n gt_user = User.objects.get(email=\"[email protected]\")\n\n strings = []\n plural_forms = range(0, locale.nplurals or 1)\n\n entity_string = (\n as_simple_translation(entity.string)\n if is_single_input_ftl_string(entity.string)\n else entity.string\n )\n\n # Try to get matches from translation_memory\n tm_response = get_translation_memory_data(\n text=entity_string,\n locale=locale,\n )\n\n tm_response = [t for t in tm_response if int(t[\"quality\"]) == 100]\n\n if tm_response:\n if entity.string_plural == \"\":\n translation = tm_response[0][\"target\"]\n\n if entity.string != entity_string:\n translation = serializer.serialize_entry(\n get_reconstructed_message(entity.string, translation)\n )\n\n strings = [(translation, None, tm_user)]\n else:\n for plural_form in plural_forms:\n strings.append((tm_response[0][\"target\"], plural_form, tm_user))\n\n # Else fetch from google translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(\n text=entity.string,\n locale=locale,\n )\n\n if gt_response[\"status\"]:\n if entity.string_plural == \"\":\n strings = [(gt_response[\"translation\"], None, gt_user)]\n else:\n for plural_form in plural_forms:\n strings.append((gt_response[\"translation\"], plural_form, gt_user))\n return strings\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": 
"pontoon/pretranslation/pretranslate.py"}], "after_files": [{"content": "import operator\n\nfrom fluent.syntax import FluentSerializer\nfrom functools import reduce\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\n\nfrom pontoon.base.templatetags.helpers import (\n as_simple_translation,\n is_single_input_ftl_string,\n get_reconstructed_message,\n)\n\nUNTRANSLATABLE_KEY = \"AIzaSyDX3R5Y1kxh_8lJ4OAO\"\n\nserializer = FluentSerializer()\n\n\ndef get_translations(entity, locale):\n \"\"\"\n Get pretranslations for the entity-locale pair\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n\n :returns: a list of tuple with:\n - a pretranslation of the entity\n - plural form\n - user - tm_user/gt_user\n \"\"\"\n tm_user = User.objects.get(email=\"[email protected]\")\n gt_user = User.objects.get(email=\"[email protected]\")\n\n strings = []\n plural_forms = range(0, locale.nplurals or 1)\n\n tm_input = (\n as_simple_translation(entity.string)\n if is_single_input_ftl_string(entity.string)\n else entity.string\n )\n\n # Try to get matches from translation_memory\n tm_response = get_translation_memory_data(\n text=tm_input,\n locale=locale,\n )\n\n tm_response = [t for t in tm_response if int(t[\"quality\"]) == 100]\n\n if tm_response:\n if entity.string_plural == \"\":\n translation = tm_response[0][\"target\"]\n\n if entity.string != tm_input:\n translation = serializer.serialize_entry(\n get_reconstructed_message(entity.string, translation)\n )\n\n strings = [(translation, None, tm_user)]\n else:\n for plural_form in plural_forms:\n strings.append((tm_response[0][\"target\"], plural_form, tm_user))\n\n # Else fetch from google translate\n elif locale.google_translate_code:\n gt_input = (\n entity.string.replace(entity.key, UNTRANSLATABLE_KEY, 1)\n if entity.resource.format == \"ftl\"\n else entity.string\n )\n\n gt_response = get_google_translate_data(\n text=gt_input,\n locale=locale,\n )\n\n if gt_response[\"status\"]:\n if entity.string != gt_input:\n gt_response[\"translation\"] = gt_response[\"translation\"].replace(\n UNTRANSLATABLE_KEY, entity.key\n )\n\n if entity.string_plural == \"\":\n strings = [(gt_response[\"translation\"], None, gt_user)]\n else:\n for plural_form in plural_forms:\n strings.append((gt_response[\"translation\"], plural_form, gt_user))\n return strings\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}]} | 1,374 | 467 |
gh_patches_debug_22239 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2359 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Attendee list bug when adding or removing users
## What kind of an issue is this?
- Bug report
## What is the expected behaviour?
Every box and number should appear under its corresponding column. Even if we remove or add users through the dashboard, the layout should not change.

## What is the current behaviour?
If you remove or add a user through the dashboard menu on the attendee list, it will look like the screenshot above: two boxes appear in seemingly random places, the x for removing users is pushed all the way to the right, and the text "none" occupies the remove column. If you refresh the page it goes back to the expected behaviour; it only happens right after deleting/adding a user.

## How do you reproduce this problem?
Remove or add a user to the attendee list manually.
1. Go to the attendee list
2. Add a user by writing their name OR remove a user from the list
## Other information
This might be a bug that I didn't catch when I added "year of study" to the attendee list. I'm not sure if this was an issue before, but since it hasn't been brought up I will assume it comes from that pull request of mine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/events/dashboard/utils.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from django.urls import reverse
3
4 from apps.authentication.models import OnlineUser as User
5 from apps.events.models import Attendee, Event
6
7
8 def _get_attendee(attendee_id):
9 try:
10 return Attendee.objects.get(pk=attendee_id)
11 except Attendee.DoesNotExist:
12 return None
13
14
15 def event_ajax_handler(event: Event, request):
16 action = request.POST.get('action')
17 administrating_user = request.user
18 attendee_id = request.POST.get('attendee_id')
19 user_id = request.POST.get('user_id')
20
21 if action == 'attended':
22 attendee = _get_attendee(attendee_id)
23 if not attendee:
24 return {'message': f'Fant ingen påmeldte med oppgitt ID ({attendee_id}).', 'status': 400}
25 return handle_attended(attendee)
26 elif action == 'paid':
27 attendee = _get_attendee(attendee_id)
28 if not attendee:
29 return {'message': f'Fant ingen påmeldte med oppgitt ID ({attendee_id}).', 'status': 400}
30 return handle_paid(attendee)
31 elif action == 'add_attendee':
32 return handle_add_attendee(event, user_id)
33 elif action == 'remove_attendee':
34 return handle_remove_attendee(event, attendee_id, administrating_user)
35 else:
36 raise NotImplementedError
37
38
39 def handle_attended(attendee: Attendee):
40 """
41 Toggle attending-status of an attendee between attending and not attending
42 """
43 attendee.attended = not attendee.attended
44 attendee.save()
45
46 return {'message': 'OK', 'status': 200}
47
48
49 def handle_paid(attendee: Attendee):
50 """
51 Toggle paid status of an attendee between paid and not paid
52 """
53 attendee.paid = not attendee.paid
54 attendee.save()
55
56 return {'message': 'OK', 'status': 200}
57
58
59 def _get_attendee_data(attendee_qs):
60 attendees = []
61
62 for number, a in enumerate(attendee_qs):
63 attendees.append({
64 'number': number + 1,
65 'id': a.id,
66 'first_name': a.user.first_name,
67 'last_name': a.user.last_name,
68 'paid': a.paid,
69 'extras': str(a.extras),
70 'attended': a.attended,
71 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})
72 })
73
74 return attendees
75
76
77 def _get_event_context(event: Event, response={}):
78 response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)
79 response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)
80
81 return response
82
83
84 def handle_add_attendee(event: Event, user_id: int):
85 resp = _get_event_context(event)
86 if event.attendance_event.number_of_seats_taken >= event.attendance_event.max_capacity:
87 if not event.attendance_event.waitlist:
88 return {'message': f'Det er ingen ledige plasser på {event.title}.', 'status': 400, **resp}
89
90 user = User.objects.filter(pk=user_id)
91 if user.count() != 1:
92 return {'message': f'Fant ingen bruker med oppgitt ID ({user_id}).', 'status': 400, **resp}
93 user = user[0]
94 if Attendee.objects.filter(user=user, event=event.attendance_event).count() != 0:
95 return {'message': f'{user} er allerede påmeldt {event.title}.', 'status': 400, **resp}
96
97 attendee = Attendee(user=user, event=event.attendance_event)
98 attendee.save()
99
100 resp = _get_event_context(event, resp)
101 return {'message': f'{user} ble meldt på {event}', 'status': 200, **resp}
102
103
104 def handle_remove_attendee(event: Event, attendee_id: int, admin_user: User):
105 resp = _get_event_context(event)
106 attendee = Attendee.objects.filter(pk=attendee_id)
107 if attendee.count() != 1:
108 return {'message': f'Fant ingen påmeldte med oppgitt ID ({attendee_id}).', 'status': 400, **resp}
109 attendee = attendee[0]
110 attendee.unattend(admin_user)
111
112 resp = _get_event_context(event, resp)
113 return {'message': f'{attendee.user} ble fjernet fra {attendee.event}', 'status': 200, **resp}
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/events/dashboard/utils.py b/apps/events/dashboard/utils.py
--- a/apps/events/dashboard/utils.py
+++ b/apps/events/dashboard/utils.py
@@ -65,7 +65,9 @@
'id': a.id,
'first_name': a.user.first_name,
'last_name': a.user.last_name,
+ 'year_of_study': a.user.year,
'paid': a.paid,
+ 'payment_deadline': a.get_payment_deadline(),
'extras': str(a.extras),
'attended': a.attended,
'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})
@@ -77,6 +79,8 @@
def _get_event_context(event: Event, response={}):
response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)
response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)
+ response['is_payment_event'] = bool(event.attendance_event.payment())
+ response['has_extras'] = event.attendance_event.has_extras
return response
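
For context, the fix adds the fields that the server-rendered attendee table already shows (year of study, payment deadline, plus the `is_payment_event` and `has_extras` flags) to the AJAX payload, so rows rebuilt from the JSON line up with the existing columns. A hypothetical example of one serialized row after the patch, with invented values:

```python
# Shape of one entry returned by _get_attendee_data() after the patch;
# the concrete values below are made up for illustration.
attendee_row = {
    "number": 1,
    "id": 42,
    "first_name": "Ola",
    "last_name": "Nordmann",
    "year_of_study": 3,                # newly included
    "paid": True,
    "payment_deadline": "2019-02-01",  # newly included
    "extras": "None",
    "attended": False,
    "link": "/dashboard/attendee/42/",
}
```
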
| {"golden_diff": "diff --git a/apps/events/dashboard/utils.py b/apps/events/dashboard/utils.py\n--- a/apps/events/dashboard/utils.py\n+++ b/apps/events/dashboard/utils.py\n@@ -65,7 +65,9 @@\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n+ 'year_of_study': a.user.year,\n 'paid': a.paid,\n+ 'payment_deadline': a.get_payment_deadline(),\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n@@ -77,6 +79,8 @@\n def _get_event_context(event: Event, response={}):\n response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)\n response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)\n+ response['is_payment_event'] = bool(event.attendance_event.payment())\n+ response['has_extras'] = event.attendance_event.has_extras\n \n return response\n", "issue": "Atendee list bug when adding or removing users\n<!-- If this is a security issue or information leakage (having access to something you (probably) shouldn't), please send an email rather than opening a public issue. -->\r\n\r\n## What kind of an issue is this?\r\n\r\n- Bug report\r\n\r\n\r\n## What is the expected behaviour?\r\nIt should look normal where every box and number is under its corresponding column. It should just look normal. Even if we remove users or add users through the dashboard, nothing should change.\r\n\r\n\r\n\r\n\r\n## What is the current behaviour?\r\nIf you remove or add a user through the dashboard menu at the attendee list it will look like the screenshot above. We have randomly two boxes, the x for removing users is all the way to the right and the text \"none\" is occupying the remove column. If you refresh the site it will go back to expected behaviour, its only after deleteing/adding a user\r\n\r\n\r\n<!-- if this is a bug report -->\r\n\r\n\r\n## How do you reproduce this problem? \r\nRemove or add a user to the attendee list manually.\r\n<!-- if this is a bug report -->\r\n<!-- provide steps to reproduce this problem, preferably in a bullet point list -->\r\n1. go to the attendee list\r\n2. Add a user by writing their name OR remove a user from the list\r\n## Other information\r\n\r\nThis might be a bug which I didn't catch when I added \"year of study\" to the attendee list. 
I'm not sure if this was an issue before, but since it hasn't been brought up I will assume this is a bug from that pull request of mine\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.urls import reverse\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Attendee, Event\n\n\ndef _get_attendee(attendee_id):\n try:\n return Attendee.objects.get(pk=attendee_id)\n except Attendee.DoesNotExist:\n return None\n\n\ndef event_ajax_handler(event: Event, request):\n action = request.POST.get('action')\n administrating_user = request.user\n attendee_id = request.POST.get('attendee_id')\n user_id = request.POST.get('user_id')\n\n if action == 'attended':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_attended(attendee)\n elif action == 'paid':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_paid(attendee)\n elif action == 'add_attendee':\n return handle_add_attendee(event, user_id)\n elif action == 'remove_attendee':\n return handle_remove_attendee(event, attendee_id, administrating_user)\n else:\n raise NotImplementedError\n\n\ndef handle_attended(attendee: Attendee):\n \"\"\"\n Toggle attending-status of an attendee between attending and not attending\n \"\"\"\n attendee.attended = not attendee.attended\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef handle_paid(attendee: Attendee):\n \"\"\"\n Toggle paid status of an attendee between paid and not paid\n \"\"\"\n attendee.paid = not attendee.paid\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef _get_attendee_data(attendee_qs):\n attendees = []\n\n for number, a in enumerate(attendee_qs):\n attendees.append({\n 'number': number + 1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'paid': a.paid,\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n\n return attendees\n\n\ndef _get_event_context(event: Event, response={}):\n response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)\n response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)\n\n return response\n\n\ndef handle_add_attendee(event: Event, user_id: int):\n resp = _get_event_context(event)\n if event.attendance_event.number_of_seats_taken >= event.attendance_event.max_capacity:\n if not event.attendance_event.waitlist:\n return {'message': f'Det er ingen ledige plasser p\u00e5 {event.title}.', 'status': 400, **resp}\n\n user = User.objects.filter(pk=user_id)\n if user.count() != 1:\n return {'message': f'Fant ingen bruker med oppgitt ID ({user_id}).', 'status': 400, **resp}\n user = user[0]\n if Attendee.objects.filter(user=user, event=event.attendance_event).count() != 0:\n return {'message': f'{user} er allerede p\u00e5meldt {event.title}.', 'status': 400, **resp}\n\n attendee = Attendee(user=user, event=event.attendance_event)\n attendee.save()\n\n resp = _get_event_context(event, resp)\n return {'message': f'{user} ble meldt p\u00e5 {event}', 'status': 200, **resp}\n\n\ndef handle_remove_attendee(event: Event, attendee_id: int, admin_user: User):\n resp = _get_event_context(event)\n attendee = Attendee.objects.filter(pk=attendee_id)\n if attendee.count() != 1:\n 
return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400, **resp}\n attendee = attendee[0]\n attendee.unattend(admin_user)\n\n resp = _get_event_context(event, resp)\n return {'message': f'{attendee.user} ble fjernet fra {attendee.event}', 'status': 200, **resp}\n", "path": "apps/events/dashboard/utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.urls import reverse\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Attendee, Event\n\n\ndef _get_attendee(attendee_id):\n try:\n return Attendee.objects.get(pk=attendee_id)\n except Attendee.DoesNotExist:\n return None\n\n\ndef event_ajax_handler(event: Event, request):\n action = request.POST.get('action')\n administrating_user = request.user\n attendee_id = request.POST.get('attendee_id')\n user_id = request.POST.get('user_id')\n\n if action == 'attended':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_attended(attendee)\n elif action == 'paid':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_paid(attendee)\n elif action == 'add_attendee':\n return handle_add_attendee(event, user_id)\n elif action == 'remove_attendee':\n return handle_remove_attendee(event, attendee_id, administrating_user)\n else:\n raise NotImplementedError\n\n\ndef handle_attended(attendee: Attendee):\n \"\"\"\n Toggle attending-status of an attendee between attending and not attending\n \"\"\"\n attendee.attended = not attendee.attended\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef handle_paid(attendee: Attendee):\n \"\"\"\n Toggle paid status of an attendee between paid and not paid\n \"\"\"\n attendee.paid = not attendee.paid\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef _get_attendee_data(attendee_qs):\n attendees = []\n\n for number, a in enumerate(attendee_qs):\n attendees.append({\n 'number': number + 1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'year_of_study': a.user.year,\n 'paid': a.paid,\n 'payment_deadline': a.get_payment_deadline(),\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n\n return attendees\n\n\ndef _get_event_context(event: Event, response={}):\n response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)\n response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)\n response['is_payment_event'] = bool(event.attendance_event.payment())\n response['has_extras'] = event.attendance_event.has_extras\n\n return response\n\n\ndef handle_add_attendee(event: Event, user_id: int):\n resp = _get_event_context(event)\n if event.attendance_event.number_of_seats_taken >= event.attendance_event.max_capacity:\n if not event.attendance_event.waitlist:\n return {'message': f'Det er ingen ledige plasser p\u00e5 {event.title}.', 'status': 400, **resp}\n\n user = User.objects.filter(pk=user_id)\n if user.count() != 1:\n return {'message': f'Fant ingen bruker med oppgitt ID ({user_id}).', 'status': 400, **resp}\n user = user[0]\n if Attendee.objects.filter(user=user, event=event.attendance_event).count() != 0:\n return {'message': f'{user} er allerede p\u00e5meldt {event.title}.', 'status': 
400, **resp}\n\n attendee = Attendee(user=user, event=event.attendance_event)\n attendee.save()\n\n resp = _get_event_context(event, resp)\n return {'message': f'{user} ble meldt p\u00e5 {event}', 'status': 200, **resp}\n\n\ndef handle_remove_attendee(event: Event, attendee_id: int, admin_user: User):\n resp = _get_event_context(event)\n attendee = Attendee.objects.filter(pk=attendee_id)\n if attendee.count() != 1:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400, **resp}\n attendee = attendee[0]\n attendee.unattend(admin_user)\n\n resp = _get_event_context(event, resp)\n return {'message': f'{attendee.user} ble fjernet fra {attendee.event}', 'status': 200, **resp}\n", "path": "apps/events/dashboard/utils.py"}]} | 2,006 | 247 |
gh_patches_debug_2042 | rasdani/github-patches | git_diff | aws__aws-cli-357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip install awscli fails
I tried `pip install awscli` as described in https://github.com/aws/aws-cli/blob/develop/README.rst and it failed:
http://sprunge.us/NfbW
/home/hendry/.pip/pip.log = http://ix.io/7SC
Hilarious how bad Python packaging is. I'm running Archlinux with Python 3.3.2.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import sys
3
4 from setuptools import setup, find_packages
5
6 import awscli
7
8
9 requires = ['botocore>=0.16.0,<0.17.0',
10 'bcdoc>=0.9.0,<0.10.0',
11 'six>=1.1.0',
12 'colorama==0.2.5',
13 'docutils>=0.10',
14 'rsa==3.1.1']
15
16 if sys.version_info[:2] == (2, 6):
17 # For python2.6 we have to require argparse since it
18 # was not in stdlib until 2.7.
19 requires.append('argparse>=1.1')
20
21
22 setup_options = dict(
23 name='awscli',
24 version=awscli.__version__,
25 description='Universal Command Line Environment for AWS.',
26 long_description=open('README.rst').read(),
27 author='Mitch Garnaat',
28 author_email='[email protected]',
29 url='http://aws.amazon.com/cli/',
30 scripts=['bin/aws', 'bin/aws.cmd',
31 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],
32 packages=find_packages('.', exclude=['tests*']),
33 package_dir={'awscli': 'awscli'},
34 package_data={'awscli': ['data/*.json', 'examples/*/*']},
35 install_requires=requires,
36 license="Apache License 2.0",
37 classifiers=(
38 'Development Status :: 5 - Production/Stable',
39 'Intended Audience :: Developers',
40 'Intended Audience :: System Administrators',
41 'Natural Language :: English',
42 'License :: OSI Approved :: Apache Software License',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 2.6',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.3',
48 ),
49 )
50
51 if 'py2exe' in sys.argv:
52 # This will actually give us a py2exe command.
53 import py2exe
54 # And we have some py2exe specific options.
55 setup_options['options'] = {
56 'py2exe': {
57 'optimize': 0,
58 'skip_archive': True,
59 'includes': ['ConfigParser', 'urllib', 'httplib',
60 'docutils.readers.standalone',
61 'docutils.parsers.rst',
62 'docutils.languages.en',
63 'xml.etree.ElementTree', 'HTMLParser',
64 'awscli.handlers'],
65 }
66 }
67 setup_options['console'] = ['bin/aws']
68
69
70 setup(**setup_options)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@
'six>=1.1.0',
'colorama==0.2.5',
'docutils>=0.10',
- 'rsa==3.1.1']
+ 'rsa==3.1.2']
if sys.version_info[:2] == (2, 6):
# For python2.6 we have to require argparse since it
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,7 @@\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n- 'rsa==3.1.1']\n+ 'rsa==3.1.2']\n \n if sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n", "issue": "pip install awscli fails\nI tried `pip install awscli` from https://github.com/aws/aws-cli/blob/develop/README.rst and failed:\n\nhttp://sprunge.us/NfbW\n/home/hendry/.pip/pip.log = http://ix.io/7SC\n\nHilarious how bad Python packaging is. I'm running Archlinux with Python 3.3.2.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n install_requires=requires,\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n 'rsa==3.1.2']\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n 
install_requires=requires,\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,043 | 115 |
gh_patches_debug_8933 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1945 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Public projects filter in REST API not working correctly
## Test plan
All projects (and other objects) should be displayed in the REST API.
## Issue description
As a quick fix, just display all projects (public and private) in the API.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/viewsets.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from rest_framework import filters
8 from rest_framework import viewsets
9 from rest_framework.authentication import SessionAuthentication
10 from rest_framework.permissions import DjangoObjectPermissions
11
12 from .models import TastyTokenAuthentication
13
14 from akvo.rsr.models import Project
15
16
17 class BaseRSRViewSet(viewsets.ModelViewSet):
18 """
19 Base class used for the view sets for RSR models. Provides unified auth and perms settings.
20 Only public projects will be shown by filtering the queryset.
21 """
22 authentication_classes = (SessionAuthentication, TastyTokenAuthentication, )
23 permission_classes = (DjangoObjectPermissions, )
24 filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )
25 ordering_fields = '__all__'
26
27 def get_queryset(self):
28 """Filter out any private projects."""
29 for related_obj in self.queryset.model._meta.get_all_related_objects():
30 if related_obj.model == Project:
31 self.queryset = self.queryset.filter(project__is_public=True)
32 break
33 return super(BaseRSRViewSet, self).get_queryset()
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/viewsets.py b/akvo/rest/viewsets.py
--- a/akvo/rest/viewsets.py
+++ b/akvo/rest/viewsets.py
@@ -23,11 +23,3 @@
permission_classes = (DjangoObjectPermissions, )
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )
ordering_fields = '__all__'
-
- def get_queryset(self):
- """Filter out any private projects."""
- for related_obj in self.queryset.model._meta.get_all_related_objects():
- if related_obj.model == Project:
- self.queryset = self.queryset.filter(project__is_public=True)
- break
- return super(BaseRSRViewSet, self).get_queryset()
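
The quick fix simply drops the `get_queryset()` override, so the view sets fall back to the default queryset and the API again returns every project, public or private. If public-only filtering is wanted back later, it could be reintroduced behind a flag; the following is a hypothetical sketch, not part of this patch:

```python
from rest_framework import viewsets

from akvo.rsr.models import Project


class BaseRSRViewSet(viewsets.ModelViewSet):
    only_public_projects = False  # quick fix: expose everything by default

    def get_queryset(self):
        queryset = super(BaseRSRViewSet, self).get_queryset()
        if self.only_public_projects:
            # Same filtering logic the patch removed, now opt-in.
            for related_obj in queryset.model._meta.get_all_related_objects():
                if related_obj.model == Project:
                    queryset = queryset.filter(project__is_public=True)
                    break
        return queryset
```
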
| {"golden_diff": "diff --git a/akvo/rest/viewsets.py b/akvo/rest/viewsets.py\n--- a/akvo/rest/viewsets.py\n+++ b/akvo/rest/viewsets.py\n@@ -23,11 +23,3 @@\n permission_classes = (DjangoObjectPermissions, )\n filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )\n ordering_fields = '__all__'\n-\n- def get_queryset(self):\n- \"\"\"Filter out any private projects.\"\"\"\n- for related_obj in self.queryset.model._meta.get_all_related_objects():\n- if related_obj.model == Project:\n- self.queryset = self.queryset.filter(project__is_public=True)\n- break\n- return super(BaseRSRViewSet, self).get_queryset()\n", "issue": "Public projects filter in REST API not working correctly\n## Test plan\n\nAll projects (and other objects) should be displayed in the REST API.\n## Issue description\n\nAs a quick fix, just display all projects (public and private) in the API.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import filters\nfrom rest_framework import viewsets\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import DjangoObjectPermissions\n\nfrom .models import TastyTokenAuthentication\n\nfrom akvo.rsr.models import Project\n\n\nclass BaseRSRViewSet(viewsets.ModelViewSet):\n \"\"\"\n Base class used for the view sets for RSR models. Provides unified auth and perms settings.\n Only public projects will be shown by filtering the queryset.\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, )\n permission_classes = (DjangoObjectPermissions, )\n filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )\n ordering_fields = '__all__'\n\n def get_queryset(self):\n \"\"\"Filter out any private projects.\"\"\"\n for related_obj in self.queryset.model._meta.get_all_related_objects():\n if related_obj.model == Project:\n self.queryset = self.queryset.filter(project__is_public=True)\n break\n return super(BaseRSRViewSet, self).get_queryset()\n", "path": "akvo/rest/viewsets.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import filters\nfrom rest_framework import viewsets\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import DjangoObjectPermissions\n\nfrom .models import TastyTokenAuthentication\n\nfrom akvo.rsr.models import Project\n\n\nclass BaseRSRViewSet(viewsets.ModelViewSet):\n \"\"\"\n Base class used for the view sets for RSR models. Provides unified auth and perms settings.\n Only public projects will be shown by filtering the queryset.\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, )\n permission_classes = (DjangoObjectPermissions, )\n filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )\n ordering_fields = '__all__'\n", "path": "akvo/rest/viewsets.py"}]} | 658 | 167 |
gh_patches_debug_725 | rasdani/github-patches | git_diff | rasterio__rasterio-1477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python crashes while building overviews
After performing the below code Python crashes:
```python
import rasterio
from rasterio.enums import Resampling
factors = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
dst = rasterio.open('rasterio/tests/data/RGB.byte.tif', 'r+')
dst.build_overviews(factors, Resampling.average)
```
```
*** Error in `python': malloc(): memory corruption: 0x0000000002e0f9c0 ***
======= Backtrace: =========
/lib/x86_64-linux-gnu/libc.so.6(+0x777e5)[0x7fbe1c3fd7e5]
/lib/x86_64-linux-gnu/libc.so.6(+0x8213e)[0x7fbe1c40813e]
/lib/x86_64-linux-gnu/libc.so.6(__libc_malloc+0x54)[0x7fbe1c40a184]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLMalloc+0x20)[0x7fbe19ab2700]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLCalloc+0x1c)[0x7fbe19ab27ac]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN12GTiffDataset15IBuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x10f0)[0x7fbe19554bd0]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN11GDALDataset14BuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x38)[0x7fbe198059f8]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/_io.cpython-35m-x86_64-linux-gnu.so(+0x3613a)[0x7fbe0595713a]
python(PyCFunction_Call+0x77)[0x4e9ba7]
python(PyEval_EvalFrameEx+0x614)[0x5372f4]
python[0x540199]
python(PyEval_EvalCode+0x1f)[0x540e4f]
python[0x60c272]
python(PyRun_InteractiveOneObject+0x2b1)[0x46b89f]
python(PyRun_InteractiveLoopFlags+0xe8)[0x46ba48]
python[0x46cfa0]
python[0x4cf2bd]
python(main+0xe1)[0x4cfeb1]
/lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xf0)[0x7fbe1c3a6830]
python(_start+0x29)[0x5d6049]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/errors.py`
Content:
```
1 """Errors and Warnings."""
2
3 from click import FileError
4
5
6 class RasterioError(Exception):
7 """Root exception class"""
8
9
10 class WindowError(RasterioError):
11 """Raised when errors occur during window operations"""
12
13
14 class CRSError(ValueError):
15 """Raised when a CRS string or mapping is invalid or cannot serve
16 to define a coordinate transformation."""
17
18
19 class EnvError(RasterioError):
20 """Raised when the state of GDAL/AWS environment cannot be created
21 or modified."""
22
23
24 class DriverRegistrationError(ValueError):
25 """Raised when a format driver is requested but is not registered."""
26
27
28 class FileOverwriteError(FileError):
29 """Raised when Rasterio's CLI refuses to clobber output files."""
30
31 def __init__(self, message):
32 """Raise FileOverwriteError with message as hint."""
33 super(FileOverwriteError, self).__init__('', hint=message)
34
35
36 class RasterioIOError(IOError):
37 """Raised when a dataset cannot be opened using one of the
38 registered format drivers."""
39
40
41 class NodataShadowWarning(UserWarning):
42 """Warn that a dataset's nodata attribute is shadowing its alpha band."""
43
44 def __str__(self):
45 return ("The dataset's nodata attribute is shadowing "
46 "the alpha band. All masks will be determined "
47 "by the nodata attribute")
48
49
50 class NotGeoreferencedWarning(UserWarning):
51 """Warn that a dataset isn't georeferenced."""
52
53
54 class GDALBehaviorChangeException(RuntimeError):
55 """Raised when GDAL's behavior differs from the given arguments. For
56 example, antimeridian cutting is always on as of GDAL 2.2.0. Users
57 expecting it to be off will be presented with a MultiPolygon when the
58 rest of their code expects a Polygon.
59
60 # Raises an exception on GDAL >= 2.2.0
61 rasterio.warp.transform_geometry(
62 src_crs, dst_crs, antimeridian_cutting=False)
63 """
64
65
66 class GDALOptionNotImplementedError(RasterioError):
67 """A dataset opening or dataset creation option can't be supported
68
69 This will be raised from Rasterio's shim modules. For example, when
70 a user passes arguments to open_dataset() that can't be evaluated
71 by GDAL 1.x.
72 """
73
74 class GDALVersionError(RasterioError):
75 """Raised if the runtime version of GDAL does not meet the required
76 version of GDAL."""
77
78
79 class WindowEvaluationError(ValueError):
80 """Raised when window evaluation fails"""
81
82
83 class RasterioDeprecationWarning(UserWarning):
84 """Rasterio module deprecations"""
85
86
87 class RasterBlockError(RasterioError):
88 """Raised when raster block access fails"""
89
90
91 class BandOverviewError(UserWarning):
92 """Raised when a band overview access fails."""
93
94
95 class WarpOptionsError(RasterioError):
96 """Raised when options for a warp operation are invalid"""
97
98
99 class UnsupportedOperation(RasterioError):
100 """Raised when reading from a file opened in 'w' mode"""
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/errors.py b/rasterio/errors.py
--- a/rasterio/errors.py
+++ b/rasterio/errors.py
@@ -98,3 +98,7 @@
class UnsupportedOperation(RasterioError):
"""Raised when reading from a file opened in 'w' mode"""
+
+
+class OverviewCreationError(RasterioError):
+ """Raised when creation of an overview fails"""
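
This part of the fix only defines the new `OverviewCreationError`; the companion change in the `_io` extension module (not shown in this record) is presumably what raises it instead of letting GDAL corrupt memory. A hypothetical caller could then trap the failure rather than crash:

```python
import rasterio
from rasterio.enums import Resampling
from rasterio.errors import OverviewCreationError

# Assumes a writable copy of the test raster; the factor list mirrors the report.
with rasterio.open("tests/data/RGB.byte.tif", "r+") as dst:
    try:
        dst.build_overviews([2, 4, 8, 16, 32, 64, 128, 256], Resampling.average)
    except OverviewCreationError as exc:
        print("Overview creation failed:", exc)
```
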
| {"golden_diff": "diff --git a/rasterio/errors.py b/rasterio/errors.py\n--- a/rasterio/errors.py\n+++ b/rasterio/errors.py\n@@ -98,3 +98,7 @@\n \n class UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n+\n+\n+class OverviewCreationError(RasterioError):\n+ \"\"\"Raised when creation of an overview fails\"\"\"\n", "issue": "Python crashes while building overviews\nAfter performing the below code Python crashes:\r\n\r\n```python\r\nimport rasterio\r\nfrom rasterio.enums import Resampling\r\n\r\nfactors = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]\r\ndst = rasterio.open('rasterio/tests/data/RGB.byte.tif', 'r+')\r\ndst.build_overviews(factors, Resampling.average)\r\n```\r\n\r\n```\r\n*** Error in `python': malloc(): memory corruption: 0x0000000002e0f9c0 ***\r\n======= Backtrace: =========\r\n/lib/x86_64-linux-gnu/libc.so.6(+0x777e5)[0x7fbe1c3fd7e5]\r\n/lib/x86_64-linux-gnu/libc.so.6(+0x8213e)[0x7fbe1c40813e]\r\n/lib/x86_64-linux-gnu/libc.so.6(__libc_malloc+0x54)[0x7fbe1c40a184]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLMalloc+0x20)[0x7fbe19ab2700]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLCalloc+0x1c)[0x7fbe19ab27ac]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN12GTiffDataset15IBuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x10f0)[0x7fbe19554bd0]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN11GDALDataset14BuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x38)[0x7fbe198059f8]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/_io.cpython-35m-x86_64-linux-gnu.so(+0x3613a)[0x7fbe0595713a]\r\npython(PyCFunction_Call+0x77)[0x4e9ba7]\r\npython(PyEval_EvalFrameEx+0x614)[0x5372f4]\r\npython[0x540199]\r\npython(PyEval_EvalCode+0x1f)[0x540e4f]\r\npython[0x60c272]\r\npython(PyRun_InteractiveOneObject+0x2b1)[0x46b89f]\r\npython(PyRun_InteractiveLoopFlags+0xe8)[0x46ba48]\r\npython[0x46cfa0]\r\npython[0x4cf2bd]\r\npython(main+0xe1)[0x4cfeb1]\r\n/lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xf0)[0x7fbe1c3a6830]\r\npython(_start+0x29)[0x5d6049]\r\n```\n", "before_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing 
\"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n", "path": "rasterio/errors.py"}], "after_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. 
Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n\n\nclass OverviewCreationError(RasterioError):\n \"\"\"Raised when creation of an overview fails\"\"\"\n", "path": "rasterio/errors.py"}]} | 1,939 | 91 |
gh_patches_debug_13127 | rasdani/github-patches | git_diff | GoogleCloudPlatform__PerfKitBenchmarker-680 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
silo benchmark fails behind proxy
From @mateusz-blaszkowski in #475:
> silo - failed with Clone of 'git://github.com/kohler/masstree-beta.git' into submodule path 'masstree' failed. I run the test behind the proxy and this is the case. I would have changed the path to Git repository to https:// but it is hidden somewhere in 'dbtest' (look at the command which failed: cd /tmp/pkb/silo && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make -j80 dbtest). Oh, I found that the exact path is specified here: https://github.com/stephentu/silo/blob/cc11ca1ea949ef266ee12a9b1c310392519d9e3b/.gitmodules
We should switch it to `https://`.
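One proxy-friendly way to do that without editing the submodule's `.gitmodules` is a global Git URL rewrite, `git config --global url."https://".insteadOf git://`, which is essentially what the fix below applies before running the build.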
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `perfkitbenchmarker/linux_packages/silo.py`
Content:
```
1 # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 """Module containing Silo installation and cleanup functions."""
17
18 from perfkitbenchmarker import vm_util
19
20 GIT_REPO = 'https://github.com/stephentu/silo.git'
21 GIT_TAG = '62d2d498984bf69d3b46a74e310e1fd12fd1f692'
22 SILO_DIR = '%s/silo' % vm_util.VM_TMP_DIR
23 APT_PACKAGES = ('libjemalloc-dev libnuma-dev libdb++-dev '
24 'libmysqld-dev libaio-dev libssl-dev')
25 YUM_PACKAGES = ('jemalloc-devel numactl-devel libdb-cxx-devel mysql-devel '
26 'libaio-devel openssl-devel')
27
28
29 def _Install(vm):
30 """Installs the Silo package on the VM."""
31 nthreads = vm.num_cpus * 2
32 vm.Install('build_tools')
33 vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))
34 vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,
35 GIT_TAG))
36 vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\
37 -j{1} dbtest'.format(SILO_DIR, nthreads))
38
39
40 def YumInstall(vm):
41 """Installs the Silo package on the VM."""
42 vm.InstallPackages(YUM_PACKAGES)
43 _Install(vm)
44
45
46 def AptInstall(vm):
47 """Installs the Silo package on the VM."""
48 vm.InstallPackages(APT_PACKAGES)
49 _Install(vm)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/perfkitbenchmarker/linux_packages/silo.py b/perfkitbenchmarker/linux_packages/silo.py
--- a/perfkitbenchmarker/linux_packages/silo.py
+++ b/perfkitbenchmarker/linux_packages/silo.py
@@ -33,6 +33,9 @@
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,
GIT_TAG))
+ # This is due to a failing clone command when executing behind a proxy.
+ # Replacing the protocol to https instead of git fixes the issue.
+ vm.RemoteCommand('git config --global url."https://".insteadOf git://')
vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\
-j{1} dbtest'.format(SILO_DIR, nthreads))
| {"golden_diff": "diff --git a/perfkitbenchmarker/linux_packages/silo.py b/perfkitbenchmarker/linux_packages/silo.py\n--- a/perfkitbenchmarker/linux_packages/silo.py\n+++ b/perfkitbenchmarker/linux_packages/silo.py\n@@ -33,6 +33,9 @@\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n+ # This is due to a failing clone command when executing behind a proxy.\n+ # Replacing the protocol to https instead of git fixes the issue.\n+ vm.RemoteCommand('git config --global url.\"https://\".insteadOf git://')\n vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\\\n -j{1} dbtest'.format(SILO_DIR, nthreads))\n", "issue": "silo benchmark fails behind proxy\nFrom @mateusz-blaszkowski in #475: \n\n> silo - failed with Clone of 'git://github.com/kohler/masstree-beta.git' into submodule path 'masstree' failed. I run the test behind the proxy and this is the case. I would have changed the path to Git repository to https:// but it is hidden somewhere in 'dbtest' (look a the command which failed: cd /tmp/pkb/silo && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make -j80 dbtest). Oh, i found that the exact path is specified here: https://github.com/stephentu/silo/blob/cc11ca1ea949ef266ee12a9b1c310392519d9e3b/.gitmodules\n\nWe should switch it to `https://`.\n\n", "before_files": [{"content": "# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Module containing Silo installation and cleanup functions.\"\"\"\n\nfrom perfkitbenchmarker import vm_util\n\nGIT_REPO = 'https://github.com/stephentu/silo.git'\nGIT_TAG = '62d2d498984bf69d3b46a74e310e1fd12fd1f692'\nSILO_DIR = '%s/silo' % vm_util.VM_TMP_DIR\nAPT_PACKAGES = ('libjemalloc-dev libnuma-dev libdb++-dev '\n 'libmysqld-dev libaio-dev libssl-dev')\nYUM_PACKAGES = ('jemalloc-devel numactl-devel libdb-cxx-devel mysql-devel '\n 'libaio-devel openssl-devel')\n\n\ndef _Install(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n nthreads = vm.num_cpus * 2\n vm.Install('build_tools')\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\\\n -j{1} dbtest'.format(SILO_DIR, nthreads))\n\n\ndef YumInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(YUM_PACKAGES)\n _Install(vm)\n\n\ndef AptInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(APT_PACKAGES)\n _Install(vm)\n", "path": "perfkitbenchmarker/linux_packages/silo.py"}], "after_files": [{"content": "# Copyright 2014 PerfKitBenchmarker Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Module containing Silo installation and cleanup functions.\"\"\"\n\nfrom perfkitbenchmarker import vm_util\n\nGIT_REPO = 'https://github.com/stephentu/silo.git'\nGIT_TAG = '62d2d498984bf69d3b46a74e310e1fd12fd1f692'\nSILO_DIR = '%s/silo' % vm_util.VM_TMP_DIR\nAPT_PACKAGES = ('libjemalloc-dev libnuma-dev libdb++-dev '\n 'libmysqld-dev libaio-dev libssl-dev')\nYUM_PACKAGES = ('jemalloc-devel numactl-devel libdb-cxx-devel mysql-devel '\n 'libaio-devel openssl-devel')\n\n\ndef _Install(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n nthreads = vm.num_cpus * 2\n vm.Install('build_tools')\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n # This is due to a failing clone command when executing behind a proxy.\n # Replacing the protocol to https instead of git fixes the issue.\n vm.RemoteCommand('git config --global url.\"https://\".insteadOf git://')\n vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\\\n -j{1} dbtest'.format(SILO_DIR, nthreads))\n\n\ndef YumInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(YUM_PACKAGES)\n _Install(vm)\n\n\ndef AptInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(APT_PACKAGES)\n _Install(vm)\n", "path": "perfkitbenchmarker/linux_packages/silo.py"}]} | 1,051 | 206 |
gh_patches_debug_27124 | rasdani/github-patches | git_diff | chainer__chainer-6807 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
F.mean_absolute_error numerically unstable with float16 arrays
In #5053, float16 support has been enabled for [F.mean_absolute_error](https://github.com/chainer/chainer/blob/master/chainer/functions/loss/mean_absolute_error.py), but it seems to produce NaN values quite easily. Usually this happens when using big batch sizes and/or if the absolute error difference is large.
The calculation is done by summing over all the absolute differences, and then dividing by the number of elements in the array. However, it appears that the summing can produce large numbers outside the possible range for `float16`. The current implementation can be broken down as follows:
```python
def forward_cpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
diff = self.diff.ravel()
abs_diff = abs(diff)
summed_abs_diff = abs_diff.sum() # numerically unstable, can result in inf
mean_abs_error = np.array(summed_abs_diff / diff.size, dtype=diff.dtype)
return mean_abs_error
```
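The overflow is easy to demonstrate in isolation: the repro below has 64 * 1 * 16 * 16 = 16384 elements with an absolute difference of 4 each, so the full sum is 65536, just past float16's maximum of about 65504 (illustrative check, not Chainer code):

```python
import numpy as np

x = np.full(16384, 4.0, dtype=np.float16)
x.sum()                  # inf -- the float16 accumulator overflows
x.sum(dtype=np.float32)  # 65536.0 -- accumulating in float32 is fine
```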
Code to reproduce error:
```python
import chainer.functions as F
import numpy as np
a = np.full(shape=(64,1,16,16), fill_value=2, dtype=np.float16)
b = np.full(shape=(64,1,16,16), fill_value=-2, dtype=np.float16)
loss = F.mean_absolute_error(a,b)
# /home/user/.local/share/virtualenvs/.../lib/python3.6/site-packages/numpy/core/_methods.py:36: RuntimeWarning: overflow encountered in reduce
# return umr_sum(a, axis, dtype, out, keepdims, initial)
# variable(inf)
loss = F.mean_absolute_error(a.astype("float32"), b.astype("float32"))
# variable(4.)
```
Note that the actual loss (4) would still be valid in the float16 range; it is just that summing over many values results in an `inf`, which cannot then be divided to get a proper number.
Workaround ideas:
I've noticed the new `mixed16` mode that was implemented in #6456, and was wondering if there might be a similar way to do the intermediate calculations in `float32`, and cast the result back into `float16`? Thoughts?
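A minimal NumPy sketch of that idea (illustrative only, not Chainer's actual implementation; the function name is made up):

```python
import numpy as np

def mean_absolute_error_stable(x0, x1):
    diff = (x0 - x1).ravel()
    # Accumulate in float32 for float16 inputs, then cast the scalar result back.
    acc_dtype = np.float32 if diff.dtype == np.float16 else diff.dtype
    mae = np.abs(diff.astype(acc_dtype)).sum() / diff.size
    return np.asarray(mae, dtype=diff.dtype)

a = np.full((64, 1, 16, 16), 2, dtype=np.float16)
b = np.full((64, 1, 16, 16), -2, dtype=np.float16)
print(mean_absolute_error_stable(a, b))  # 4.0 instead of inf
```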
System info:
```
Platform: Linux-4.15.0-46-generic-x86_64-with-debian-buster-sid
Chainer: 6.0.0b3
NumPy: 1.16.2
CuPy:
CuPy Version : 6.0.0b3
CUDA Root : /usr/local/cuda
CUDA Build Version : 10000
CUDA Driver Version : 10010
CUDA Runtime Version : 10000
cuDNN Build Version : 7402
cuDNN Version : 7402
NCCL Build Version : 2307
NCCL Runtime Version : 2307
iDeep: Not Available
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/loss/mean_absolute_error.py`
Content:
```
1 import numpy
2
3 import chainer
4 from chainer import backend
5 from chainer import function_node
6 from chainer.utils import type_check
7
8
9 class MeanAbsoluteError(function_node.FunctionNode):
10
11 """Mean absolute error function."""
12
13 def check_type_forward(self, in_types):
14 type_check._argname(in_types, ('x0', 'x1'))
15 type_check.expect(
16 in_types[0].dtype.kind == 'f',
17 in_types[0].dtype == in_types[1].dtype,
18 in_types[0].shape == in_types[1].shape
19 )
20
21 def forward_cpu(self, inputs):
22 x0, x1 = inputs
23 self.diff = x0 - x1
24 diff = self.diff.ravel()
25 return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),
26
27 def forward_gpu(self, inputs):
28 x0, x1 = inputs
29 self.diff = x0 - x1
30 diff = self.diff.ravel()
31 return abs(diff).sum() / diff.dtype.type(diff.size),
32
33 def backward(self, indexes, grad_outputs):
34 gy, = grad_outputs
35 coeff = gy * gy.data.dtype.type(1. / self.diff.size)
36 coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)
37 gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)
38 return gx0, -gx0
39
40
41 def mean_absolute_error(x0, x1):
42 """Mean absolute error function.
43
44 The function computes the mean absolute error between two variables. The
45 mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
46 same dimensions. This function first calculates the absolute value
47 differences between the corresponding elements in x0 and x1, and then
48 returns the mean of those differences.
49
50 Args:
51 x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
52 x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
53
54 Returns:
55 ~chainer.Variable:
56 A variable holding an array representing the mean absolute
57 error of two inputs.
58
59 .. admonition:: Example
60
61 1D array examples:
62
63 >>> x = np.array([1, 2, 3]).astype(np.float32)
64 >>> y = np.array([0, 0, 0]).astype(np.float32)
65 >>> F.mean_absolute_error(x, y)
66 variable(2.)
67 >>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
68 >>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
69 >>> F.mean_absolute_error(x, y)
70 variable(6.)
71
72 2D array example:
73
74 In this example, there are 4 elements, and thus 4 errors
75 >>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
76 >>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
77 >>> F.mean_absolute_error(x, y)
78 variable(5.5)
79
80 3D array example:
81
82 In this example, there are 8 elements, and thus 8 errors
83 >>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
84 >>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
85 >>> x = x.astype(np.float32)
86 >>> y = y.astype(np.float32)
87 >>> F.mean_absolute_error(x, y)
88 variable(3.5)
89
90 """
91 return MeanAbsoluteError().apply((x0, x1))[0]
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/loss/mean_absolute_error.py b/chainer/functions/loss/mean_absolute_error.py
--- a/chainer/functions/loss/mean_absolute_error.py
+++ b/chainer/functions/loss/mean_absolute_error.py
@@ -6,6 +6,15 @@
from chainer.utils import type_check
+def _get_intermediate_dtype(dtype):
+ # Returns the dtype for intermediate calculation.
+ # For float16 input, float32 is used.
+ # Otherwise the same dtype as the parameter is used.
+ if dtype == numpy.float16:
+ return numpy.float32
+ return dtype
+
+
class MeanAbsoluteError(function_node.FunctionNode):
"""Mean absolute error function."""
@@ -21,14 +30,19 @@
def forward_cpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
- diff = self.diff.ravel()
- return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),
+ orig_dtype = self.diff.dtype
+ dtype = _get_intermediate_dtype(orig_dtype)
+ diff = self.diff.ravel().astype(dtype, copy=False)
+ return numpy.array(abs(diff).sum() / diff.size, dtype=orig_dtype),
def forward_gpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
- diff = self.diff.ravel()
- return abs(diff).sum() / diff.dtype.type(diff.size),
+ orig_dtype = self.diff.dtype
+ dtype = _get_intermediate_dtype(orig_dtype)
+ diff = self.diff.ravel().astype(dtype, copy=False)
+ return (abs(diff).sum() / diff.dtype.type(diff.size)).astype(
+ orig_dtype, copy=False),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
| {"golden_diff": "diff --git a/chainer/functions/loss/mean_absolute_error.py b/chainer/functions/loss/mean_absolute_error.py\n--- a/chainer/functions/loss/mean_absolute_error.py\n+++ b/chainer/functions/loss/mean_absolute_error.py\n@@ -6,6 +6,15 @@\n from chainer.utils import type_check\n \n \n+def _get_intermediate_dtype(dtype):\n+ # Returns the dtype for intermediate calculation.\n+ # For float16 input, float32 is used.\n+ # Otherwise the same dtype as the parameter is used.\n+ if dtype == numpy.float16:\n+ return numpy.float32\n+ return dtype\n+\n+\n class MeanAbsoluteError(function_node.FunctionNode):\n \n \"\"\"Mean absolute error function.\"\"\"\n@@ -21,14 +30,19 @@\n def forward_cpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n- diff = self.diff.ravel()\n- return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),\n+ orig_dtype = self.diff.dtype\n+ dtype = _get_intermediate_dtype(orig_dtype)\n+ diff = self.diff.ravel().astype(dtype, copy=False)\n+ return numpy.array(abs(diff).sum() / diff.size, dtype=orig_dtype),\n \n def forward_gpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n- diff = self.diff.ravel()\n- return abs(diff).sum() / diff.dtype.type(diff.size),\n+ orig_dtype = self.diff.dtype\n+ dtype = _get_intermediate_dtype(orig_dtype)\n+ diff = self.diff.ravel().astype(dtype, copy=False)\n+ return (abs(diff).sum() / diff.dtype.type(diff.size)).astype(\n+ orig_dtype, copy=False),\n \n def backward(self, indexes, grad_outputs):\n gy, = grad_outputs\n", "issue": "F.mean_absolute_error numerically unstable with float16 arrays\nIn #5053, float16 support has been enabled for [F.mean_absolute_error](https://github.com/chainer/chainer/blob/master/chainer/functions/loss/mean_absolute_error.py), but it seems to produce NaN values quite easily. Usually this happens when using big batch sizes and/or if the absolute error difference is large.\r\n\r\nThe calculation is done by summing over all the absolute differences, and then dividing by the number of elements in the array. However, it appears that the summing can produce large numbers outside the possible range for `float16`. 
The current implementation can be broken down as follows:\r\n\r\n```python\r\ndef forward_cpu(self, inputs):\r\n x0, x1 = inputs\r\n self.diff = x0 - x1\r\n diff = self.diff.ravel()\r\n abs_diff = abs(diff)\r\n summed_abs_diff = abs_diff.sum() # numerically unstable, can result in inf\r\n mean_abs_error = np.array(summed_abs_diff / diff.size, dtype=diff.dtype)\r\n return mean_abs_error\r\n```\r\n\r\nCode to reproduce error:\r\n\r\n```python\r\nimport chainer.functions as F\r\nimport numpy as np\r\n\r\na = np.full(shape=(64,1,16,16), fill_value=2, dtype=np.float16)\r\nb = np.full(shape=(64,1,16,16), fill_value=-2, dtype=np.float16)\r\n\r\nloss = F.mean_absolute_error(a,b)\r\n# /home/user/.local/share/virtualenvs/.../lib/python3.6/site-packages/numpy/core/_methods.py:36: RuntimeWarning: overflow encountered in reduce\r\n# return umr_sum(a, axis, dtype, out, keepdims, initial)\r\n# variable(inf)\r\n\r\nloss = F.mean_absolute_error(a.astype(\"float32\"), b.astype(\"float32\"))\r\n# variable(4.)\r\n```\r\n\r\nNote that the actual loss (4) would still be valid in the float16 range, it is just that summing over many values results in an `inf`, which cannot then be divided to get a proper number.\r\n\r\nWorkaround ideas:\r\n\r\nI've noticed the new `mixed16` mode that was implemented in #6456, and was wondering if there might be a similar way to do the intermediate calculations in `float32`, and cast the result back into `float16`? Thoughts?\r\n\r\nSystem info:\r\n```\r\nPlatform: Linux-4.15.0-46-generic-x86_64-with-debian-buster-sid\r\nChainer: 6.0.0b3\r\nNumPy: 1.16.2\r\nCuPy:\r\n CuPy Version : 6.0.0b3\r\n CUDA Root : /usr/local/cuda\r\n CUDA Build Version : 10000\r\n CUDA Driver Version : 10010\r\n CUDA Runtime Version : 10000\r\n cuDNN Build Version : 7402\r\n cuDNN Version : 7402\r\n NCCL Build Version : 2307\r\n NCCL Runtime Version : 2307\r\niDeep: Not Available\r\n```\n", "before_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\nclass MeanAbsoluteError(function_node.FunctionNode):\n\n \"\"\"Mean absolute error function.\"\"\"\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x0', 'x1'))\n type_check.expect(\n in_types[0].dtype.kind == 'f',\n in_types[0].dtype == in_types[1].dtype,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward_cpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n diff = self.diff.ravel()\n return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),\n\n def forward_gpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n diff = self.diff.ravel()\n return abs(diff).sum() / diff.dtype.type(diff.size),\n\n def backward(self, indexes, grad_outputs):\n gy, = grad_outputs\n coeff = gy * gy.data.dtype.type(1. / self.diff.size)\n coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)\n gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)\n return gx0, -gx0\n\n\ndef mean_absolute_error(x0, x1):\n \"\"\"Mean absolute error function.\n\n The function computes the mean absolute error between two variables. The\n mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the\n same dimensions. 
This function first calculates the absolute value\n differences between the corresponding elements in x0 and x1, and then\n returns the mean of those differences.\n\n Args:\n x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n\n Returns:\n ~chainer.Variable:\n A variable holding an array representing the mean absolute\n error of two inputs.\n\n .. admonition:: Example\n\n 1D array examples:\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> y = np.array([0, 0, 0]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(2.)\n >>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)\n >>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(6.)\n\n 2D array example:\n\n In this example, there are 4 elements, and thus 4 errors\n >>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)\n >>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(5.5)\n\n 3D array example:\n\n In this example, there are 8 elements, and thus 8 errors\n >>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))\n >>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))\n >>> x = x.astype(np.float32)\n >>> y = y.astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(3.5)\n\n \"\"\"\n return MeanAbsoluteError().apply((x0, x1))[0]\n", "path": "chainer/functions/loss/mean_absolute_error.py"}], "after_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\ndef _get_intermediate_dtype(dtype):\n # Returns the dtype for intermediate calculation.\n # For float16 input, float32 is used.\n # Otherwise the same dtype as the parameter is used.\n if dtype == numpy.float16:\n return numpy.float32\n return dtype\n\n\nclass MeanAbsoluteError(function_node.FunctionNode):\n\n \"\"\"Mean absolute error function.\"\"\"\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x0', 'x1'))\n type_check.expect(\n in_types[0].dtype.kind == 'f',\n in_types[0].dtype == in_types[1].dtype,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward_cpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n orig_dtype = self.diff.dtype\n dtype = _get_intermediate_dtype(orig_dtype)\n diff = self.diff.ravel().astype(dtype, copy=False)\n return numpy.array(abs(diff).sum() / diff.size, dtype=orig_dtype),\n\n def forward_gpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n orig_dtype = self.diff.dtype\n dtype = _get_intermediate_dtype(orig_dtype)\n diff = self.diff.ravel().astype(dtype, copy=False)\n return (abs(diff).sum() / diff.dtype.type(diff.size)).astype(\n orig_dtype, copy=False),\n\n def backward(self, indexes, grad_outputs):\n gy, = grad_outputs\n coeff = gy * gy.data.dtype.type(1. / self.diff.size)\n coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)\n gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)\n return gx0, -gx0\n\n\ndef mean_absolute_error(x0, x1):\n \"\"\"Mean absolute error function.\n\n The function computes the mean absolute error between two variables. The\n mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the\n same dimensions. 
This function first calculates the absolute value\n differences between the corresponding elements in x0 and x1, and then\n returns the mean of those differences.\n\n Args:\n x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n\n Returns:\n ~chainer.Variable:\n A variable holding an array representing the mean absolute\n error of two inputs.\n\n .. admonition:: Example\n\n 1D array examples:\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> y = np.array([0, 0, 0]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(2.)\n >>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)\n >>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(6.)\n\n 2D array example:\n\n In this example, there are 4 elements, and thus 4 errors\n >>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)\n >>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(5.5)\n\n 3D array example:\n\n In this example, there are 8 elements, and thus 8 errors\n >>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))\n >>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))\n >>> x = x.astype(np.float32)\n >>> y = y.astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(3.5)\n\n \"\"\"\n return MeanAbsoluteError().apply((x0, x1))[0]\n", "path": "chainer/functions/loss/mean_absolute_error.py"}]} | 2,017 | 413 |
gh_patches_debug_32491 | rasdani/github-patches | git_diff | openai__gym-1573 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support wrapper transformations to VecEnv
@tristandeleu @pzhokhov @christopherhesse It might be rather convenient for the user experience to provide a list of wrapper transformations for the atomic environments when creating a vectorized environment, e.g.
```python
transforms = [AtariPreprocessing, SignReward, ...]
env = gym.vector.make('Pong-v0', 16, True, transforms=transforms)
```
For additional arguments, the user is required to use `partial()` to define them within the transform list, so that each internal environment is wrapped according to the transformation list.
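A self-contained sketch of how each per-environment factory could apply such wrapper callables (names are illustrative and may differ from the final API; it follows the `partial()` convention suggested above):

```python
from functools import partial

import gym
from gym.wrappers import TimeLimit


def make_env_fn(env_id, wrappers=None, **kwargs):
    """Return a thunk that builds one env and applies each wrapper callable in order."""
    def _make_env():
        env = gym.make(env_id, **kwargs)
        for wrapper in (wrappers or []):  # each entry: a Wrapper class or a functools.partial of one
            env = wrapper(env)
        return env
    return _make_env


env_fns = [
    make_env_fn('CartPole-v1', wrappers=[partial(TimeLimit, max_episode_steps=200)])
    for _ in range(16)
]
```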
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/vector/__init__.py`
Content:
```
1 from gym.vector.async_vector_env import AsyncVectorEnv
2 from gym.vector.sync_vector_env import SyncVectorEnv
3 from gym.vector.vector_env import VectorEnv
4
5 __all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']
6
7 def make(id, num_envs=1, asynchronous=True, **kwargs):
8 """Create a vectorized environment from multiple copies of an environment,
9 from its id
10
11 Parameters
12 ----------
13 id : str
14 The environment ID. This must be a valid ID from the registry.
15
16 num_envs : int
17 Number of copies of the environment.
18
19 asynchronous : bool (default: `True`)
20 If `True`, wraps the environments in an `AsyncVectorEnv` (which uses
21 `multiprocessing` to run the environments in parallel). If `False`,
22 wraps the environments in a `SyncVectorEnv`.
23
24 Returns
25 -------
26 env : `gym.vector.VectorEnv` instance
27 The vectorized environment.
28
29 Example
30 -------
31 >>> import gym
32 >>> env = gym.vector.make('CartPole-v1', 3)
33 >>> env.reset()
34 array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],
35 [ 0.03073904, 0.00145001, -0.03088818, -0.03131252],
36 [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],
37 dtype=float32)
38 """
39 from gym.envs import make as make_
40 def _make_env():
41 return make_(id, **kwargs)
42 env_fns = [_make_env for _ in range(num_envs)]
43 return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/vector/__init__.py b/gym/vector/__init__.py
--- a/gym/vector/__init__.py
+++ b/gym/vector/__init__.py
@@ -1,10 +1,15 @@
+try:
+ from collections.abc import Iterable
+except ImportError:
+ Iterable = (tuple, list)
+
from gym.vector.async_vector_env import AsyncVectorEnv
from gym.vector.sync_vector_env import SyncVectorEnv
from gym.vector.vector_env import VectorEnv
__all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']
-def make(id, num_envs=1, asynchronous=True, **kwargs):
+def make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):
"""Create a vectorized environment from multiple copies of an environment,
from its id
@@ -20,6 +25,10 @@
If `True`, wraps the environments in an `AsyncVectorEnv` (which uses
`multiprocessing` to run the environments in parallel). If `False`,
wraps the environments in a `SyncVectorEnv`.
+
+ wrappers : Callable or Iterable of Callables (default: `None`)
+ If not `None`, then apply the wrappers to each internal
+ environment during creation.
Returns
-------
@@ -38,6 +47,15 @@
"""
from gym.envs import make as make_
def _make_env():
- return make_(id, **kwargs)
+ env = make_(id, **kwargs)
+ if wrappers is not None:
+ if callable(wrappers):
+ env = wrappers(env)
+ elif isinstance(wrappers, Iterable) and all([callable(w) for w in wrappers]):
+ for wrapper in wrappers:
+ env = wrapper(env)
+ else:
+ raise NotImplementedError
+ return env
env_fns = [_make_env for _ in range(num_envs)]
return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)
| {"golden_diff": "diff --git a/gym/vector/__init__.py b/gym/vector/__init__.py\n--- a/gym/vector/__init__.py\n+++ b/gym/vector/__init__.py\n@@ -1,10 +1,15 @@\n+try:\n+ from collections.abc import Iterable\n+except ImportError:\n+ Iterable = (tuple, list)\n+\n from gym.vector.async_vector_env import AsyncVectorEnv\n from gym.vector.sync_vector_env import SyncVectorEnv\n from gym.vector.vector_env import VectorEnv\n \n __all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']\n \n-def make(id, num_envs=1, asynchronous=True, **kwargs):\n+def make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):\n \"\"\"Create a vectorized environment from multiple copies of an environment,\n from its id\n \n@@ -20,6 +25,10 @@\n If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n `multiprocessing` to run the environments in parallel). If `False`,\n wraps the environments in a `SyncVectorEnv`.\n+ \n+ wrappers : Callable or Iterable of Callables (default: `None`)\n+ If not `None`, then apply the wrappers to each internal \n+ environment during creation. \n \n Returns\n -------\n@@ -38,6 +47,15 @@\n \"\"\"\n from gym.envs import make as make_\n def _make_env():\n- return make_(id, **kwargs)\n+ env = make_(id, **kwargs)\n+ if wrappers is not None:\n+ if callable(wrappers):\n+ env = wrappers(env)\n+ elif isinstance(wrappers, Iterable) and all([callable(w) for w in wrappers]):\n+ for wrapper in wrappers:\n+ env = wrapper(env)\n+ else:\n+ raise NotImplementedError\n+ return env\n env_fns = [_make_env for _ in range(num_envs)]\n return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)\n", "issue": "Support wrapper transformations to VecEnv\n@tristandeleu @pzhokhov @christopherhesse It might be rather convenient for the user experience to provide a list of wrapper transformations for the atomic environments when creating vectorized environment, e.g.\r\n\r\n```python\r\ntransforms = [AtariPreprocessing, SignReward, ...]\r\nenv = gym.vector.make('Pong-v0', 16, True, transforms=transforms)\r\n```\r\nFor additional arguments, the user is required to use `partial()` to define them within the transform list. So that each internal environment is wrapped according to the transformation list. \n", "before_files": [{"content": "from gym.vector.async_vector_env import AsyncVectorEnv\nfrom gym.vector.sync_vector_env import SyncVectorEnv\nfrom gym.vector.vector_env import VectorEnv\n\n__all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']\n\ndef make(id, num_envs=1, asynchronous=True, **kwargs):\n \"\"\"Create a vectorized environment from multiple copies of an environment,\n from its id\n\n Parameters\n ----------\n id : str\n The environment ID. This must be a valid ID from the registry.\n\n num_envs : int\n Number of copies of the environment. \n\n asynchronous : bool (default: `True`)\n If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n `multiprocessing` to run the environments in parallel). 
If `False`,\n wraps the environments in a `SyncVectorEnv`.\n\n Returns\n -------\n env : `gym.vector.VectorEnv` instance\n The vectorized environment.\n\n Example\n -------\n >>> import gym\n >>> env = gym.vector.make('CartPole-v1', 3)\n >>> env.reset()\n array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],\n [ 0.03073904, 0.00145001, -0.03088818, -0.03131252],\n [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],\n dtype=float32)\n \"\"\"\n from gym.envs import make as make_\n def _make_env():\n return make_(id, **kwargs)\n env_fns = [_make_env for _ in range(num_envs)]\n return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)\n", "path": "gym/vector/__init__.py"}], "after_files": [{"content": "try:\n from collections.abc import Iterable\nexcept ImportError:\n Iterable = (tuple, list)\n\nfrom gym.vector.async_vector_env import AsyncVectorEnv\nfrom gym.vector.sync_vector_env import SyncVectorEnv\nfrom gym.vector.vector_env import VectorEnv\n\n__all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']\n\ndef make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):\n \"\"\"Create a vectorized environment from multiple copies of an environment,\n from its id\n\n Parameters\n ----------\n id : str\n The environment ID. This must be a valid ID from the registry.\n\n num_envs : int\n Number of copies of the environment. \n\n asynchronous : bool (default: `True`)\n If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n `multiprocessing` to run the environments in parallel). If `False`,\n wraps the environments in a `SyncVectorEnv`.\n \n wrappers : Callable or Iterable of Callables (default: `None`)\n If not `None`, then apply the wrappers to each internal \n environment during creation. \n\n Returns\n -------\n env : `gym.vector.VectorEnv` instance\n The vectorized environment.\n\n Example\n -------\n >>> import gym\n >>> env = gym.vector.make('CartPole-v1', 3)\n >>> env.reset()\n array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],\n [ 0.03073904, 0.00145001, -0.03088818, -0.03131252],\n [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],\n dtype=float32)\n \"\"\"\n from gym.envs import make as make_\n def _make_env():\n env = make_(id, **kwargs)\n if wrappers is not None:\n if callable(wrappers):\n env = wrappers(env)\n elif isinstance(wrappers, Iterable) and all([callable(w) for w in wrappers]):\n for wrapper in wrappers:\n env = wrapper(env)\n else:\n raise NotImplementedError\n return env\n env_fns = [_make_env for _ in range(num_envs)]\n return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)\n", "path": "gym/vector/__init__.py"}]} | 938 | 449 |
gh_patches_debug_12946 | rasdani/github-patches | git_diff | awslabs__gluonts-1884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeepAR with NegativeBinomial cannot generate values above 1e6
## Description
A DeepAR model with NegativeBinomial output distribution cannot generate values significantly above 1e6.
## To Reproduce
I attach a jupyter notebook where I generate artificial timeseries with values between 0 and 1e8, train a model and plot the forecast. I compressed the notebook with zip as .ipynb files are not supported as attachments.
[1e6.ipynb.zip](https://github.com/awslabs/gluon-ts/files/8069187/1e6.ipynb.zip)
## Error message or code output
Please see the attached notebook.

## Environment
- Operating system: Ubuntu 20.04, linux kernel 5.13.0-28-generic
- Python version: 3.8.10
- GluonTS version: 0.8.1
- MXNet version: 1.9.0
I vaguely recall that I observed this issue already in GluonTS versions 0.4.x.
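The 1e6 ceiling is consistent with the clamp `x = F.minimum(F.random.gamma(r, theta), 1e6)` in `NegativeBinomial.sample` (see the file below), which caps the Poisson rate; the underlying Gamma-Poisson mixture itself has no such limit, as this quick NumPy check (illustrative, not GluonTS code) suggests:

```python
import numpy as np

mu, alpha = 5e7, 0.1
r, theta = 1.0 / alpha, alpha * mu           # same parameterization as sample()
lam = np.random.gamma(shape=r, scale=theta, size=1000)
print(np.random.poisson(lam).max())          # comfortably above 1e6
```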
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonts/mx/distribution/neg_binomial.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 from typing import Dict, List, Optional, Tuple
15
16 import numpy as np
17
18 from gluonts.core.component import validated
19 from gluonts.mx import Tensor
20
21 from .deterministic import DeterministicOutput
22 from .distribution import Distribution, _sample_multiple, getF, softplus
23 from .distribution_output import DistributionOutput
24 from .mixture import MixtureDistributionOutput
25
26
27 class NegativeBinomial(Distribution):
28 r"""
29 Negative binomial distribution, i.e. the distribution of the number of
30 successes in a sequence of independent Bernoulli trials.
31
32 Parameters
33 ----------
34 mu
35 Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
36 alpha
37 Tensor of the shape parameters, of shape `(*batch_shape, *event_shape)`.
38 F
39 """
40
41 is_reparameterizable = False
42
43 @validated()
44 def __init__(self, mu: Tensor, alpha: Tensor) -> None:
45 self.mu = mu
46 self.alpha = alpha
47
48 @property
49 def F(self):
50 return getF(self.mu)
51
52 @property
53 def batch_shape(self) -> Tuple:
54 return self.mu.shape
55
56 @property
57 def event_shape(self) -> Tuple:
58 return ()
59
60 @property
61 def event_dim(self) -> int:
62 return 0
63
64 def log_prob(self, x: Tensor) -> Tensor:
65 alphaInv = 1.0 / self.alpha
66 alpha_times_mu = self.alpha * self.mu
67 F = self.F
68 ll = (
69 x * F.log(alpha_times_mu / (1.0 + alpha_times_mu))
70 - alphaInv * F.log1p(alpha_times_mu)
71 + F.gammaln(x + alphaInv)
72 - F.gammaln(x + 1.0)
73 - F.gammaln(alphaInv)
74 )
75 return ll
76
77 @property
78 def mean(self) -> Tensor:
79 return self.mu
80
81 @property
82 def stddev(self) -> Tensor:
83 return self.F.sqrt(self.mu * (1.0 + self.mu * self.alpha))
84
85 def sample(
86 self, num_samples: Optional[int] = None, dtype=np.float32
87 ) -> Tensor:
88 def s(mu: Tensor, alpha: Tensor) -> Tensor:
89 F = self.F
90 tol = 1e-5
91 r = 1.0 / alpha
92 theta = alpha * mu
93 r = F.minimum(F.maximum(tol, r), 1e10)
94 theta = F.minimum(F.maximum(tol, theta), 1e10)
95 x = F.minimum(F.random.gamma(r, theta), 1e6)
96 return F.random.poisson(lam=x, dtype=dtype)
97
98 return _sample_multiple(
99 s, mu=self.mu, alpha=self.alpha, num_samples=num_samples
100 )
101
102 @property
103 def args(self) -> List:
104 return [self.mu, self.alpha]
105
106
107 class NegativeBinomialOutput(DistributionOutput):
108 args_dim: Dict[str, int] = {"mu": 1, "alpha": 1}
109 distr_cls: type = NegativeBinomial
110
111 @classmethod
112 def domain_map(cls, F, mu, alpha):
113 epsilon = np.finfo(cls._dtype).eps # machine epsilon
114
115 mu = softplus(F, mu) + epsilon
116 alpha = softplus(F, alpha) + epsilon
117 return mu.squeeze(axis=-1), alpha.squeeze(axis=-1)
118
119 # Overwrites the parent class method.
120 # We cannot scale using the affine transformation since negative binomial should return integers.
121 # Instead we scale the parameters.
122 def distribution(
123 self,
124 distr_args,
125 loc: Optional[Tensor] = None,
126 scale: Optional[Tensor] = None,
127 ) -> NegativeBinomial:
128 mu, alpha = distr_args
129 if scale is None:
130 return NegativeBinomial(mu, alpha)
131 else:
132 F = getF(mu)
133 mu = F.broadcast_mul(mu, scale)
134 return NegativeBinomial(mu, alpha, F)
135
136 @property
137 def event_shape(self) -> Tuple:
138 return ()
139
140
141 def ZeroInflatedNegativeBinomialOutput() -> MixtureDistributionOutput:
142 return MixtureDistributionOutput(
143 distr_outputs=[NegativeBinomialOutput(), DeterministicOutput(0)]
144 )
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/gluonts/mx/distribution/neg_binomial.py b/src/gluonts/mx/distribution/neg_binomial.py
--- a/src/gluonts/mx/distribution/neg_binomial.py
+++ b/src/gluonts/mx/distribution/neg_binomial.py
@@ -87,13 +87,9 @@
) -> Tensor:
def s(mu: Tensor, alpha: Tensor) -> Tensor:
F = self.F
- tol = 1e-5
r = 1.0 / alpha
theta = alpha * mu
- r = F.minimum(F.maximum(tol, r), 1e10)
- theta = F.minimum(F.maximum(tol, theta), 1e10)
- x = F.minimum(F.random.gamma(r, theta), 1e6)
- return F.random.poisson(lam=x, dtype=dtype)
+ return F.random.poisson(lam=F.random.gamma(r, theta), dtype=dtype)
return _sample_multiple(
s, mu=self.mu, alpha=self.alpha, num_samples=num_samples
| {"golden_diff": "diff --git a/src/gluonts/mx/distribution/neg_binomial.py b/src/gluonts/mx/distribution/neg_binomial.py\n--- a/src/gluonts/mx/distribution/neg_binomial.py\n+++ b/src/gluonts/mx/distribution/neg_binomial.py\n@@ -87,13 +87,9 @@\n ) -> Tensor:\n def s(mu: Tensor, alpha: Tensor) -> Tensor:\n F = self.F\n- tol = 1e-5\n r = 1.0 / alpha\n theta = alpha * mu\n- r = F.minimum(F.maximum(tol, r), 1e10)\n- theta = F.minimum(F.maximum(tol, theta), 1e10)\n- x = F.minimum(F.random.gamma(r, theta), 1e6)\n- return F.random.poisson(lam=x, dtype=dtype)\n+ return F.random.poisson(lam=F.random.gamma(r, theta), dtype=dtype)\n \n return _sample_multiple(\n s, mu=self.mu, alpha=self.alpha, num_samples=num_samples\n", "issue": "DeepAR with NegativeBinomial cannot generate values above 1e6\n## Description\r\nA DeepAR model with NegativeBinomial output distribution cannot generate values significantly above 1e6.\r\n\r\n## To Reproduce\r\nI attach a jupyter notebook where I generate artificial timeseries with values between 0 and 1e8, train a model and plot the forecast. I compressed the notebook with zip as .ipynb files are not supported as attachments.\r\n\r\n[1e6.ipynb.zip](https://github.com/awslabs/gluon-ts/files/8069187/1e6.ipynb.zip)\r\n\r\n## Error message or code output\r\nPlease see the attached notebook.\r\n\r\n\r\n\r\n## Environment\r\n- Operating system: Ubuntu 20.04, linux kernel 5.13.0-28-generic\r\n- Python version: 3.8.10\r\n- GluonTS version: 0.8.1\r\n- MXNet version: 1.9.0\r\n\r\nI vaguely recall that \r\nI observed this issue alredy in gluonts versions 0.4.x.\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\n\nfrom gluonts.core.component import validated\nfrom gluonts.mx import Tensor\n\nfrom .deterministic import DeterministicOutput\nfrom .distribution import Distribution, _sample_multiple, getF, softplus\nfrom .distribution_output import DistributionOutput\nfrom .mixture import MixtureDistributionOutput\n\n\nclass NegativeBinomial(Distribution):\n r\"\"\"\n Negative binomial distribution, i.e. 
the distribution of the number of\n successes in a sequence of independent Bernoulli trials.\n\n Parameters\n ----------\n mu\n Tensor containing the means, of shape `(*batch_shape, *event_shape)`.\n alpha\n Tensor of the shape parameters, of shape `(*batch_shape, *event_shape)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n @validated()\n def __init__(self, mu: Tensor, alpha: Tensor) -> None:\n self.mu = mu\n self.alpha = alpha\n\n @property\n def F(self):\n return getF(self.mu)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.mu.shape\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n def log_prob(self, x: Tensor) -> Tensor:\n alphaInv = 1.0 / self.alpha\n alpha_times_mu = self.alpha * self.mu\n F = self.F\n ll = (\n x * F.log(alpha_times_mu / (1.0 + alpha_times_mu))\n - alphaInv * F.log1p(alpha_times_mu)\n + F.gammaln(x + alphaInv)\n - F.gammaln(x + 1.0)\n - F.gammaln(alphaInv)\n )\n return ll\n\n @property\n def mean(self) -> Tensor:\n return self.mu\n\n @property\n def stddev(self) -> Tensor:\n return self.F.sqrt(self.mu * (1.0 + self.mu * self.alpha))\n\n def sample(\n self, num_samples: Optional[int] = None, dtype=np.float32\n ) -> Tensor:\n def s(mu: Tensor, alpha: Tensor) -> Tensor:\n F = self.F\n tol = 1e-5\n r = 1.0 / alpha\n theta = alpha * mu\n r = F.minimum(F.maximum(tol, r), 1e10)\n theta = F.minimum(F.maximum(tol, theta), 1e10)\n x = F.minimum(F.random.gamma(r, theta), 1e6)\n return F.random.poisson(lam=x, dtype=dtype)\n\n return _sample_multiple(\n s, mu=self.mu, alpha=self.alpha, num_samples=num_samples\n )\n\n @property\n def args(self) -> List:\n return [self.mu, self.alpha]\n\n\nclass NegativeBinomialOutput(DistributionOutput):\n args_dim: Dict[str, int] = {\"mu\": 1, \"alpha\": 1}\n distr_cls: type = NegativeBinomial\n\n @classmethod\n def domain_map(cls, F, mu, alpha):\n epsilon = np.finfo(cls._dtype).eps # machine epsilon\n\n mu = softplus(F, mu) + epsilon\n alpha = softplus(F, alpha) + epsilon\n return mu.squeeze(axis=-1), alpha.squeeze(axis=-1)\n\n # Overwrites the parent class method.\n # We cannot scale using the affine transformation since negative binomial should return integers.\n # Instead we scale the parameters.\n def distribution(\n self,\n distr_args,\n loc: Optional[Tensor] = None,\n scale: Optional[Tensor] = None,\n ) -> NegativeBinomial:\n mu, alpha = distr_args\n if scale is None:\n return NegativeBinomial(mu, alpha)\n else:\n F = getF(mu)\n mu = F.broadcast_mul(mu, scale)\n return NegativeBinomial(mu, alpha, F)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n\ndef ZeroInflatedNegativeBinomialOutput() -> MixtureDistributionOutput:\n return MixtureDistributionOutput(\n distr_outputs=[NegativeBinomialOutput(), DeterministicOutput(0)]\n )\n", "path": "src/gluonts/mx/distribution/neg_binomial.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\n\nfrom gluonts.core.component import validated\nfrom gluonts.mx import Tensor\n\nfrom .deterministic import DeterministicOutput\nfrom .distribution import Distribution, _sample_multiple, getF, softplus\nfrom .distribution_output import DistributionOutput\nfrom .mixture import MixtureDistributionOutput\n\n\nclass NegativeBinomial(Distribution):\n r\"\"\"\n Negative binomial distribution, i.e. the distribution of the number of\n successes in a sequence of independent Bernoulli trials.\n\n Parameters\n ----------\n mu\n Tensor containing the means, of shape `(*batch_shape, *event_shape)`.\n alpha\n Tensor of the shape parameters, of shape `(*batch_shape, *event_shape)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n @validated()\n def __init__(self, mu: Tensor, alpha: Tensor) -> None:\n self.mu = mu\n self.alpha = alpha\n\n @property\n def F(self):\n return getF(self.mu)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.mu.shape\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n def log_prob(self, x: Tensor) -> Tensor:\n alphaInv = 1.0 / self.alpha\n alpha_times_mu = self.alpha * self.mu\n F = self.F\n ll = (\n x * F.log(alpha_times_mu / (1.0 + alpha_times_mu))\n - alphaInv * F.log1p(alpha_times_mu)\n + F.gammaln(x + alphaInv)\n - F.gammaln(x + 1.0)\n - F.gammaln(alphaInv)\n )\n return ll\n\n @property\n def mean(self) -> Tensor:\n return self.mu\n\n @property\n def stddev(self) -> Tensor:\n return self.F.sqrt(self.mu * (1.0 + self.mu * self.alpha))\n\n def sample(\n self, num_samples: Optional[int] = None, dtype=np.float32\n ) -> Tensor:\n def s(mu: Tensor, alpha: Tensor) -> Tensor:\n F = self.F\n r = 1.0 / alpha\n theta = alpha * mu\n return F.random.poisson(lam=F.random.gamma(r, theta), dtype=dtype)\n\n return _sample_multiple(\n s, mu=self.mu, alpha=self.alpha, num_samples=num_samples\n )\n\n @property\n def args(self) -> List:\n return [self.mu, self.alpha]\n\n\nclass NegativeBinomialOutput(DistributionOutput):\n args_dim: Dict[str, int] = {\"mu\": 1, \"alpha\": 1}\n distr_cls: type = NegativeBinomial\n\n @classmethod\n def domain_map(cls, F, mu, alpha):\n epsilon = np.finfo(cls._dtype).eps # machine epsilon\n\n mu = softplus(F, mu) + epsilon\n alpha = softplus(F, alpha) + epsilon\n return mu.squeeze(axis=-1), alpha.squeeze(axis=-1)\n\n # Overwrites the parent class method.\n # We cannot scale using the affine transformation since negative binomial should return integers.\n # Instead we scale the parameters.\n def distribution(\n self,\n distr_args,\n loc: Optional[Tensor] = None,\n scale: Optional[Tensor] = None,\n ) -> NegativeBinomial:\n mu, alpha = distr_args\n if scale is None:\n return NegativeBinomial(mu, alpha)\n else:\n F = getF(mu)\n mu = F.broadcast_mul(mu, scale)\n return NegativeBinomial(mu, alpha, F)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n\ndef ZeroInflatedNegativeBinomialOutput() -> MixtureDistributionOutput:\n return MixtureDistributionOutput(\n distr_outputs=[NegativeBinomialOutput(), DeterministicOutput(0)]\n )\n", "path": "src/gluonts/mx/distribution/neg_binomial.py"}]} | 1,978 | 247 |
gh_patches_debug_16813 | rasdani/github-patches | git_diff | nautobot__nautobot-5593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Plugins not loaded with Gunicorn
### Environment
* Nautobot version (Docker tag too if applicable): 2.2.1
* Python version: 3.11
* Database platform, version: psql
* Middleware(s):
### Steps to Reproduce
1. Use systemd
2. With gunicorn 21.2.0 or 22.0.0
<!-- What did you expect to happen? -->
### Expected Behavior
All applications to show
### Observed Behavior
We attempted to upgrade our dev environment from 2.1.9 to 2.2.1 but are hitting a weird issue where our plugins are reported as missing. We are only loading 1 or 2 basic plugins right now while we work on updating all our other plugins for 2.x. Oddly we are only seeing this issue on 1 out of 3 identical servers with identical Nautobot installs.
This looks very much like this issue from 2021: [Plugin Load Failure · Issue #95 · nautobot/nautobot (github.com)](https://github.com/nautobot/nautobot/issues/95)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/wsgi.py`
Content:
```
1 import logging
2 import os
3
4 from django.core import cache
5 from django.core.wsgi import get_wsgi_application
6 from django.db import connections
7
8 os.environ["DJANGO_SETTINGS_MODULE"] = "nautobot_config"
9
10 # Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,
11 # will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded
12 # already. Therefore, `settings.WEBSERVER_WARMUP` to `False` for this code to be loaded.
13 try:
14 import uwsgidecorators
15
16 @uwsgidecorators.postfork
17 def fix_uwsgi():
18 import uwsgi
19
20 logging.getLogger(__name__).info(
21 f"Closing existing DB and cache connections on worker {uwsgi.worker_id()} after uWSGI forked ..."
22 )
23 connections.close_all()
24 cache.close_caches()
25
26 except ImportError:
27 pass
28
29 application = get_wsgi_application()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/core/wsgi.py b/nautobot/core/wsgi.py
--- a/nautobot/core/wsgi.py
+++ b/nautobot/core/wsgi.py
@@ -1,11 +1,18 @@
import logging
-import os
from django.core import cache
from django.core.wsgi import get_wsgi_application
from django.db import connections
-os.environ["DJANGO_SETTINGS_MODULE"] = "nautobot_config"
+import nautobot
+
+# This is the Django default left here for visibility on how the Nautobot pattern
+# differs.
+# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nautobot.core.settings")
+
+# Instead of just pointing to `DJANGO_SETTINGS_MODULE` and letting Django run with it,
+# we're using the custom Nautobot loader code to read environment or config path for us.
+nautobot.setup()
# Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,
# will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded
| {"golden_diff": "diff --git a/nautobot/core/wsgi.py b/nautobot/core/wsgi.py\n--- a/nautobot/core/wsgi.py\n+++ b/nautobot/core/wsgi.py\n@@ -1,11 +1,18 @@\n import logging\n-import os\n \n from django.core import cache\n from django.core.wsgi import get_wsgi_application\n from django.db import connections\n \n-os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"nautobot_config\"\n+import nautobot\n+\n+# This is the Django default left here for visibility on how the Nautobot pattern\n+# differs.\n+# os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"nautobot.core.settings\")\n+\n+# Instead of just pointing to `DJANGO_SETTINGS_MODULE` and letting Django run with it,\n+# we're using the custom Nautobot loader code to read environment or config path for us.\n+nautobot.setup()\n \n # Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,\n # will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded\n", "issue": "Plugins not loaded with Gunicorn\n\r\n### Environment\r\n\r\n* Nautobot version (Docker tag too if applicable): 2.2.1\r\n* Python version: 3.11\r\n* Database platform, version: psql\r\n* Middleware(s):\r\n\r\n\r\n### Steps to Reproduce\r\n1. Use systemd\r\n2. With gunicorn 21.2.0 or 22.0.0\r\n\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nAll applications to show \r\n\r\n### Observed Behavior\r\n\r\nWe attempted to upgrade our dev environment from 2.1.9 to 2.2.1 but are hitting a weird issue where our plugins are reported as missing. We are only loading 1 or 2 basic plugins right now while we work on updating all our other plugins for 2.x. Oddly we are only seeing this issue on 1 out of 3 identical servers with identical Nautobot installs.\r\n\r\nThis looks very much like this issue from 2021: [Plugin Load Failure \u00b7 Issue #95 \u00b7 nautobot/nautobot (github.com)](https://github.com/nautobot/nautobot/issues/95)\n", "before_files": [{"content": "import logging\nimport os\n\nfrom django.core import cache\nfrom django.core.wsgi import get_wsgi_application\nfrom django.db import connections\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"nautobot_config\"\n\n# Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,\n# will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded\n# already. Therefore, `settings.WEBSERVER_WARMUP` to `False` for this code to be loaded.\ntry:\n import uwsgidecorators\n\n @uwsgidecorators.postfork\n def fix_uwsgi():\n import uwsgi\n\n logging.getLogger(__name__).info(\n f\"Closing existing DB and cache connections on worker {uwsgi.worker_id()} after uWSGI forked ...\"\n )\n connections.close_all()\n cache.close_caches()\n\nexcept ImportError:\n pass\n\napplication = get_wsgi_application()\n", "path": "nautobot/core/wsgi.py"}], "after_files": [{"content": "import logging\n\nfrom django.core import cache\nfrom django.core.wsgi import get_wsgi_application\nfrom django.db import connections\n\nimport nautobot\n\n# This is the Django default left here for visibility on how the Nautobot pattern\n# differs.\n# os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"nautobot.core.settings\")\n\n# Instead of just pointing to `DJANGO_SETTINGS_MODULE` and letting Django run with it,\n# we're using the custom Nautobot loader code to read environment or config path for us.\nnautobot.setup()\n\n# Use try/except because we might not be running uWSGI. 
If `settings.WEBSERVER_WARMUP` is `True`,\n# will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded\n# already. Therefore, `settings.WEBSERVER_WARMUP` to `False` for this code to be loaded.\ntry:\n import uwsgidecorators\n\n @uwsgidecorators.postfork\n def fix_uwsgi():\n import uwsgi\n\n logging.getLogger(__name__).info(\n f\"Closing existing DB and cache connections on worker {uwsgi.worker_id()} after uWSGI forked ...\"\n )\n connections.close_all()\n cache.close_caches()\n\nexcept ImportError:\n pass\n\napplication = get_wsgi_application()\n", "path": "nautobot/core/wsgi.py"}]} | 779 | 239 |
gh_patches_debug_12067 | rasdani/github-patches | git_diff | sktime__sktime-1453 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] sktime.datatypes._panel._examples raises pandas.core.common.SettingWithCopyError
**Describe the bug**
Attempting to install [tsai](https://pypi.org/project/tsai/) as an upstream package also installs this package, but the install raises an error that traces to line 67 in "/opt/conda/lib/python3.8/site-packages/sktime/datatypes/_panel/_examples.py"
```
X.iloc[0][0] = pd.Series([1, 2, 3])
```
**To Reproduce**
Importing any code that executes the code starting at line 67 of /sktime/datatypes/_panel/_examples.py can raise a Pandas error, depending on Pandas version that may be installed
**Expected behavior**
No error should be raised on install or import of sktime as a dependency.
**Additional context**
<!--
Add any other context about the problem here.
-->
**Versions**
System:
python: 3.9.1 (default, Sep 16 2021, 11:42:30) [Clang 12.0.5 (clang-1205.0.22.11)]
executable: /.../.pyenv/versions/3.9.1/bin/python
machine: macOS-11.6-x86_64-i386-64bit
Python dependencies:
pip: 21.2.4
setuptools: 49.2.1
sklearn: 1.0
sktime: 0.8.0
statsmodels: 0.12.2
numpy: 1.20.3
scipy: 1.7.1
Cython: None
pandas: 1.3.3
matplotlib: 3.4.3
joblib: 1.0.1
numba: 0.53.1
pmdarima: None
tsfresh: 0.18.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/datatypes/_panel/_examples.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Example generation for testing.
3
4 Exports dict of examples, useful for testing as fixtures.
5
6 example_dict: dict indexed by triple
7 1st element = mtype - str
8 2nd element = considered as this scitype - str
9 3rd element = int - index of example
10 elements are data objects, considered examples for the mtype
11 all examples with same index are considered "same" on scitype content
12 if None, indicates that representation is not possible
13
14 example_lossy: dict of bool indexed by pairs of str
15 1st element = mtype - str
16 2nd element = considered as this scitype - str
17 3rd element = int - index of example
18 elements are bool, indicate whether representation has information removed
19 all examples with same index are considered "same" on scitype content
20
21 overall, conversions from non-lossy representations to any other ones
22 should yield the element exactly, identidally (given same index)
23 """
24
25 import pandas as pd
26 import numpy as np
27
28 example_dict = dict()
29 example_dict_lossy = dict()
30
31 ###
32
33
34 X = np.array(
35 [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],
36 dtype=np.int64,
37 )
38
39 example_dict[("numpy3D", "Panel", 0)] = X
40 example_dict_lossy[("numpy3D", "Panel", 0)] = False
41
42 cols = [f"var_{i}" for i in range(2)]
43 Xlist = [
44 pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),
45 pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),
46 pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),
47 ]
48
49 example_dict[("df-list", "Panel", 0)] = Xlist
50 example_dict_lossy[("df-list", "Panel", 0)] = False
51
52 cols = ["instances", "timepoints"] + [f"var_{i}" for i in range(2)]
53
54 Xlist = [
55 pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),
56 pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),
57 pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),
58 ]
59 X = pd.concat(Xlist)
60 X = X.set_index(["instances", "timepoints"])
61
62 example_dict[("pd-multiindex", "Panel", 0)] = X
63 example_dict_lossy[("pd-multiindex", "Panel", 0)] = False
64
65 cols = [f"var_{i}" for i in range(2)]
66 X = pd.DataFrame(columns=cols, index=[0, 1, 2])
67 X.iloc[0][0] = pd.Series([1, 2, 3])
68 X.iloc[0][1] = pd.Series([4, 5, 6])
69 X.iloc[1][0] = pd.Series([1, 2, 3])
70 X.iloc[1][1] = pd.Series([4, 55, 6])
71 X.iloc[2][0] = pd.Series([1, 2, 3])
72 X.iloc[2][1] = pd.Series([42, 5, 6])
73
74 example_dict[("nested_univ", "Panel", 0)] = X
75 example_dict_lossy[("nested_univ", "Panel", 0)] = False
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/datatypes/_panel/_examples.py b/sktime/datatypes/_panel/_examples.py
--- a/sktime/datatypes/_panel/_examples.py
+++ b/sktime/datatypes/_panel/_examples.py
@@ -64,12 +64,13 @@
cols = [f"var_{i}" for i in range(2)]
X = pd.DataFrame(columns=cols, index=[0, 1, 2])
-X.iloc[0][0] = pd.Series([1, 2, 3])
-X.iloc[0][1] = pd.Series([4, 5, 6])
-X.iloc[1][0] = pd.Series([1, 2, 3])
-X.iloc[1][1] = pd.Series([4, 55, 6])
-X.iloc[2][0] = pd.Series([1, 2, 3])
-X.iloc[2][1] = pd.Series([42, 5, 6])
+X["var_0"] = pd.Series(
+ [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]
+)
+
+X["var_1"] = pd.Series(
+ [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]
+)
example_dict[("nested_univ", "Panel", 0)] = X
example_dict_lossy[("nested_univ", "Panel", 0)] = False
| {"golden_diff": "diff --git a/sktime/datatypes/_panel/_examples.py b/sktime/datatypes/_panel/_examples.py\n--- a/sktime/datatypes/_panel/_examples.py\n+++ b/sktime/datatypes/_panel/_examples.py\n@@ -64,12 +64,13 @@\n \n cols = [f\"var_{i}\" for i in range(2)]\n X = pd.DataFrame(columns=cols, index=[0, 1, 2])\n-X.iloc[0][0] = pd.Series([1, 2, 3])\n-X.iloc[0][1] = pd.Series([4, 5, 6])\n-X.iloc[1][0] = pd.Series([1, 2, 3])\n-X.iloc[1][1] = pd.Series([4, 55, 6])\n-X.iloc[2][0] = pd.Series([1, 2, 3])\n-X.iloc[2][1] = pd.Series([42, 5, 6])\n+X[\"var_0\"] = pd.Series(\n+ [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]\n+)\n+\n+X[\"var_1\"] = pd.Series(\n+ [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]\n+)\n \n example_dict[(\"nested_univ\", \"Panel\", 0)] = X\n example_dict_lossy[(\"nested_univ\", \"Panel\", 0)] = False\n", "issue": "[BUG] sktime.datatypes._panel._examples raises pandas.core.common.SettingWithCopyError\n**Describe the bug**\r\nAttempting to install [tsai](https://pypi.org/project/tsai/) as an upstream package also installs this package, but the install raises an error that traces to line 67 in \"/opt/conda/lib/python3.8/site-packages/sktime/datatypes/_panel/_examples.py\"\r\n```\r\n X.iloc[0][0] = pd.Series([1, 2, 3])\r\n```\r\n\r\n**To Reproduce**\r\nImporting any code that executes the code starting at line 67 of /sktime/datatypes/_panel/_examples.py can raise a Pandas error, depending on Pandas version that may be installed\r\n\r\n**Expected behavior**\r\nNo error should be raised on install or import of sktime as a dependency. \r\n\r\n**Additional context**\r\n<!--\r\nAdd any other context about the problem here.\r\n-->\r\n\r\n**Versions**\r\nSystem:\r\n python: 3.9.1 (default, Sep 16 2021, 11:42:30) [Clang 12.0.5 (clang-1205.0.22.11)]\r\nexecutable: /.../.pyenv/versions/3.9.1/bin/python\r\n machine: macOS-11.6-x86_64-i386-64bit\r\n\r\nPython dependencies:\r\n pip: 21.2.4\r\n setuptools: 49.2.1\r\n sklearn: 1.0\r\n sktime: 0.8.0\r\n statsmodels: 0.12.2\r\n numpy: 1.20.3\r\n scipy: 1.7.1\r\n Cython: None\r\n pandas: 1.3.3\r\n matplotlib: 3.4.3\r\n joblib: 1.0.1\r\n numba: 0.53.1\r\n pmdarima: None\r\n tsfresh: 0.18.0\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Example generation for testing.\n\nExports dict of examples, useful for testing as fixtures.\n\nexample_dict: dict indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are data objects, considered examples for the mtype\n all examples with same index are considered \"same\" on scitype content\n if None, indicates that representation is not possible\n\nexample_lossy: dict of bool indexed by pairs of str\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are bool, indicate whether representation has information removed\n all examples with same index are considered \"same\" on scitype content\n\noverall, conversions from non-lossy representations to any other ones\n should yield the element exactly, identidally (given same index)\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nexample_dict = dict()\nexample_dict_lossy = dict()\n\n###\n\n\nX = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],\n dtype=np.int64,\n)\n\nexample_dict[(\"numpy3D\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"numpy3D\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in 
range(2)]\nXlist = [\n pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),\n pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),\n pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),\n]\n\nexample_dict[(\"df-list\", \"Panel\", 0)] = Xlist\nexample_dict_lossy[(\"df-list\", \"Panel\", 0)] = False\n\ncols = [\"instances\", \"timepoints\"] + [f\"var_{i}\" for i in range(2)]\n\nXlist = [\n pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),\n pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),\n pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),\n]\nX = pd.concat(Xlist)\nX = X.set_index([\"instances\", \"timepoints\"])\n\nexample_dict[(\"pd-multiindex\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"pd-multiindex\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in range(2)]\nX = pd.DataFrame(columns=cols, index=[0, 1, 2])\nX.iloc[0][0] = pd.Series([1, 2, 3])\nX.iloc[0][1] = pd.Series([4, 5, 6])\nX.iloc[1][0] = pd.Series([1, 2, 3])\nX.iloc[1][1] = pd.Series([4, 55, 6])\nX.iloc[2][0] = pd.Series([1, 2, 3])\nX.iloc[2][1] = pd.Series([42, 5, 6])\n\nexample_dict[(\"nested_univ\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"nested_univ\", \"Panel\", 0)] = False\n", "path": "sktime/datatypes/_panel/_examples.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Example generation for testing.\n\nExports dict of examples, useful for testing as fixtures.\n\nexample_dict: dict indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are data objects, considered examples for the mtype\n all examples with same index are considered \"same\" on scitype content\n if None, indicates that representation is not possible\n\nexample_lossy: dict of bool indexed by pairs of str\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are bool, indicate whether representation has information removed\n all examples with same index are considered \"same\" on scitype content\n\noverall, conversions from non-lossy representations to any other ones\n should yield the element exactly, identidally (given same index)\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nexample_dict = dict()\nexample_dict_lossy = dict()\n\n###\n\n\nX = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],\n dtype=np.int64,\n)\n\nexample_dict[(\"numpy3D\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"numpy3D\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in range(2)]\nXlist = [\n pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),\n pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),\n pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),\n]\n\nexample_dict[(\"df-list\", \"Panel\", 0)] = Xlist\nexample_dict_lossy[(\"df-list\", \"Panel\", 0)] = False\n\ncols = [\"instances\", \"timepoints\"] + [f\"var_{i}\" for i in range(2)]\n\nXlist = [\n pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),\n pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),\n pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),\n]\nX = pd.concat(Xlist)\nX = X.set_index([\"instances\", \"timepoints\"])\n\nexample_dict[(\"pd-multiindex\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"pd-multiindex\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in range(2)]\nX = pd.DataFrame(columns=cols, index=[0, 1, 2])\nX[\"var_0\"] = 
pd.Series(\n [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]\n)\n\nX[\"var_1\"] = pd.Series(\n [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]\n)\n\nexample_dict[(\"nested_univ\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"nested_univ\", \"Panel\", 0)] = False\n", "path": "sktime/datatypes/_panel/_examples.py"}]} | 1,755 | 355 |
gh_patches_debug_319 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
better identification of Solaar versions
`git describe` produces
0.9.2-339-g39791be
Instead it should produce something based on 1.0.1
`git describe --tags` produces
1.0.1-58-g39791be
which is much better.
I think that all that is required is to upgrade the 1.0.1 tag that already exists.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/__init__.py`
Content:
```
1 # -*- python-mode -*-
2 # -*- coding: UTF-8 -*-
3
4 ## Copyright (C) 2012-2013 Daniel Pavel
5 ##
6 ## This program is free software; you can redistribute it and/or modify
7 ## it under the terms of the GNU General Public License as published by
8 ## the Free Software Foundation; either version 2 of the License, or
9 ## (at your option) any later version.
10 ##
11 ## This program is distributed in the hope that it will be useful,
12 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ## GNU General Public License for more details.
15 ##
16 ## You should have received a copy of the GNU General Public License along
17 ## with this program; if not, write to the Free Software Foundation, Inc.,
18 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19
20 from __future__ import absolute_import, division, print_function, unicode_literals
21
22 __version__ = '1.0.1'
23 NAME = 'Solaar'
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py
--- a/lib/solaar/__init__.py
+++ b/lib/solaar/__init__.py
@@ -19,5 +19,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-__version__ = '1.0.1'
+__version__ = '1.0.2-rc1'
NAME = 'Solaar'
| {"golden_diff": "diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py\n--- a/lib/solaar/__init__.py\n+++ b/lib/solaar/__init__.py\n@@ -19,5 +19,5 @@\n \n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-__version__ = '1.0.1'\n+__version__ = '1.0.2-rc1'\n NAME = 'Solaar'\n", "issue": "better identification of Solaar versions\n`git describe` produces\r\n0.9.2-339-g39791be\r\nInstead it should produce something based on 1.0.1\r\n`git describe --tags` produces\r\n1.0.1-58-g39791be\r\nwhich is much better.\r\n\r\nI think that all that is required is to upgrade the 1.0.1 tag that already exists.\n", "before_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__version__ = '1.0.1'\nNAME = 'Solaar'\n", "path": "lib/solaar/__init__.py"}], "after_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__version__ = '1.0.2-rc1'\nNAME = 'Solaar'\n", "path": "lib/solaar/__init__.py"}]} | 632 | 107 |
gh_patches_debug_9857 | rasdani/github-patches | git_diff | saulpw__visidata-2160 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[parquet] can't load parquet directory anymore: `IsADirectoryError`
**Small description**
Hi @saulpw @anjakefala @takacsd - it seems that forcing opening the path as file with `.open()` - introduced with #2133 - breaks the use case where the multiple parquet files are stored in a directory, and this directory is then read by visidata. This is common with Hive partitioning or when working with spark. A simple fix would be to check if the path is a directory with `os.path.is_dir()` and then retaining old behavior of passing it as a string to `read_table()`. If it is not an existing directory, we move to the new way of opening as a binary buffer.
I have already added this workaround to my clone of visidata, and it fixes my issue, but maybe you have some better ideas how to handle it instead of `if-else` statement in the `ParquetSheet`.
**Expected result**
```bash
vd -f parquet parquet_dir
```
should load a parquet into visidata
**Actual result with screenshot**

**Additional context**
```bash
# freshest develop
visidata@9fd728b72c115e50e99c24b455caaf020381b48e
pyarrow==12.0.0
python 3.10.2
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/parquet.py`
Content:
```
1 from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd
2 from collections import defaultdict
3
4
5 @VisiData.api
6 def open_parquet(vd, p):
7 return ParquetSheet(p.name, source=p)
8
9
10 class ParquetColumn(Column):
11 def calcValue(self, row):
12 val = self.source[row["__rownum__"]]
13 if val.type == 'large_string':
14 return memoryview(val.as_buffer())[:2**20].tobytes().decode('utf-8')
15 else:
16 return val.as_py()
17
18
19 class ParquetSheet(Sheet):
20 # rowdef: {'__rownum__':int, parquet_col:overridden_value, ...}
21 def iterload(self):
22 pa = vd.importExternal("pyarrow", "pyarrow")
23 pq = vd.importExternal("pyarrow.parquet", "pyarrow")
24 from visidata.loaders.arrow import arrow_to_vdtype
25
26 with self.source.open('rb') as f:
27 self.tbl = pq.read_table(f)
28
29 self.columns = []
30 for colname, col in zip(self.tbl.column_names, self.tbl.columns):
31 c = ParquetColumn(colname,
32 type=arrow_to_vdtype(col.type),
33 source=col,
34 cache=(col.type.id == pa.lib.Type_LARGE_STRING))
35 self.addColumn(c)
36
37 for i in range(self.tbl.num_rows):
38 yield dict(__rownum__=i)
39
40
41 @VisiData.api
42 def save_parquet(vd, p, sheet):
43 pa = vd.importExternal("pyarrow")
44 pq = vd.importExternal("pyarrow.parquet", "pyarrow")
45
46 typemap = {
47 anytype: pa.string(),
48 int: pa.int64(),
49 vlen: pa.int64(),
50 float: pa.float64(),
51 str: pa.string(),
52 date: pa.date64(),
53 # list: pa.array(),
54 }
55
56 for t in vd.numericTypes:
57 if t not in typemap:
58 typemap[t] = pa.float64()
59
60 databycol = defaultdict(list) # col -> [values]
61
62 for typedvals in sheet.iterdispvals(format=False):
63 for col, val in typedvals.items():
64 if isinstance(val, TypedWrapper):
65 val = None
66
67 databycol[col].append(val)
68
69 data = [
70 pa.array(vals, type=typemap.get(col.type, pa.string()))
71 for col, vals in databycol.items()
72 ]
73
74 schema = pa.schema(
75 [(c.name, typemap.get(c.type, pa.string())) for c in sheet.visibleCols]
76 )
77 with p.open_bytes(mode="w") as outf:
78 with pq.ParquetWriter(outf, schema) as writer:
79 writer.write_batch(
80 pa.record_batch(data, names=[c.name for c in sheet.visibleCols])
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/loaders/parquet.py b/visidata/loaders/parquet.py
--- a/visidata/loaders/parquet.py
+++ b/visidata/loaders/parquet.py
@@ -23,8 +23,11 @@
pq = vd.importExternal("pyarrow.parquet", "pyarrow")
from visidata.loaders.arrow import arrow_to_vdtype
- with self.source.open('rb') as f:
- self.tbl = pq.read_table(f)
+ if self.source.is_dir():
+ self.tbl = pq.read_table(str(self.source))
+ else:
+ with self.source.open('rb') as f:
+ self.tbl = pq.read_table(f)
self.columns = []
for colname, col in zip(self.tbl.column_names, self.tbl.columns):
| {"golden_diff": "diff --git a/visidata/loaders/parquet.py b/visidata/loaders/parquet.py\n--- a/visidata/loaders/parquet.py\n+++ b/visidata/loaders/parquet.py\n@@ -23,8 +23,11 @@\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n from visidata.loaders.arrow import arrow_to_vdtype\n \n- with self.source.open('rb') as f:\n- self.tbl = pq.read_table(f)\n+ if self.source.is_dir():\n+ self.tbl = pq.read_table(str(self.source))\n+ else: \n+ with self.source.open('rb') as f:\n+ self.tbl = pq.read_table(f)\n \n self.columns = []\n for colname, col in zip(self.tbl.column_names, self.tbl.columns):\n", "issue": "[parquet] can't load parquet directory anymore: `IsADirectoryError`\n**Small description**\r\n\r\nHi @saulpw @anjakefala @takacsd - it seems that forcing opening the path as file with `.open()` - introduced with #2133 - breaks the use case where the multiple parquet files are stored in a directory, and this directory is then read by visidata. This is common with Hive partitioning or when working with spark. A simple fix would be to check if the path is a directory with `os.path.is_dir()` and then retaining old behavior of passing it as a string to `read_table()`. If it is not an existing directory, we move to the new way of opening as a binary buffer.\r\n\r\nI have already added this workaround to my clone of visidata, and it fixes my issue, but maybe you have some better ideas how to handle it instead of `if-else` statement in the `ParquetSheet`.\r\n\r\n**Expected result**\r\n\r\n```bash\r\nvd -f parquet parquet_dir\r\n```\r\nshould load a parquet into visidata\r\n\r\n**Actual result with screenshot**\r\n\r\n\r\n**Additional context**\r\n\r\n```bash\r\n# freshest develop\r\nvisidata@9fd728b72c115e50e99c24b455caaf020381b48e\r\n\r\npyarrow==12.0.0\r\npython 3.10.2\r\n```\r\n\n", "before_files": [{"content": "from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd\nfrom collections import defaultdict\n\n\[email protected]\ndef open_parquet(vd, p):\n return ParquetSheet(p.name, source=p)\n\n\nclass ParquetColumn(Column):\n def calcValue(self, row):\n val = self.source[row[\"__rownum__\"]]\n if val.type == 'large_string':\n return memoryview(val.as_buffer())[:2**20].tobytes().decode('utf-8')\n else:\n return val.as_py()\n\n\nclass ParquetSheet(Sheet):\n # rowdef: {'__rownum__':int, parquet_col:overridden_value, ...}\n def iterload(self):\n pa = vd.importExternal(\"pyarrow\", \"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n from visidata.loaders.arrow import arrow_to_vdtype\n\n with self.source.open('rb') as f:\n self.tbl = pq.read_table(f)\n\n self.columns = []\n for colname, col in zip(self.tbl.column_names, self.tbl.columns):\n c = ParquetColumn(colname,\n type=arrow_to_vdtype(col.type),\n source=col,\n cache=(col.type.id == pa.lib.Type_LARGE_STRING))\n self.addColumn(c)\n\n for i in range(self.tbl.num_rows):\n yield dict(__rownum__=i)\n\n\[email protected]\ndef save_parquet(vd, p, sheet):\n pa = vd.importExternal(\"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n\n typemap = {\n anytype: pa.string(),\n int: pa.int64(),\n vlen: pa.int64(),\n float: pa.float64(),\n str: pa.string(),\n date: pa.date64(),\n # list: pa.array(),\n }\n\n for t in vd.numericTypes:\n if t not in typemap:\n typemap[t] = pa.float64()\n\n databycol = defaultdict(list) # col -> [values]\n\n for typedvals in sheet.iterdispvals(format=False):\n for col, val in typedvals.items():\n if isinstance(val, TypedWrapper):\n val = None\n\n 
databycol[col].append(val)\n\n data = [\n pa.array(vals, type=typemap.get(col.type, pa.string()))\n for col, vals in databycol.items()\n ]\n\n schema = pa.schema(\n [(c.name, typemap.get(c.type, pa.string())) for c in sheet.visibleCols]\n )\n with p.open_bytes(mode=\"w\") as outf:\n with pq.ParquetWriter(outf, schema) as writer:\n writer.write_batch(\n pa.record_batch(data, names=[c.name for c in sheet.visibleCols])\n )\n", "path": "visidata/loaders/parquet.py"}], "after_files": [{"content": "from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd\nfrom collections import defaultdict\n\n\[email protected]\ndef open_parquet(vd, p):\n return ParquetSheet(p.name, source=p)\n\n\nclass ParquetColumn(Column):\n def calcValue(self, row):\n val = self.source[row[\"__rownum__\"]]\n if val.type == 'large_string':\n return memoryview(val.as_buffer())[:2**20].tobytes().decode('utf-8')\n else:\n return val.as_py()\n\n\nclass ParquetSheet(Sheet):\n # rowdef: {'__rownum__':int, parquet_col:overridden_value, ...}\n def iterload(self):\n pa = vd.importExternal(\"pyarrow\", \"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n from visidata.loaders.arrow import arrow_to_vdtype\n\n if self.source.is_dir():\n self.tbl = pq.read_table(str(self.source))\n else: \n with self.source.open('rb') as f:\n self.tbl = pq.read_table(f)\n\n self.columns = []\n for colname, col in zip(self.tbl.column_names, self.tbl.columns):\n c = ParquetColumn(colname,\n type=arrow_to_vdtype(col.type),\n source=col,\n cache=(col.type.id == pa.lib.Type_LARGE_STRING))\n self.addColumn(c)\n\n for i in range(self.tbl.num_rows):\n yield dict(__rownum__=i)\n\n\[email protected]\ndef save_parquet(vd, p, sheet):\n pa = vd.importExternal(\"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n\n typemap = {\n anytype: pa.string(),\n int: pa.int64(),\n vlen: pa.int64(),\n float: pa.float64(),\n str: pa.string(),\n date: pa.date64(),\n # list: pa.array(),\n }\n\n for t in vd.numericTypes:\n if t not in typemap:\n typemap[t] = pa.float64()\n\n databycol = defaultdict(list) # col -> [values]\n\n for typedvals in sheet.iterdispvals(format=False):\n for col, val in typedvals.items():\n if isinstance(val, TypedWrapper):\n val = None\n\n databycol[col].append(val)\n\n data = [\n pa.array(vals, type=typemap.get(col.type, pa.string()))\n for col, vals in databycol.items()\n ]\n\n schema = pa.schema(\n [(c.name, typemap.get(c.type, pa.string())) for c in sheet.visibleCols]\n )\n with p.open_bytes(mode=\"w\") as outf:\n with pq.ParquetWriter(outf, schema) as writer:\n writer.write_batch(\n pa.record_batch(data, names=[c.name for c in sheet.visibleCols])\n )\n", "path": "visidata/loaders/parquet.py"}]} | 1,415 | 182 |
gh_patches_debug_17528 | rasdani/github-patches | git_diff | allegro__ralph-3222 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Table 'ralph_ng.transitions_transition' doesn't exist
when I follow the document to setup a develop environment, I met the error" default: django.db.utils.ProgrammingError: (1146, "Table 'ralph_ng.transitions_transition' doesn't exist") ". I think it is because there are no such tables when newly install ralph3 develop environment but ralph3 try to migrate them(from ralph2). I am on mac and have download the default box manually which will be used in vagrant up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ralph/lib/transitions/checks.py`
Content:
```
1 from django.core.checks import Error
2 from django.db.utils import OperationalError
3 from django.template.base import TemplateDoesNotExist
4 from django.template.loader import get_template
5
6
7 def check_transition_templates(transition_templates):
8 # to prevent AppRegistryNotReady
9 from ralph.lib.transitions.models import Transition
10
11 errors = []
12 if transition_templates:
13 if not isinstance(transition_templates, (list, tuple)):
14 errors.append(Error(
15 'TRANSITION_TEMPLATES must be a list or a tuple',
16 id='transitions.E001'
17 ))
18 else:
19 for index, item in enumerate(transition_templates):
20 try:
21 path, template = item
22 except (ValueError, TypeError):
23 errors.append(Error(
24 'Element #{} must be a two elements tuple'.format(
25 index
26 ),
27 id='transitions.E003'
28 ))
29 continue
30 try:
31 get_template(path)
32 except TemplateDoesNotExist:
33 errors.append(Error(
34 'Template {} ({}) doesn\'t exist'.format(
35 template, path
36 ),
37 hint='Check TRANSITION_TEMPLATES settings',
38 id='transitions.E002'
39 ))
40 excluded_templates = ['']
41 if transition_templates:
42 try:
43 excluded_templates.extend(
44 {template for template, _ in transition_templates}
45 )
46 except ValueError:
47 pass
48 transitions_with_custom_templates = Transition.objects.exclude(
49 template_name__in=excluded_templates
50 )
51 try:
52 for transition in transitions_with_custom_templates:
53 errors.append(Error(
54 'Template {} for {} transition is '
55 'defined only in transition'.format(
56 transition.template_name, transition
57 ),
58 hint=(
59 'Change your TRANSITION_TEMPLATES settings by adding'
60 ' ({}, "Your template name") and then '
61 'edit {} transition').format(
62 transition.template_name, transition
63 ),
64 id='transitions.E004'
65 ))
66 except OperationalError:
67 pass
68 return errors
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ralph/lib/transitions/checks.py b/src/ralph/lib/transitions/checks.py
--- a/src/ralph/lib/transitions/checks.py
+++ b/src/ralph/lib/transitions/checks.py
@@ -1,9 +1,14 @@
+import logging
+
from django.core.checks import Error
-from django.db.utils import OperationalError
+from django.db.utils import DatabaseError
from django.template.base import TemplateDoesNotExist
from django.template.loader import get_template
+logger = logging.getLogger(__name__)
+
+
def check_transition_templates(transition_templates):
# to prevent AppRegistryNotReady
from ralph.lib.transitions.models import Transition
@@ -63,6 +68,6 @@
),
id='transitions.E004'
))
- except OperationalError:
- pass
+ except DatabaseError as e:
+ logger.error(e)
return errors
| {"golden_diff": "diff --git a/src/ralph/lib/transitions/checks.py b/src/ralph/lib/transitions/checks.py\n--- a/src/ralph/lib/transitions/checks.py\n+++ b/src/ralph/lib/transitions/checks.py\n@@ -1,9 +1,14 @@\n+import logging\n+\n from django.core.checks import Error\n-from django.db.utils import OperationalError\n+from django.db.utils import DatabaseError\n from django.template.base import TemplateDoesNotExist\n from django.template.loader import get_template\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n def check_transition_templates(transition_templates):\n # to prevent AppRegistryNotReady\n from ralph.lib.transitions.models import Transition\n@@ -63,6 +68,6 @@\n ),\n id='transitions.E004'\n ))\n- except OperationalError:\n- pass\n+ except DatabaseError as e:\n+ logger.error(e)\n return errors\n", "issue": "Table 'ralph_ng.transitions_transition' doesn't exist\nwhen I follow the document to setup a develop environment, I met the error\" default: django.db.utils.ProgrammingError: (1146, \"Table 'ralph_ng.transitions_transition' doesn't exist\") \". I think it is because there are no such tables when newly install ralph3 develop environment but ralph3 try to migrate them(from ralph2). I am on mac and have download the default box manually which will be used in vagrant up.\n", "before_files": [{"content": "from django.core.checks import Error\nfrom django.db.utils import OperationalError\nfrom django.template.base import TemplateDoesNotExist\nfrom django.template.loader import get_template\n\n\ndef check_transition_templates(transition_templates):\n # to prevent AppRegistryNotReady\n from ralph.lib.transitions.models import Transition\n\n errors = []\n if transition_templates:\n if not isinstance(transition_templates, (list, tuple)):\n errors.append(Error(\n 'TRANSITION_TEMPLATES must be a list or a tuple',\n id='transitions.E001'\n ))\n else:\n for index, item in enumerate(transition_templates):\n try:\n path, template = item\n except (ValueError, TypeError):\n errors.append(Error(\n 'Element #{} must be a two elements tuple'.format(\n index\n ),\n id='transitions.E003'\n ))\n continue\n try:\n get_template(path)\n except TemplateDoesNotExist:\n errors.append(Error(\n 'Template {} ({}) doesn\\'t exist'.format(\n template, path\n ),\n hint='Check TRANSITION_TEMPLATES settings',\n id='transitions.E002'\n ))\n excluded_templates = ['']\n if transition_templates:\n try:\n excluded_templates.extend(\n {template for template, _ in transition_templates}\n )\n except ValueError:\n pass\n transitions_with_custom_templates = Transition.objects.exclude(\n template_name__in=excluded_templates\n )\n try:\n for transition in transitions_with_custom_templates:\n errors.append(Error(\n 'Template {} for {} transition is '\n 'defined only in transition'.format(\n transition.template_name, transition\n ),\n hint=(\n 'Change your TRANSITION_TEMPLATES settings by adding'\n ' ({}, \"Your template name\") and then '\n 'edit {} transition').format(\n transition.template_name, transition\n ),\n id='transitions.E004'\n ))\n except OperationalError:\n pass\n return errors\n", "path": "src/ralph/lib/transitions/checks.py"}], "after_files": [{"content": "import logging\n\nfrom django.core.checks import Error\nfrom django.db.utils import DatabaseError\nfrom django.template.base import TemplateDoesNotExist\nfrom django.template.loader import get_template\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_transition_templates(transition_templates):\n # to prevent AppRegistryNotReady\n from 
ralph.lib.transitions.models import Transition\n\n errors = []\n if transition_templates:\n if not isinstance(transition_templates, (list, tuple)):\n errors.append(Error(\n 'TRANSITION_TEMPLATES must be a list or a tuple',\n id='transitions.E001'\n ))\n else:\n for index, item in enumerate(transition_templates):\n try:\n path, template = item\n except (ValueError, TypeError):\n errors.append(Error(\n 'Element #{} must be a two elements tuple'.format(\n index\n ),\n id='transitions.E003'\n ))\n continue\n try:\n get_template(path)\n except TemplateDoesNotExist:\n errors.append(Error(\n 'Template {} ({}) doesn\\'t exist'.format(\n template, path\n ),\n hint='Check TRANSITION_TEMPLATES settings',\n id='transitions.E002'\n ))\n excluded_templates = ['']\n if transition_templates:\n try:\n excluded_templates.extend(\n {template for template, _ in transition_templates}\n )\n except ValueError:\n pass\n transitions_with_custom_templates = Transition.objects.exclude(\n template_name__in=excluded_templates\n )\n try:\n for transition in transitions_with_custom_templates:\n errors.append(Error(\n 'Template {} for {} transition is '\n 'defined only in transition'.format(\n transition.template_name, transition\n ),\n hint=(\n 'Change your TRANSITION_TEMPLATES settings by adding'\n ' ({}, \"Your template name\") and then '\n 'edit {} transition').format(\n transition.template_name, transition\n ),\n id='transitions.E004'\n ))\n except DatabaseError as e:\n logger.error(e)\n return errors\n", "path": "src/ralph/lib/transitions/checks.py"}]} | 926 | 202 |
gh_patches_debug_33816 | rasdani/github-patches | git_diff | marshmallow-code__webargs-464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RFC: Only accept delimited string in DelimitedList
`DelimitedList` accepts either a list or a delimited string (e.g. "foo,bar,baz").
I'd like to make it more strict by only accepting a delimited list. Rather than adding a `strict` parameter, I'm thinking of dropping the whole "also accept a list" feature.
Any reason to support both?
I understand it inherits from `List` because once the string is parsed, it can be deserialized as a normal list. But are there cases where you'd expect either a list or a delimited string?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/fields.py`
Content:
```
1 """Field classes.
2
3 Includes all fields from `marshmallow.fields` in addition to a custom
4 `Nested` field and `DelimitedList`.
5
6 All fields can optionally take a special `location` keyword argument, which
7 tells webargs where to parse the request argument from.
8
9 .. code-block:: python
10
11 args = {
12 "active": fields.Bool(location="query"),
13 "content_type": fields.Str(data_key="Content-Type", location="headers"),
14 }
15
16 Note: `data_key` replaced `load_from` in marshmallow 3.
17 When using marshmallow 2, use `load_from`.
18 """
19 import marshmallow as ma
20
21 # Expose all fields from marshmallow.fields.
22 from marshmallow.fields import * # noqa: F40
23 from webargs.compat import MARSHMALLOW_VERSION_INFO
24 from webargs.dict2schema import dict2schema
25
26 __all__ = ["DelimitedList"] + ma.fields.__all__
27
28
29 class Nested(ma.fields.Nested):
30 """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
31 the first argument, which will be converted to a `marshmallow.Schema`.
32
33 .. note::
34
35 The schema class here will always be `marshmallow.Schema`, regardless
36 of whether a custom schema class is set on the parser. Pass an explicit schema
37 class if necessary.
38 """
39
40 def __init__(self, nested, *args, **kwargs):
41 if isinstance(nested, dict):
42 nested = dict2schema(nested)
43 super().__init__(nested, *args, **kwargs)
44
45
46 class DelimitedList(ma.fields.List):
47 """Same as `marshmallow.fields.List`, except can load from either a list or
48 a delimited string (e.g. "foo,bar,baz").
49
50 :param Field cls_or_instance: A field class or instance.
51 :param str delimiter: Delimiter between values.
52 :param bool as_string: Dump values to string.
53 """
54
55 delimiter = ","
56
57 def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
58 self.delimiter = delimiter or self.delimiter
59 self.as_string = as_string
60 super().__init__(cls_or_instance, **kwargs)
61
62 def _serialize(self, value, attr, obj):
63 ret = super()._serialize(value, attr, obj)
64 if self.as_string:
65 return self.delimiter.join(format(each) for each in ret)
66 return ret
67
68 def _deserialize(self, value, attr, data, **kwargs):
69 try:
70 ret = (
71 value
72 if ma.utils.is_iterable_but_not_string(value)
73 else value.split(self.delimiter)
74 )
75 except AttributeError:
76 if MARSHMALLOW_VERSION_INFO[0] < 3:
77 self.fail("invalid")
78 else:
79 raise self.make_error("invalid")
80 return super()._deserialize(ret, attr, data, **kwargs)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -44,37 +44,35 @@
class DelimitedList(ma.fields.List):
- """Same as `marshmallow.fields.List`, except can load from either a list or
- a delimited string (e.g. "foo,bar,baz").
+ """A field which is similar to a List, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like List, it can be given a nested field type which it will use to
+ de/serialize each element of the list.
:param Field cls_or_instance: A field class or instance.
:param str delimiter: Delimiter between values.
- :param bool as_string: Dump values to string.
"""
+ default_error_messages = {"invalid": "Not a valid delimited list."}
delimiter = ","
- def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
+ def __init__(self, cls_or_instance, delimiter=None, **kwargs):
self.delimiter = delimiter or self.delimiter
- self.as_string = as_string
super().__init__(cls_or_instance, **kwargs)
def _serialize(self, value, attr, obj):
- ret = super()._serialize(value, attr, obj)
- if self.as_string:
- return self.delimiter.join(format(each) for each in ret)
- return ret
+ # serializing will start with List serialization, so that we correctly
+ # output lists of non-primitive types, e.g. DelimitedList(DateTime)
+ return self.delimiter.join(
+ format(each) for each in super()._serialize(value, attr, obj)
+ )
def _deserialize(self, value, attr, data, **kwargs):
- try:
- ret = (
- value
- if ma.utils.is_iterable_but_not_string(value)
- else value.split(self.delimiter)
- )
- except AttributeError:
+ # attempting to deserialize from a non-string source is an error
+ if not isinstance(value, (str, bytes)):
if MARSHMALLOW_VERSION_INFO[0] < 3:
self.fail("invalid")
else:
raise self.make_error("invalid")
- return super()._deserialize(ret, attr, data, **kwargs)
+ return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
| {"golden_diff": "diff --git a/src/webargs/fields.py b/src/webargs/fields.py\n--- a/src/webargs/fields.py\n+++ b/src/webargs/fields.py\n@@ -44,37 +44,35 @@\n \n \n class DelimitedList(ma.fields.List):\n- \"\"\"Same as `marshmallow.fields.List`, except can load from either a list or\n- a delimited string (e.g. \"foo,bar,baz\").\n+ \"\"\"A field which is similar to a List, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like List, it can be given a nested field type which it will use to\n+ de/serialize each element of the list.\n \n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n- :param bool as_string: Dump values to string.\n \"\"\"\n \n+ default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n \n- def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):\n+ def __init__(self, cls_or_instance, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n- self.as_string = as_string\n super().__init__(cls_or_instance, **kwargs)\n \n def _serialize(self, value, attr, obj):\n- ret = super()._serialize(value, attr, obj)\n- if self.as_string:\n- return self.delimiter.join(format(each) for each in ret)\n- return ret\n+ # serializing will start with List serialization, so that we correctly\n+ # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n+ return self.delimiter.join(\n+ format(each) for each in super()._serialize(value, attr, obj)\n+ )\n \n def _deserialize(self, value, attr, data, **kwargs):\n- try:\n- ret = (\n- value\n- if ma.utils.is_iterable_but_not_string(value)\n- else value.split(self.delimiter)\n- )\n- except AttributeError:\n+ # attempting to deserialize from a non-string source is an error\n+ if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n- return super()._deserialize(ret, attr, data, **kwargs)\n+ return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n", "issue": "RFC: Only accept delimited string in DelimitedList\n`DelimitedList` accepts either a list or a delimited string (e.g. \"foo,bar,baz\").\r\n\r\nI'd like to make it more strict by only accepting a delimited list. Rather than adding a `strict` parameter, I'm thinking of dropping the whole \"also accept a list\" feature.\r\n\r\nAny reason to support both?\r\n\r\nI understand it inherits from `List` because once the string is parsed, it can be deserialized as a normal list. But are there cases where you'd expect either a list or a delimited string?\n", "before_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. 
code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedList(ma.fields.List):\n \"\"\"Same as `marshmallow.fields.List`, except can load from either a list or\n a delimited string (e.g. \"foo,bar,baz\").\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n :param bool as_string: Dump values to string.\n \"\"\"\n\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):\n self.delimiter = delimiter or self.delimiter\n self.as_string = as_string\n super().__init__(cls_or_instance, **kwargs)\n\n def _serialize(self, value, attr, obj):\n ret = super()._serialize(value, attr, obj)\n if self.as_string:\n return self.delimiter.join(format(each) for each in ret)\n return ret\n\n def _deserialize(self, value, attr, data, **kwargs):\n try:\n ret = (\n value\n if ma.utils.is_iterable_but_not_string(value)\n else value.split(self.delimiter)\n )\n except AttributeError:\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(ret, attr, data, **kwargs)\n", "path": "src/webargs/fields.py"}], "after_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. 
Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedList(ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n def _serialize(self, value, attr, obj):\n # serializing will start with List serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n", "path": "src/webargs/fields.py"}]} | 1,162 | 565 |
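A minimal usage sketch of the stricter `DelimitedList` behaviour described in the webargs record above. The schema and argument names are hypothetical, and webargs 6+ with marshmallow 3 is assumed:

```python
# Sketch only: loading a DelimitedList from a delimited string.
import marshmallow as ma
from webargs import fields


class QueryArgsSchema(ma.Schema):
    # "tags" is a made-up argument name used purely for illustration.
    tags = fields.DelimitedList(fields.Str())


schema = QueryArgsSchema()
print(schema.load({"tags": "foo,bar,baz"}))  # {'tags': ['foo', 'bar', 'baz']}
# After the change above, passing an actual list instead of a delimited
# string raises a ValidationError ("Not a valid delimited list.").
```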
gh_patches_debug_49452 | rasdani/github-patches | git_diff | wagtail__wagtail-840 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Paginator and search pagination expect different parameters for page
The Paginator (as in `django.core.paginator`) used pretty much everywhere uses `page` as the query parameter. The search view, however, [expects](https://github.com/torchbox/wagtail/blob/100797796df0bc8ca96035092f32a9275d2b3713/wagtail/wagtailsearch/views/queries.py#L28) a `p` query parameter for pagination.
While not a bug, it is a bit confusing and makes it less elegant to share a pagination include. Certainly made me scratch my head.
Worth a PR?
Cheers,
Dan
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailsearch/views/frontend.py`
Content:
```
1 import json
2
3 from django.conf import settings
4 from django.shortcuts import render
5 from django.http import HttpResponse
6 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
7
8 from wagtail.wagtailcore import models
9 from wagtail.wagtailsearch.models import Query
10
11
12 def search(
13 request,
14 template=None,
15 template_ajax=None,
16 results_per_page=10,
17 use_json=False,
18 json_attrs=['title', 'url'],
19 show_unpublished=False,
20 search_title_only=False,
21 extra_filters={},
22 path=None,
23 ):
24
25 # Get default templates
26 if template is None:
27 if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):
28 template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE
29 else:
30 template = 'wagtailsearch/search_results.html'
31
32 if template_ajax is None:
33 if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):
34 template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX
35 else:
36 template_ajax = template
37
38 # Get query string and page from GET paramters
39 query_string = request.GET.get('q', '')
40 page = request.GET.get('p', 1)
41
42 # Search
43 if query_string != '':
44 search_results = models.Page.search(
45 query_string,
46 show_unpublished=show_unpublished,
47 search_title_only=search_title_only,
48 extra_filters=extra_filters,
49 path=path if path else request.site.root_page.path
50 )
51
52 # Get query object
53 query = Query.get(query_string)
54
55 # Add hit
56 query.add_hit()
57
58 # Pagination
59 paginator = Paginator(search_results, results_per_page)
60 try:
61 search_results = paginator.page(page)
62 except PageNotAnInteger:
63 search_results = paginator.page(1)
64 except EmptyPage:
65 search_results = paginator.page(paginator.num_pages)
66 else:
67 query = None
68 search_results = None
69
70 if use_json: # Return a json response
71 if search_results:
72 search_results_json = []
73 for result in search_results:
74 result_specific = result.specific
75
76 search_results_json.append(dict(
77 (attr, getattr(result_specific, attr))
78 for attr in json_attrs
79 if hasattr(result_specific, attr)
80 ))
81
82 return HttpResponse(json.dumps(search_results_json))
83 else:
84 return HttpResponse('[]')
85 else: # Render a template
86 if request.is_ajax() and template_ajax:
87 template = template_ajax
88
89 return render(request, template, dict(
90 query_string=query_string,
91 search_results=search_results,
92 is_ajax=request.is_ajax(),
93 query=query
94 ))
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailsearch/views/frontend.py b/wagtail/wagtailsearch/views/frontend.py
--- a/wagtail/wagtailsearch/views/frontend.py
+++ b/wagtail/wagtailsearch/views/frontend.py
@@ -37,7 +37,7 @@
# Get query string and page from GET paramters
query_string = request.GET.get('q', '')
- page = request.GET.get('p', 1)
+ page = request.GET.get('page', request.GET.get('p', 1))
# Search
if query_string != '':
| {"golden_diff": "diff --git a/wagtail/wagtailsearch/views/frontend.py b/wagtail/wagtailsearch/views/frontend.py\n--- a/wagtail/wagtailsearch/views/frontend.py\n+++ b/wagtail/wagtailsearch/views/frontend.py\n@@ -37,7 +37,7 @@\n \n # Get query string and page from GET paramters\n query_string = request.GET.get('q', '')\n- page = request.GET.get('p', 1)\n+ page = request.GET.get('page', request.GET.get('p', 1))\n \n # Search\n if query_string != '':\n", "issue": "Paginator and search pagination expect different parameters for page\nThe Paginator (as in `django.core.paginator`) used pretty much everywhere uses `page` as the query parameter. The search view, however, [expects](https://github.com/torchbox/wagtail/blob/100797796df0bc8ca96035092f32a9275d2b3713/wagtail/wagtailsearch/views/queries.py#L28) a `p` query parameter for pagination.\n\nWhile not a bug, it is a bit confusing and makes it less elegant to share a pagination include. Certainly made me scratch my head.\n\nWorth a PR?\n\nCheers,\nDan\n\n", "before_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore import models\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(\n request,\n template=None,\n template_ajax=None,\n results_per_page=10,\n use_json=False,\n json_attrs=['title', 'url'],\n show_unpublished=False,\n search_title_only=False,\n extra_filters={},\n path=None,\n ):\n\n # Get default templates\n if template is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):\n template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE\n else:\n template = 'wagtailsearch/search_results.html'\n\n if template_ajax is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):\n template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX\n else:\n template_ajax = template\n\n # Get query string and page from GET paramters\n query_string = request.GET.get('q', '')\n page = request.GET.get('p', 1)\n\n # Search\n if query_string != '':\n search_results = models.Page.search(\n query_string,\n show_unpublished=show_unpublished,\n search_title_only=search_title_only,\n extra_filters=extra_filters,\n path=path if path else request.site.root_page.path\n )\n\n # Get query object\n query = Query.get(query_string)\n\n # Add hit\n query.add_hit()\n\n # Pagination\n paginator = Paginator(search_results, results_per_page)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n else:\n query = None\n search_results = None\n\n if use_json: # Return a json response\n if search_results:\n search_results_json = []\n for result in search_results:\n result_specific = result.specific\n\n search_results_json.append(dict(\n (attr, getattr(result_specific, attr))\n for attr in json_attrs\n if hasattr(result_specific, attr)\n ))\n\n return HttpResponse(json.dumps(search_results_json))\n else:\n return HttpResponse('[]')\n else: # Render a template\n if request.is_ajax() and template_ajax:\n template = template_ajax\n\n return render(request, template, dict(\n query_string=query_string,\n search_results=search_results,\n is_ajax=request.is_ajax(),\n query=query\n ))\n", "path": "wagtail/wagtailsearch/views/frontend.py"}], "after_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom 
django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore import models\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(\n request,\n template=None,\n template_ajax=None,\n results_per_page=10,\n use_json=False,\n json_attrs=['title', 'url'],\n show_unpublished=False,\n search_title_only=False,\n extra_filters={},\n path=None,\n ):\n\n # Get default templates\n if template is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):\n template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE\n else:\n template = 'wagtailsearch/search_results.html'\n\n if template_ajax is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):\n template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX\n else:\n template_ajax = template\n\n # Get query string and page from GET paramters\n query_string = request.GET.get('q', '')\n page = request.GET.get('page', request.GET.get('p', 1))\n\n # Search\n if query_string != '':\n search_results = models.Page.search(\n query_string,\n show_unpublished=show_unpublished,\n search_title_only=search_title_only,\n extra_filters=extra_filters,\n path=path if path else request.site.root_page.path\n )\n\n # Get query object\n query = Query.get(query_string)\n\n # Add hit\n query.add_hit()\n\n # Pagination\n paginator = Paginator(search_results, results_per_page)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n else:\n query = None\n search_results = None\n\n if use_json: # Return a json response\n if search_results:\n search_results_json = []\n for result in search_results:\n result_specific = result.specific\n\n search_results_json.append(dict(\n (attr, getattr(result_specific, attr))\n for attr in json_attrs\n if hasattr(result_specific, attr)\n ))\n\n return HttpResponse(json.dumps(search_results_json))\n else:\n return HttpResponse('[]')\n else: # Render a template\n if request.is_ajax() and template_ajax:\n template = template_ajax\n\n return render(request, template, dict(\n query_string=query_string,\n search_results=search_results,\n is_ajax=request.is_ajax(),\n query=query\n ))\n", "path": "wagtail/wagtailsearch/views/frontend.py"}]} | 1,166 | 131 |
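A small sketch of the fallback introduced in the wagtail diff above: the standard `page` parameter takes precedence while the legacy `p` parameter keeps working. A plain dict stands in for `request.GET`, purely for illustration:

```python
# Sketch only: lookup order used by the patched search view.
def page_number(query_params):
    return query_params.get('page', query_params.get('p', 1))


print(page_number({'page': '3'}))  # '3'  (shared Paginator-style parameter)
print(page_number({'p': '2'}))     # '2'  (legacy search parameter still honoured)
print(page_number({}))             # 1    (default first page)
```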
gh_patches_debug_10307 | rasdani/github-patches | git_diff | getnikola__nikola-2238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deploy crashes with state system
Will investigate later.
``` pytb
Traceback (most recent call last):
File "/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/doit_cmd.py", line 168, in run
return command.parse_execute(args)
File "/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/cmd_base.py", line 122, in parse_execute
return self.execute(params, args)
File "/home/kwpolska/git/nikola/nikola/plugin_categories.py", line 124, in execute
return self._execute(options, args)
File "/home/kwpolska/git/nikola/nikola/plugins/command/deploy.py", line 135, in _execute
self.site.state.set('last_deploy', new_deploy.isoformat())
File "/home/kwpolska/git/nikola/nikola/state.py", line 64, in set
self._save()
File "/home/kwpolska/git/nikola/nikola/state.py", line 82, in _save
json.dump(self._local.data, outf, sort_keys=True, indent=2)
File "/usr/lib64/python3.5/json/__init__.py", line 179, in dump
fp.write(chunk)
File "/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/tempfile.py", line 483, in func_wrapper
return func(*args, **kwargs)
TypeError: a bytes-like object is required, not 'str'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/state.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2016 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Persistent state implementation."""
28
29 import json
30 import os
31 import shutil
32 import tempfile
33 import threading
34
35 from . import utils
36
37
38 class Persistor():
39 """Persist stuff in a place.
40
41 This is an intentionally dumb implementation. It is *not* meant to be
42 fast, or useful for arbitrarily large data. Use lightly.
43
44 Intentionally it has no namespaces, sections, etc. Use as a
45 responsible adult.
46 """
47
48 def __init__(self, path):
49 """Where do you want it persisted."""
50 self._path = path
51 utils.makedirs(os.path.dirname(path))
52 self._local = threading.local()
53 self._local.data = {}
54
55 def get(self, key):
56 """Get data stored in key."""
57 self._read()
58 return self._local.data.get(key)
59
60 def set(self, key, value):
61 """Store value in key."""
62 self._read()
63 self._local.data[key] = value
64 self._save()
65
66 def delete(self, key):
67 """Delete key and the value it contains."""
68 self._read()
69 if key in self._local.data:
70 self._local.data.pop(key)
71 self._save()
72
73 def _read(self):
74 if os.path.isfile(self._path):
75 with open(self._path) as inf:
76 self._local.data = json.load(inf)
77
78 def _save(self):
79 dname = os.path.dirname(self._path)
80 with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:
81 tname = outf.name
82 json.dump(self._local.data, outf, sort_keys=True, indent=2)
83 shutil.move(tname, self._path)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/state.py b/nikola/state.py
--- a/nikola/state.py
+++ b/nikola/state.py
@@ -78,6 +78,11 @@
def _save(self):
dname = os.path.dirname(self._path)
with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:
+ # TODO replace with encoding='utf-8' and mode 'w+' in v8
tname = outf.name
- json.dump(self._local.data, outf, sort_keys=True, indent=2)
+ data = json.dumps(self._local.data, sort_keys=True, indent=2)
+ try:
+ outf.write(data)
+ except TypeError:
+ outf.write(data.encode('utf-8'))
shutil.move(tname, self._path)
| {"golden_diff": "diff --git a/nikola/state.py b/nikola/state.py\n--- a/nikola/state.py\n+++ b/nikola/state.py\n@@ -78,6 +78,11 @@\n def _save(self):\n dname = os.path.dirname(self._path)\n with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:\n+ # TODO replace with encoding='utf-8' and mode 'w+' in v8\n tname = outf.name\n- json.dump(self._local.data, outf, sort_keys=True, indent=2)\n+ data = json.dumps(self._local.data, sort_keys=True, indent=2)\n+ try:\n+ outf.write(data)\n+ except TypeError:\n+ outf.write(data.encode('utf-8'))\n shutil.move(tname, self._path)\n", "issue": "deploy crashes with state system\nWill investigate later.\n\n``` pytb\nTraceback (most recent call last):\n File \"/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/doit_cmd.py\", line 168, in run\n return command.parse_execute(args)\n File \"/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/cmd_base.py\", line 122, in parse_execute\n return self.execute(params, args)\n File \"/home/kwpolska/git/nikola/nikola/plugin_categories.py\", line 124, in execute\n return self._execute(options, args)\n File \"/home/kwpolska/git/nikola/nikola/plugins/command/deploy.py\", line 135, in _execute\n self.site.state.set('last_deploy', new_deploy.isoformat())\n File \"/home/kwpolska/git/nikola/nikola/state.py\", line 64, in set\n self._save()\n File \"/home/kwpolska/git/nikola/nikola/state.py\", line 82, in _save\n json.dump(self._local.data, outf, sort_keys=True, indent=2)\n File \"/usr/lib64/python3.5/json/__init__.py\", line 179, in dump\n fp.write(chunk)\n File \"/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/tempfile.py\", line 483, in func_wrapper\n return func(*args, **kwargs)\nTypeError: a bytes-like object is required, not 'str'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Persistent state implementation.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport threading\n\nfrom . import utils\n\n\nclass Persistor():\n \"\"\"Persist stuff in a place.\n\n This is an intentionally dumb implementation. It is *not* meant to be\n fast, or useful for arbitrarily large data. Use lightly.\n\n Intentionally it has no namespaces, sections, etc. 
Use as a\n responsible adult.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"Where do you want it persisted.\"\"\"\n self._path = path\n utils.makedirs(os.path.dirname(path))\n self._local = threading.local()\n self._local.data = {}\n\n def get(self, key):\n \"\"\"Get data stored in key.\"\"\"\n self._read()\n return self._local.data.get(key)\n\n def set(self, key, value):\n \"\"\"Store value in key.\"\"\"\n self._read()\n self._local.data[key] = value\n self._save()\n\n def delete(self, key):\n \"\"\"Delete key and the value it contains.\"\"\"\n self._read()\n if key in self._local.data:\n self._local.data.pop(key)\n self._save()\n\n def _read(self):\n if os.path.isfile(self._path):\n with open(self._path) as inf:\n self._local.data = json.load(inf)\n\n def _save(self):\n dname = os.path.dirname(self._path)\n with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:\n tname = outf.name\n json.dump(self._local.data, outf, sort_keys=True, indent=2)\n shutil.move(tname, self._path)\n", "path": "nikola/state.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Persistent state implementation.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport threading\n\nfrom . import utils\n\n\nclass Persistor():\n \"\"\"Persist stuff in a place.\n\n This is an intentionally dumb implementation. It is *not* meant to be\n fast, or useful for arbitrarily large data. Use lightly.\n\n Intentionally it has no namespaces, sections, etc. 
Use as a\n responsible adult.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"Where do you want it persisted.\"\"\"\n self._path = path\n utils.makedirs(os.path.dirname(path))\n self._local = threading.local()\n self._local.data = {}\n\n def get(self, key):\n \"\"\"Get data stored in key.\"\"\"\n self._read()\n return self._local.data.get(key)\n\n def set(self, key, value):\n \"\"\"Store value in key.\"\"\"\n self._read()\n self._local.data[key] = value\n self._save()\n\n def delete(self, key):\n \"\"\"Delete key and the value it contains.\"\"\"\n self._read()\n if key in self._local.data:\n self._local.data.pop(key)\n self._save()\n\n def _read(self):\n if os.path.isfile(self._path):\n with open(self._path) as inf:\n self._local.data = json.load(inf)\n\n def _save(self):\n dname = os.path.dirname(self._path)\n with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:\n # TODO replace with encoding='utf-8' and mode 'w+' in v8\n tname = outf.name\n data = json.dumps(self._local.data, sort_keys=True, indent=2)\n try:\n outf.write(data)\n except TypeError:\n outf.write(data.encode('utf-8'))\n shutil.move(tname, self._path)\n", "path": "nikola/state.py"}]} | 1,407 | 180 |
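The nikola traceback above comes from `tempfile.NamedTemporaryFile` defaulting to binary mode on Python 3, so writing `str` chunks fails. A minimal sketch of the failure and of the fallback used in the golden diff (illustrative values only):

```python
# Sketch only: NamedTemporaryFile opens in 'w+b' by default, so str writes fail.
import json
import tempfile

data = json.dumps({"last_deploy": "2016-02-24T12:00:00"}, sort_keys=True, indent=2)
with tempfile.NamedTemporaryFile(delete=False) as outf:
    try:
        outf.write(data)                  # TypeError on Python 3: bytes expected
    except TypeError:
        outf.write(data.encode('utf-8'))  # fall back to bytes, as in the patch
```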
gh_patches_debug_21053 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6083 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API: filter notifications by `is_read`
**Description of the need**
For the needs of the extension, it would be useful to be able to filter notifications (URL `/api/notifications`) by their `is_read` property, so as to fetch only unread ones (the others being of no interest for this use case).
**Description of the solution**
Add a filter for `is_read` (boolean) on the `/api/notifications` URL.
**Description of alternatives**
At the very least, being able to sort by this property (so that unread notifications come first).
**Additional context**
See the code at [notifier.js#64](https://github.com/zestedesavoir/extensions-notificateurs/blob/master/Universal/notifier.js#L64) for the use case in question (it would let me remove the `.filter()` on line 78 while still retrieving potentially old but unread notifications that are currently inaccessible).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/notification/api/views.py`
Content:
```
1 import datetime
2 from django.core.cache import cache
3 from django.db.models.signals import post_delete
4 from django.db.models.signals import post_save
5 from dry_rest_permissions.generics import DRYPermissions
6 from rest_framework import filters
7 from rest_framework.generics import ListAPIView
8 from rest_framework.permissions import IsAuthenticated
9 from rest_framework_extensions.cache.decorators import cache_response
10 from rest_framework_extensions.etag.decorators import etag
11 from rest_framework_extensions.key_constructor import bits
12 from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
13
14 from zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit
15 from zds.notification.api.serializers import NotificationSerializer
16 from zds.notification.models import Notification
17
18
19 class PagingNotificationListKeyConstructor(DefaultKeyConstructor):
20 pagination = DJRF3xPaginationKeyBit()
21 search = bits.QueryParamsKeyBit(["search", "ordering", "type"])
22 list_sql_query = bits.ListSqlQueryKeyBit()
23 unique_view_id = bits.UniqueViewIdKeyBit()
24 user = bits.UserKeyBit()
25 updated_at = UpdatedAtKeyBit("api_updated_notification")
26
27
28 def change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):
29 cache.set("api_updated_notification", datetime.datetime.utcnow())
30
31
32 post_save.connect(receiver=change_api_notification_updated_at, sender=Notification)
33 post_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)
34
35
36 class NotificationListAPI(ListAPIView):
37 """
38 List of notification.
39 """
40
41 filter_backends = (filters.SearchFilter, filters.OrderingFilter)
42 search_fields = ("title",)
43 ordering_fields = (
44 "pubdate",
45 "title",
46 )
47 list_key_func = PagingNotificationListKeyConstructor()
48 serializer_class = NotificationSerializer
49 permission_classes = (
50 IsAuthenticated,
51 DRYPermissions,
52 )
53
54 @etag(list_key_func)
55 @cache_response(key_func=list_key_func)
56 def get(self, request, *args, **kwargs):
57 """
58 Lists all notifications of a user.
59 ---
60
61 parameters:
62 - name: Authorization
63 description: Bearer token to make an authenticated request.
64 required: true
65 paramType: header
66 - name: page
67 description: Restricts output to the given page number.
68 required: false
69 paramType: query
70 - name: page_size
71 description: Sets the number of notifications per page.
72 required: false
73 paramType: query
74 - name: search
75 description: Filters by title.
76 required: false
77 paramType: query
78 - name: ordering
79 description: Sorts the results. You can order by (-)pubdate or (-)title.
80 paramType: query
81 - name: type
82 description: Filters by notification type.
83 paramType: query
84 - name: subscription_type
85 description: Filters by subscription type.
86 paramType: query
87 - name: expand
88 description: Returns an object instead of an identifier representing the given field.
89 required: false
90 paramType: query
91 responseMessages:
92 - code: 401
93 message: Not Authenticated
94 - code: 404
95 message: Not Found
96 """
97 return self.list(request, *args, **kwargs)
98
99 def get_queryset(self):
100 queryset = Notification.objects.get_notifications_of(self.request.user)
101 subscription_type = self.request.query_params.get("subscription_type", None)
102 if subscription_type:
103 queryset = queryset.filter(subscription__content_type__model=subscription_type)
104 _type = self.request.query_params.get("type", None)
105 if _type:
106 queryset = queryset.filter(content_type__model=_type)
107 return queryset
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py
--- a/zds/notification/api/views.py
+++ b/zds/notification/api/views.py
@@ -84,6 +84,9 @@
- name: subscription_type
description: Filters by subscription type.
paramType: query
+ - name: is_read
+ description: Filters by read status.
+ paramType: query
- name: expand
description: Returns an object instead of an identifier representing the given field.
required: false
@@ -104,4 +107,9 @@
_type = self.request.query_params.get("type", None)
if _type:
queryset = queryset.filter(content_type__model=_type)
+ is_read = str(self.request.query_params.get("is_read", None)).lower()
+ if is_read == "true":
+ queryset = queryset.filter(is_read=True)
+ elif is_read == "false":
+ queryset = queryset.filter(is_read=False)
return queryset
| {"golden_diff": "diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py\n--- a/zds/notification/api/views.py\n+++ b/zds/notification/api/views.py\n@@ -84,6 +84,9 @@\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n+ - name: is_read\n+ description: Filters by read status.\n+ paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n@@ -104,4 +107,9 @@\n _type = self.request.query_params.get(\"type\", None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n+ is_read = str(self.request.query_params.get(\"is_read\", None)).lower()\n+ if is_read == \"true\":\n+ queryset = queryset.filter(is_read=True)\n+ elif is_read == \"false\":\n+ queryset = queryset.filter(is_read=False)\n return queryset\n", "issue": "API: filtrer les notifications par `is_read`\n**Description du besoin**\r\n\r\nPour les besoins de l'extension, il serait int\u00e9ressant de pouvoir filtrer les notifications (URL `/api/notifications`) selon leur propri\u00e9t\u00e9 `is_read` pour ne r\u00e9cup\u00e9rer que les non lues (les autres n'ayant pas d'int\u00e9r\u00eat pour ce cas d'usage).\r\n\r\n**Description de la solution**\r\n\r\nAjouter un filtre pour `is_read` (bool\u00e9en) sur l'URL `/api/notifications`\r\n\r\n**Description des alternatives**\r\n\r\nPouvoir trier selon cette propri\u00e9t\u00e9 (pour avoir les non-lues d'abord), _a minima_.\r\n\r\n**Contexte additionnel**\r\n\r\nVoir le code de [notifier.js#64](https://github.com/zestedesavoir/extensions-notificateurs/blob/master/Universal/notifier.js#L64) pour voir le cas d'usage en question (qui me permettrait de supprimer le `.filter()` ligne 78 tout en r\u00e9cup\u00e9rant des notifications potentiellement anciennes mais non lues qui sont actuellement inaccessibles).\r\n\n", "before_files": [{"content": "import datetime\nfrom django.core.cache import cache\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit([\"search\", \"ordering\", \"type\"])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n updated_at = UpdatedAtKeyBit(\"api_updated_notification\")\n\n\ndef change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):\n cache.set(\"api_updated_notification\", datetime.datetime.utcnow())\n\n\npost_save.connect(receiver=change_api_notification_updated_at, sender=Notification)\npost_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)\n\n\nclass NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = 
(filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"title\",)\n ordering_fields = (\n \"pubdate\",\n \"title\",\n )\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (\n IsAuthenticated,\n DRYPermissions,\n )\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get(\"subscription_type\", None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get(\"type\", None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n return queryset\n", "path": "zds/notification/api/views.py"}], "after_files": [{"content": "import datetime\nfrom django.core.cache import cache\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit([\"search\", \"ordering\", \"type\"])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n updated_at = UpdatedAtKeyBit(\"api_updated_notification\")\n\n\ndef change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):\n cache.set(\"api_updated_notification\", datetime.datetime.utcnow())\n\n\npost_save.connect(receiver=change_api_notification_updated_at, sender=Notification)\npost_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)\n\n\nclass 
NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = (filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"title\",)\n ordering_fields = (\n \"pubdate\",\n \"title\",\n )\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (\n IsAuthenticated,\n DRYPermissions,\n )\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: is_read\n description: Filters by read status.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get(\"subscription_type\", None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get(\"type\", None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n is_read = str(self.request.query_params.get(\"is_read\", None)).lower()\n if is_read == \"true\":\n queryset = queryset.filter(is_read=True)\n elif is_read == \"false\":\n queryset = queryset.filter(is_read=False)\n return queryset\n", "path": "zds/notification/api/views.py"}]} | 1,503 | 225 |
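A condensed sketch of the filtering added in the zds diff above, pulled out as a standalone helper. Query-string values arrive as strings, which is why the comparison is against "true"/"false" rather than a Python bool (`queryset` stands for a Django queryset; names are illustrative):

```python
# Sketch only: e.g. GET /api/notifications?is_read=false returns unread items.
def apply_is_read_filter(queryset, query_params):
    is_read = str(query_params.get("is_read", None)).lower()
    if is_read == "true":
        return queryset.filter(is_read=True)
    if is_read == "false":
        return queryset.filter(is_read=False)
    return queryset  # parameter absent or unrecognised: no extra filtering
```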
gh_patches_debug_8845 | rasdani/github-patches | git_diff | safe-global__safe-config-service-14 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include provider info in the serialized response of `GET /safe-apps/`
The `/safe-apps` endpoint should include data about the provider, if any.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/safe_apps/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from .models import SafeApp
4
5
6 class SafeAppsResponseSerializer(serializers.ModelSerializer):
7 class Meta:
8 model = SafeApp
9 fields = ['url', 'name', 'icon_url', 'description', 'networks']
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/safe_apps/serializers.py b/src/safe_apps/serializers.py
--- a/src/safe_apps/serializers.py
+++ b/src/safe_apps/serializers.py
@@ -1,9 +1,17 @@
from rest_framework import serializers
-from .models import SafeApp
+from .models import SafeApp, Provider
+
+
+class ProviderSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Provider
+ fields = ['url', 'name']
class SafeAppsResponseSerializer(serializers.ModelSerializer):
+ provider = ProviderSerializer()
+
class Meta:
model = SafeApp
- fields = ['url', 'name', 'icon_url', 'description', 'networks']
+ fields = ['url', 'name', 'icon_url', 'description', 'networks', 'provider']
| {"golden_diff": "diff --git a/src/safe_apps/serializers.py b/src/safe_apps/serializers.py\n--- a/src/safe_apps/serializers.py\n+++ b/src/safe_apps/serializers.py\n@@ -1,9 +1,17 @@\n from rest_framework import serializers\n \n-from .models import SafeApp\n+from .models import SafeApp, Provider\n+\n+\n+class ProviderSerializer(serializers.ModelSerializer):\n+ class Meta:\n+ model = Provider\n+ fields = ['url', 'name']\n \n \n class SafeAppsResponseSerializer(serializers.ModelSerializer):\n+ provider = ProviderSerializer()\n+\n class Meta:\n model = SafeApp\n- fields = ['url', 'name', 'icon_url', 'description', 'networks']\n+ fields = ['url', 'name', 'icon_url', 'description', 'networks', 'provider']\n", "issue": "Include provider info in the serialized response of `GET /safe-apps/`\nThe `/safe-apps` endpoint should include data about the provider if any\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom .models import SafeApp\n\n\nclass SafeAppsResponseSerializer(serializers.ModelSerializer):\n class Meta:\n model = SafeApp\n fields = ['url', 'name', 'icon_url', 'description', 'networks']\n", "path": "src/safe_apps/serializers.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom .models import SafeApp, Provider\n\n\nclass ProviderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Provider\n fields = ['url', 'name']\n\n\nclass SafeAppsResponseSerializer(serializers.ModelSerializer):\n provider = ProviderSerializer()\n\n class Meta:\n model = SafeApp\n fields = ['url', 'name', 'icon_url', 'description', 'networks', 'provider']\n", "path": "src/safe_apps/serializers.py"}]} | 359 | 180 |
gh_patches_debug_29629 | rasdani/github-patches | git_diff | aio-libs__aiohttp-4556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GET Requests to link-local IPv6 addresses don't work on Python 3.7+
🐞 **Describe the bug**
The aiohttp resolver loses information related to link-local IPv6 addresses on Python 3.7+ due to a change in the representation returned by `socket.getaddrinfo()`.
💡 **To Reproduce**
Try to get a URL like `http://[fe80::1%eth0]:8080/`; it will result in an OSError (Invalid argument) exception.
This seems to be due to the way that scopeid's are handled in [resolver.py](https://github.com/aio-libs/aiohttp/blob/72c2acd4850b1cbc638b413a7c28d96882b4d7e8/aiohttp/resolver.py#L31-L37):
Run `socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]` on python 3.6:
```python
socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]
>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]
('fe80::1%eth0', 8080, 0, 4)
```
Run it on python 3.7:
```python
>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]
('fe80::1', 8080, 0, 4)
```
The `address` element of the tuple no longer includes the textual representation of the scope id; it is only contained in the matching scope_id element of the tuple, which is then missing when later calling _loop.create_connection().
💡 **Expected behavior**
The URL is successfully retrieved for link-local IPv6 addresses.
📋 **Logs/tracebacks**
```python-traceback (paste your traceback in the next line)
N/A
```
📋 **Your version of the Python**
```console
$ python3 --version
Python 3.6.6
$ python3.7 --version
Python 3.7.5
```
📋 **Your version of the aiohttp/yarl/multidict distributions**
```console
$ python -m pip show aiohttp
python -m pip show aiohttp
Name: aiohttp
Version: 3.6.2
```
```console
$ python -m pip show multidict
Name: multidict
Version: 4.7.4
```
```console
$ python -m pip show yarl
Name: yarl
Version: 1.4.2
```
📋 **Additional context**
OS: Centos7 Linux
Proxy Server: No
Related to: client
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/resolver.py`
Content:
```
1 import socket
2 from typing import Any, Dict, List
3
4 from .abc import AbstractResolver
5 from .helpers import get_running_loop
6
7 __all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')
8
9 try:
10 import aiodns
11
12 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
13 except ImportError: # pragma: no cover
14 aiodns = None
15
16 aiodns_default = False
17
18
19 class ThreadedResolver(AbstractResolver):
20 """Use Executor for synchronous getaddrinfo() calls, which defaults to
21 concurrent.futures.ThreadPoolExecutor.
22 """
23
24 def __init__(self) -> None:
25 self._loop = get_running_loop()
26
27 async def resolve(self, host: str, port: int=0,
28 family: int=socket.AF_INET) -> List[Dict[str, Any]]:
29 infos = await self._loop.getaddrinfo(
30 host, port, type=socket.SOCK_STREAM, family=family)
31
32 hosts = []
33 for family, _, proto, _, address in infos:
34 hosts.append(
35 {'hostname': host,
36 'host': address[0], 'port': address[1],
37 'family': family, 'proto': proto,
38 'flags': socket.AI_NUMERICHOST})
39
40 return hosts
41
42 async def close(self) -> None:
43 pass
44
45
46 class AsyncResolver(AbstractResolver):
47 """Use the `aiodns` package to make asynchronous DNS lookups"""
48
49 def __init__(self, *args: Any, **kwargs: Any) -> None:
50 if aiodns is None:
51 raise RuntimeError("Resolver requires aiodns library")
52
53 self._loop = get_running_loop()
54 self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)
55
56 async def resolve(self, host: str, port: int=0,
57 family: int=socket.AF_INET) -> List[Dict[str, Any]]:
58 try:
59 resp = await self._resolver.gethostbyname(host, family)
60 except aiodns.error.DNSError as exc:
61 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
62 raise OSError(msg) from exc
63 hosts = []
64 for address in resp.addresses:
65 hosts.append(
66 {'hostname': host,
67 'host': address, 'port': port,
68 'family': family, 'proto': 0,
69 'flags': socket.AI_NUMERICHOST})
70
71 if not hosts:
72 raise OSError("DNS lookup failed")
73
74 return hosts
75
76 async def close(self) -> None:
77 return self._resolver.cancel()
78
79
80 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py
--- a/aiohttp/resolver.py
+++ b/aiohttp/resolver.py
@@ -31,11 +31,23 @@
hosts = []
for family, _, proto, _, address in infos:
- hosts.append(
- {'hostname': host,
- 'host': address[0], 'port': address[1],
- 'family': family, 'proto': proto,
- 'flags': socket.AI_NUMERICHOST})
+ if family == socket.AF_INET6 and address[3]: # type: ignore
+ # This is essential for link-local IPv6 addresses.
+ # LL IPv6 is a VERY rare case. Strictly speaking, we should use
+ # getnameinfo() unconditionally, but performance makes sense.
+ host, _port = socket.getnameinfo(
+ address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
+ port = int(_port)
+ else:
+ host, port = address[:2]
+ hosts.append({
+ 'hostname': host,
+ 'host': host,
+ 'port': port,
+ 'family': family,
+ 'proto': proto,
+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
+ })
return hosts
@@ -62,11 +74,14 @@
raise OSError(msg) from exc
hosts = []
for address in resp.addresses:
- hosts.append(
- {'hostname': host,
- 'host': address, 'port': port,
- 'family': family, 'proto': 0,
- 'flags': socket.AI_NUMERICHOST})
+ hosts.append({
+ 'hostname': host,
+ 'host': address,
+ 'port': port,
+ 'family': family,
+ 'proto': 0,
+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
+ })
if not hosts:
raise OSError("DNS lookup failed")
| {"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -31,11 +31,23 @@\n \n hosts = []\n for family, _, proto, _, address in infos:\n- hosts.append(\n- {'hostname': host,\n- 'host': address[0], 'port': address[1],\n- 'family': family, 'proto': proto,\n- 'flags': socket.AI_NUMERICHOST})\n+ if family == socket.AF_INET6 and address[3]: # type: ignore\n+ # This is essential for link-local IPv6 addresses.\n+ # LL IPv6 is a VERY rare case. Strictly speaking, we should use\n+ # getnameinfo() unconditionally, but performance makes sense.\n+ host, _port = socket.getnameinfo(\n+ address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)\n+ port = int(_port)\n+ else:\n+ host, port = address[:2]\n+ hosts.append({\n+ 'hostname': host,\n+ 'host': host,\n+ 'port': port,\n+ 'family': family,\n+ 'proto': proto,\n+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n+ })\n \n return hosts\n \n@@ -62,11 +74,14 @@\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n- hosts.append(\n- {'hostname': host,\n- 'host': address, 'port': port,\n- 'family': family, 'proto': 0,\n- 'flags': socket.AI_NUMERICHOST})\n+ hosts.append({\n+ 'hostname': host,\n+ 'host': address,\n+ 'port': port,\n+ 'family': family,\n+ 'proto': 0,\n+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n+ })\n \n if not hosts:\n raise OSError(\"DNS lookup failed\")\n", "issue": "GET Requests to link-local IPv6 addresses don't work on Python 3.7+\n\ud83d\udc1e **Describe the bug**\r\nThe aiohttp resolver loses information related to linklocal IPv6 addresses on Python 3.7+ due to a changes in the representation returned by `socket.getaddrinfo()`\r\n\r\n\ud83d\udca1 **To Reproduce**\r\nTry to get an URL like `http://[fe80::1%eth0]:8080/`, it will result in an OSError (Invalid argument) exception.\r\n\r\nThis seems to be due to the way that scopeid's are handled in [resolver.py](https://github.com/aio-libs/aiohttp/blob/72c2acd4850b1cbc638b413a7c28d96882b4d7e8/aiohttp/resolver.py#L31-L37):\r\n\r\nRun `socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]` on python 3.6:\r\n```python\r\nsocket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]\r\n>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]\r\n('fe80::1%eth0', 8080, 0, 4)\r\n```\r\n\r\nRun it on python 3.7:\r\n```python\r\n>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]\r\n('fe80::1', 8080, 0, 4)y\r\n```\r\n\r\nThe `address` element of the tuple no longer includes the textual representation of the scope id, it's only contained in the matching scope_id element of the tuple - which then is missing when later callings _loop.create_connection().\r\n\r\n\ud83d\udca1 **Expected behavior**\r\nThe URL is successfully retrieved for link local IPv6 addresses.\r\n\r\n\r\n\ud83d\udccb **Logs/tracebacks**\r\n```python-traceback (paste your traceback in the next line)\r\nN/A\r\n```\r\n\r\n\ud83d\udccb **Your version of the Python**\r\n```console\r\n$ python3 --version\r\nPython 3.6.6\r\n$ python3.7 --version\r\nPython 3.7.5\r\n```\r\n\r\n\ud83d\udccb **Your version of the aiohttp/yarl/multidict distributions**\r\n```console\r\n$ python -m pip show aiohttp\r\npython -m pip show aiohttp\r\nName: aiohttp\r\nVersion: 3.6.2\r\n```\r\n```console\r\n$ python -m pip show multidict\r\nName: multidict\r\nVersion: 
4.7.4\r\n```\r\n```console\r\n$ python -m pip show yarl\r\nName: yarl\r\nVersion: 1.4.2\r\n```\r\n\r\n\ud83d\udccb **Additional context**\r\nOS: Centos7 Linux\r\nProxy Server: No\r\nRelated to: client\n", "before_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family)\n\n hosts = []\n for family, _, proto, _, address in infos:\n hosts.append(\n {'hostname': host,\n 'host': address[0], 'port': address[1],\n 'family': family, 'proto': proto,\n 'flags': socket.AI_NUMERICHOST})\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {'hostname': host,\n 'host': address, 'port': port,\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}], "after_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family)\n\n hosts = []\n for family, _, proto, _, address in infos:\n if family == socket.AF_INET6 and address[3]: # type: ignore\n # This is essential for link-local IPv6 addresses.\n # LL IPv6 is a VERY rare case. 
Strictly speaking, we should use\n # getnameinfo() unconditionally, but performance makes sense.\n host, _port = socket.getnameinfo(\n address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)\n port = int(_port)\n else:\n host, port = address[:2]\n hosts.append({\n 'hostname': host,\n 'host': host,\n 'port': port,\n 'family': family,\n 'proto': proto,\n 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n })\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append({\n 'hostname': host,\n 'host': address,\n 'port': port,\n 'family': family,\n 'proto': 0,\n 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n })\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}]} | 1,683 | 458 |
gh_patches_debug_22504 | rasdani/github-patches | git_diff | wright-group__WrightTools-360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows Tempfile Error
On Windows, attempting to open tempfiles using h5py causes errors.
I do not have the error message in front of me at present, but I believe it was a 'file already exists' flavor of problem.
We may need to remove the created tmpfile and just use the name....
--- END ISSUE ---
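A minimal sketch of the direction the issue hints at — keeping only the temp file's path rather than an open handle — is shown below; the `mkstemp` call and the explicit close/remove cleanup are assumptions about one possible fix, not necessarily the project's final implementation.

```python
import os
import tempfile

# Create a temp file but keep only its path; holding the handle open (as
# NamedTemporaryFile does) prevents h5py from reopening the file on Windows.
fd, path = tempfile.mkstemp(prefix='', suffix='.wt5')

# ... pass `path` to h5py.File(path, 'a') here instead of an open handle ...

# Explicit cleanup once the file is no longer needed.
os.close(fd)
os.remove(path)
```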
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/_base.py`
Content:
```
1 """WrightTools base classes and associated."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import shutil
8 import weakref
9 import tempfile
10 import posixpath
11
12 import numpy as np
13
14 import h5py
15
16
17 # --- define --------------------------------------------------------------------------------------
18
19
20 wt5_version = '0.0.0'
21
22
23 # --- dataset -------------------------------------------------------------------------------------
24
25
26 class Dataset(h5py.Dataset):
27 instances = {}
28
29
30 # --- group ---------------------------------------------------------------------------------------
31
32
33 class Group(h5py.Group):
34 instances = {}
35 class_name = 'Group'
36
37 def __init__(self, filepath=None, parent=None, name=None, **kwargs):
38 if filepath is None:
39 return
40 if parent == '':
41 parent = posixpath.sep
42 # file
43 self.filepath = filepath
44 path = parent + posixpath.sep + name
45 file = h5py.File(self.filepath, 'a')
46 file.require_group(parent)
47 file.require_group(path)
48 h5py.Group.__init__(self, bind=file[path].id)
49 self.__n = 0
50 self.fid = self.file.fid
51 if name is not None:
52 self.attrs['name'] = name
53 self.attrs.update(kwargs)
54 self.attrs['class'] = self.class_name
55 # load from file
56 self._items = []
57 for name in self.item_names:
58 self._items.append(self[name])
59 setattr(self, name, self[name])
60 # kwargs
61 self.attrs.update(kwargs)
62 # the following are populated if not already recorded
63 self.__version__
64 self.natural_name
65
66 def __new__(cls, *args, **kwargs):
67 # extract
68 filepath = args[0] if len(args) > 0 else kwargs.get('filepath', None)
69 parent = args[1] if len(args) > 1 else kwargs.get('parent', None)
70 name = args[2] if len(args) > 2 else kwargs.get('name', cls.class_name.lower())
71 edit_local = args[3] if len(args) > 3 else kwargs.get('edit_local', False)
72 # tempfile
73 tmpfile = None
74 if edit_local and filepath is None:
75 raise Exception # TODO: better exception
76 if not edit_local:
77 tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')
78 p = tmpfile.name
79 if filepath:
80 shutil.copyfile(src=filepath, dst=p)
81 elif edit_local and filepath:
82 p = filepath
83 # construct fullpath
84 if parent is None:
85 parent = ''
86 name = '/'
87 fullpath = p + '::' + parent + name
88 # create and/or return
89 if fullpath not in cls.instances.keys():
90 kwargs['filepath'] = p
91 kwargs['parent'] = parent
92 kwargs['name'] = name
93 instance = super(Group, cls).__new__(cls)
94 cls.__init__(instance, **kwargs)
95 cls.instances[fullpath] = instance
96 if tmpfile:
97 setattr(instance, '_tmpfile', tmpfile)
98 weakref.finalize(instance, instance.close)
99 return instance
100 instance = cls.instances[fullpath]
101 return instance
102
103 @property
104 def __version__(self):
105 if '__version__' not in self.file.attrs.keys():
106 self.file.attrs['__version__'] = wt5_version
107 return self.file.attrs['__version__']
108
109 @property
110 def fullpath(self):
111 return self.filepath + '::' + self.name
112
113 @property
114 def item_names(self):
115 if 'item_names' not in self.attrs.keys():
116 self.attrs['item_names'] = np.array([], dtype='S')
117 return self.attrs['item_names']
118
119 @property
120 def natural_name(self):
121 if 'name' not in self.attrs.keys():
122 self.attrs['name'] = self.__class__.default_name
123 return self.attrs['name']
124
125 @property
126 def parent(self):
127 from .collection import Collection
128 group = super().parent
129 parent = group.parent.name
130 if parent == posixpath.sep:
131 parent = None
132 return Collection(self.filepath, parent=parent, name=group.attrs['name'])
133
134 def close(self):
135 if(self.fid.valid > 0):
136 self.__class__.instances.pop(self.fullpath)
137 self.file.flush()
138 self.file.close()
139 if hasattr(self, '_tmpfile'):
140 self._tmpfile.close()
141
142 def flush(self):
143 self.file.flush()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/_base.py b/WrightTools/_base.py
--- a/WrightTools/_base.py
+++ b/WrightTools/_base.py
@@ -5,6 +5,7 @@
import shutil
+import os
import weakref
import tempfile
import posixpath
@@ -74,8 +75,8 @@
if edit_local and filepath is None:
raise Exception # TODO: better exception
if not edit_local:
- tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')
- p = tmpfile.name
+ tmpfile = tempfile.mkstemp(prefix='', suffix='.wt5')
+ p = tmpfile[1]
if filepath:
shutil.copyfile(src=filepath, dst=p)
elif edit_local and filepath:
@@ -137,7 +138,8 @@
self.file.flush()
self.file.close()
if hasattr(self, '_tmpfile'):
- self._tmpfile.close()
+ os.close(self._tmpfile[0])
+ os.remove(self._tmpfile[1])
def flush(self):
self.file.flush()
| {"golden_diff": "diff --git a/WrightTools/_base.py b/WrightTools/_base.py\n--- a/WrightTools/_base.py\n+++ b/WrightTools/_base.py\n@@ -5,6 +5,7 @@\n \n \n import shutil\n+import os\n import weakref\n import tempfile\n import posixpath\n@@ -74,8 +75,8 @@\n if edit_local and filepath is None:\n raise Exception # TODO: better exception\n if not edit_local:\n- tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')\n- p = tmpfile.name\n+ tmpfile = tempfile.mkstemp(prefix='', suffix='.wt5')\n+ p = tmpfile[1]\n if filepath:\n shutil.copyfile(src=filepath, dst=p)\n elif edit_local and filepath:\n@@ -137,7 +138,8 @@\n self.file.flush()\n self.file.close()\n if hasattr(self, '_tmpfile'):\n- self._tmpfile.close()\n+ os.close(self._tmpfile[0])\n+ os.remove(self._tmpfile[1])\n \n def flush(self):\n self.file.flush()\n", "issue": "Windows Tempfile Error\nOn Windows, tempfiles attempted to be opened using h5py cause errors.\r\n\r\nI do not have the error message in front of me at present, but I believe it was a 'file already exists' flavor of problem. \r\n\r\nWe may need to remove the created tmpfile and just use the name....\n", "before_files": [{"content": "\"\"\"WrightTools base classes and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport shutil\nimport weakref\nimport tempfile\nimport posixpath\n\nimport numpy as np\n\nimport h5py\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\nwt5_version = '0.0.0'\n\n\n# --- dataset -------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n instances = {}\n\n\n# --- group ---------------------------------------------------------------------------------------\n\n\nclass Group(h5py.Group):\n instances = {}\n class_name = 'Group'\n\n def __init__(self, filepath=None, parent=None, name=None, **kwargs):\n if filepath is None:\n return\n if parent == '':\n parent = posixpath.sep\n # file\n self.filepath = filepath\n path = parent + posixpath.sep + name\n file = h5py.File(self.filepath, 'a')\n file.require_group(parent)\n file.require_group(path)\n h5py.Group.__init__(self, bind=file[path].id)\n self.__n = 0\n self.fid = self.file.fid\n if name is not None:\n self.attrs['name'] = name\n self.attrs.update(kwargs)\n self.attrs['class'] = self.class_name\n # load from file\n self._items = []\n for name in self.item_names:\n self._items.append(self[name])\n setattr(self, name, self[name])\n # kwargs\n self.attrs.update(kwargs)\n # the following are populated if not already recorded\n self.__version__\n self.natural_name\n\n def __new__(cls, *args, **kwargs):\n # extract\n filepath = args[0] if len(args) > 0 else kwargs.get('filepath', None)\n parent = args[1] if len(args) > 1 else kwargs.get('parent', None)\n name = args[2] if len(args) > 2 else kwargs.get('name', cls.class_name.lower())\n edit_local = args[3] if len(args) > 3 else kwargs.get('edit_local', False)\n # tempfile\n tmpfile = None\n if edit_local and filepath is None:\n raise Exception # TODO: better exception\n if not edit_local:\n tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')\n p = tmpfile.name\n if filepath:\n shutil.copyfile(src=filepath, dst=p)\n elif edit_local and filepath:\n p = filepath\n # construct fullpath\n if parent is None:\n parent = ''\n name = '/'\n fullpath = p + '::' + parent + name\n # create and/or return\n if fullpath not in cls.instances.keys():\n 
kwargs['filepath'] = p\n kwargs['parent'] = parent\n kwargs['name'] = name\n instance = super(Group, cls).__new__(cls)\n cls.__init__(instance, **kwargs)\n cls.instances[fullpath] = instance\n if tmpfile:\n setattr(instance, '_tmpfile', tmpfile)\n weakref.finalize(instance, instance.close)\n return instance\n instance = cls.instances[fullpath]\n return instance\n\n @property\n def __version__(self):\n if '__version__' not in self.file.attrs.keys():\n self.file.attrs['__version__'] = wt5_version\n return self.file.attrs['__version__']\n\n @property\n def fullpath(self):\n return self.filepath + '::' + self.name\n\n @property\n def item_names(self):\n if 'item_names' not in self.attrs.keys():\n self.attrs['item_names'] = np.array([], dtype='S')\n return self.attrs['item_names']\n\n @property\n def natural_name(self):\n if 'name' not in self.attrs.keys():\n self.attrs['name'] = self.__class__.default_name\n return self.attrs['name']\n\n @property\n def parent(self):\n from .collection import Collection\n group = super().parent\n parent = group.parent.name\n if parent == posixpath.sep:\n parent = None\n return Collection(self.filepath, parent=parent, name=group.attrs['name'])\n\n def close(self):\n if(self.fid.valid > 0):\n self.__class__.instances.pop(self.fullpath)\n self.file.flush()\n self.file.close()\n if hasattr(self, '_tmpfile'):\n self._tmpfile.close()\n\n def flush(self):\n self.file.flush()\n", "path": "WrightTools/_base.py"}], "after_files": [{"content": "\"\"\"WrightTools base classes and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport shutil\nimport os\nimport weakref\nimport tempfile\nimport posixpath\n\nimport numpy as np\n\nimport h5py\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\nwt5_version = '0.0.0'\n\n\n# --- dataset -------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n instances = {}\n\n\n# --- group ---------------------------------------------------------------------------------------\n\n\nclass Group(h5py.Group):\n instances = {}\n class_name = 'Group'\n\n def __init__(self, filepath=None, parent=None, name=None, **kwargs):\n if filepath is None:\n return\n if parent == '':\n parent = posixpath.sep\n # file\n self.filepath = filepath\n path = parent + posixpath.sep + name\n file = h5py.File(self.filepath, 'a')\n file.require_group(parent)\n file.require_group(path)\n h5py.Group.__init__(self, bind=file[path].id)\n self.__n = 0\n self.fid = self.file.fid\n if name is not None:\n self.attrs['name'] = name\n self.attrs.update(kwargs)\n self.attrs['class'] = self.class_name\n # load from file\n self._items = []\n for name in self.item_names:\n self._items.append(self[name])\n setattr(self, name, self[name])\n # kwargs\n self.attrs.update(kwargs)\n # the following are populated if not already recorded\n self.__version__\n self.natural_name\n\n def __new__(cls, *args, **kwargs):\n # extract\n filepath = args[0] if len(args) > 0 else kwargs.get('filepath', None)\n parent = args[1] if len(args) > 1 else kwargs.get('parent', None)\n name = args[2] if len(args) > 2 else kwargs.get('name', cls.class_name.lower())\n edit_local = args[3] if len(args) > 3 else kwargs.get('edit_local', False)\n # tempfile\n tmpfile = None\n if edit_local and filepath is None:\n raise Exception # TODO: better exception\n if not edit_local:\n tmpfile = tempfile.mkstemp(prefix='', 
suffix='.wt5')\n p = tmpfile[1]\n if filepath:\n shutil.copyfile(src=filepath, dst=p)\n elif edit_local and filepath:\n p = filepath\n # construct fullpath\n if parent is None:\n parent = ''\n name = '/'\n fullpath = p + '::' + parent + name\n # create and/or return\n if fullpath not in cls.instances.keys():\n kwargs['filepath'] = p\n kwargs['parent'] = parent\n kwargs['name'] = name\n instance = super(Group, cls).__new__(cls)\n cls.__init__(instance, **kwargs)\n cls.instances[fullpath] = instance\n if tmpfile:\n setattr(instance, '_tmpfile', tmpfile)\n weakref.finalize(instance, instance.close)\n return instance\n instance = cls.instances[fullpath]\n return instance\n\n @property\n def __version__(self):\n if '__version__' not in self.file.attrs.keys():\n self.file.attrs['__version__'] = wt5_version\n return self.file.attrs['__version__']\n\n @property\n def fullpath(self):\n return self.filepath + '::' + self.name\n\n @property\n def item_names(self):\n if 'item_names' not in self.attrs.keys():\n self.attrs['item_names'] = np.array([], dtype='S')\n return self.attrs['item_names']\n\n @property\n def natural_name(self):\n if 'name' not in self.attrs.keys():\n self.attrs['name'] = self.__class__.default_name\n return self.attrs['name']\n\n @property\n def parent(self):\n from .collection import Collection\n group = super().parent\n parent = group.parent.name\n if parent == posixpath.sep:\n parent = None\n return Collection(self.filepath, parent=parent, name=group.attrs['name'])\n\n def close(self):\n if(self.fid.valid > 0):\n self.__class__.instances.pop(self.fullpath)\n self.file.flush()\n self.file.close()\n if hasattr(self, '_tmpfile'):\n os.close(self._tmpfile[0])\n os.remove(self._tmpfile[1])\n\n def flush(self):\n self.file.flush()\n", "path": "WrightTools/_base.py"}]} | 1,615 | 247 |
gh_patches_debug_25508 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UploadFile causes an exception in the ASGI application when it is in debug mode
Apparently when we use the ASGI application in debug mode, it cannot print the value of variables if they are not JSON serializable.
In my use case, when I tried to use the [file upload](https://strawberry.rocks/docs/features/file-upload) example in debug mode, this issue ended up happening.
I believe it is because of this:
https://github.com/strawberry-graphql/strawberry/blob/de215370b247a417af8a8dd5fc382d71e305bcd7/strawberry/utils/debug.py#L26-L29
Perhaps converting variables to string might help.
--- END ISSUE ---
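The idea floated in the issue — fall back to a string form for values `json` cannot handle — can be sketched with a custom encoder; the class name and sample variable below are illustrative only, not the library's actual API.

```python
import json
from json import JSONEncoder


class ReprFallbackEncoder(JSONEncoder):
    def default(self, o):
        # repr() works for any object, e.g. a non-serializable UploadFile
        return repr(o)


variables = {"textFile": object()}  # stand-in for a non-serializable value
print(json.dumps(variables, indent=4, cls=ReprFallbackEncoder))
```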
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/utils/debug.py`
Content:
```
1 import datetime
2 import json
3 import typing
4
5 from pygments import highlight, lexers
6 from pygments.formatters import Terminal256Formatter
7
8 from .graphql_lexer import GraphQLLexer
9
10
11 def pretty_print_graphql_operation(
12 operation_name: str, query: str, variables: typing.Dict["str", typing.Any]
13 ): # pragma: no cover
14 """Pretty print a GraphQL operation using pygments.
15
16 Won't print introspection operation to prevent noise in the output."""
17
18 if operation_name == "IntrospectionQuery":
19 return
20
21 now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
22
23 print(f"[{now}]: {operation_name or 'No operation name'}")
24 print(highlight(query, GraphQLLexer(), Terminal256Formatter()))
25
26 if variables:
27 variables_json = json.dumps(variables, indent=4)
28
29 print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py
--- a/strawberry/utils/debug.py
+++ b/strawberry/utils/debug.py
@@ -1,6 +1,7 @@
import datetime
import json
-import typing
+from json import JSONEncoder
+from typing import Any, Dict
from pygments import highlight, lexers
from pygments.formatters import Terminal256Formatter
@@ -8,9 +9,14 @@
from .graphql_lexer import GraphQLLexer
+class StrawberryJSONEncoder(JSONEncoder):
+ def default(self, o: Any) -> Any:
+ return repr(o)
+
+
def pretty_print_graphql_operation(
- operation_name: str, query: str, variables: typing.Dict["str", typing.Any]
-): # pragma: no cover
+ operation_name: str, query: str, variables: Dict["str", Any]
+):
"""Pretty print a GraphQL operation using pygments.
Won't print introspection operation to prevent noise in the output."""
@@ -24,6 +30,6 @@
print(highlight(query, GraphQLLexer(), Terminal256Formatter()))
if variables:
- variables_json = json.dumps(variables, indent=4)
+ variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)
print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
| {"golden_diff": "diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py\n--- a/strawberry/utils/debug.py\n+++ b/strawberry/utils/debug.py\n@@ -1,6 +1,7 @@\n import datetime\n import json\n-import typing\n+from json import JSONEncoder\n+from typing import Any, Dict\n \n from pygments import highlight, lexers\n from pygments.formatters import Terminal256Formatter\n@@ -8,9 +9,14 @@\n from .graphql_lexer import GraphQLLexer\n \n \n+class StrawberryJSONEncoder(JSONEncoder):\n+ def default(self, o: Any) -> Any:\n+ return repr(o)\n+\n+\n def pretty_print_graphql_operation(\n- operation_name: str, query: str, variables: typing.Dict[\"str\", typing.Any]\n-): # pragma: no cover\n+ operation_name: str, query: str, variables: Dict[\"str\", Any]\n+):\n \"\"\"Pretty print a GraphQL operation using pygments.\n \n Won't print introspection operation to prevent noise in the output.\"\"\"\n@@ -24,6 +30,6 @@\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n \n if variables:\n- variables_json = json.dumps(variables, indent=4)\n+ variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)\n \n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "issue": "UploadFile causes ASGI application's exception when it is in debug mode\nApparently when we use the ASGI application in debug mode, it cannot print the value of variables if they are not JSON serializable.\r\n\r\nIn my use case, when I tried to use the [file upload](https://strawberry.rocks/docs/features/file-upload) example in debug mode this issue ended up happening.\r\n\r\nI believe it is because of this:\r\n\r\nhttps://github.com/strawberry-graphql/strawberry/blob/de215370b247a417af8a8dd5fc382d71e305bcd7/strawberry/utils/debug.py#L26-L29\r\n\r\nPerhaps converting variables to string might help.\n", "before_files": [{"content": "import datetime\nimport json\nimport typing\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import Terminal256Formatter\n\nfrom .graphql_lexer import GraphQLLexer\n\n\ndef pretty_print_graphql_operation(\n operation_name: str, query: str, variables: typing.Dict[\"str\", typing.Any]\n): # pragma: no cover\n \"\"\"Pretty print a GraphQL operation using pygments.\n\n Won't print introspection operation to prevent noise in the output.\"\"\"\n\n if operation_name == \"IntrospectionQuery\":\n return\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"[{now}]: {operation_name or 'No operation name'}\")\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n\n if variables:\n variables_json = json.dumps(variables, indent=4)\n\n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "path": "strawberry/utils/debug.py"}], "after_files": [{"content": "import datetime\nimport json\nfrom json import JSONEncoder\nfrom typing import Any, Dict\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import Terminal256Formatter\n\nfrom .graphql_lexer import GraphQLLexer\n\n\nclass StrawberryJSONEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n return repr(o)\n\n\ndef pretty_print_graphql_operation(\n operation_name: str, query: str, variables: Dict[\"str\", Any]\n):\n \"\"\"Pretty print a GraphQL operation using pygments.\n\n Won't print introspection operation to prevent noise in the output.\"\"\"\n\n if operation_name == \"IntrospectionQuery\":\n return\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"[{now}]: {operation_name or 'No 
operation name'}\")\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n\n if variables:\n variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)\n\n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "path": "strawberry/utils/debug.py"}]} | 683 | 317 |
gh_patches_debug_33721 | rasdani/github-patches | git_diff | docker__docker-py-1178 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support create network EnableIPv6 and Labels options
Check the remote API:
https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-network
There are two missing JSON parameters:
```
EnableIPv6 - Enable IPv6 on the network
Labels - Labels to set on the network, specified as a map: {"key":"value" [,"key2":"value2"]}
```
--- END ISSUE ---
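For illustration, the two parameters simply travel as `"EnableIPv6"` and `"Labels"` keys in the JSON body of `POST /networks/create` (API version >= 1.23). The client call below is a sketch of how a docker-py user might pass them once keyword arguments exist for these fields; the client setup line is an assumption.

```python
import docker

client = docker.Client()  # assumed client setup

# `labels` and `enable_ipv6` map onto the remote API's "Labels" and
# "EnableIPv6" JSON keys; both require API version >= 1.23.
client.create_network(
    'isolated_nw',
    driver='bridge',
    labels={'com.example.team': 'backend'},
    enable_ipv6=True,
)
```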
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/network.py`
Content:
```
1 import json
2
3 from ..errors import InvalidVersion
4 from ..utils import check_resource, minimum_version
5 from ..utils import version_lt
6
7
8 class NetworkApiMixin(object):
9 @minimum_version('1.21')
10 def networks(self, names=None, ids=None):
11 filters = {}
12 if names:
13 filters['name'] = names
14 if ids:
15 filters['id'] = ids
16
17 params = {'filters': json.dumps(filters)}
18
19 url = self._url("/networks")
20 res = self._get(url, params=params)
21 return self._result(res, json=True)
22
23 @minimum_version('1.21')
24 def create_network(self, name, driver=None, options=None, ipam=None,
25 check_duplicate=None, internal=False):
26 if options is not None and not isinstance(options, dict):
27 raise TypeError('options must be a dictionary')
28
29 data = {
30 'Name': name,
31 'Driver': driver,
32 'Options': options,
33 'IPAM': ipam,
34 'CheckDuplicate': check_duplicate
35 }
36
37 if internal:
38 if version_lt(self._version, '1.22'):
39 raise InvalidVersion('Internal networks are not '
40 'supported in API version < 1.22')
41 data['Internal'] = True
42
43 url = self._url("/networks/create")
44 res = self._post_json(url, data=data)
45 return self._result(res, json=True)
46
47 @minimum_version('1.21')
48 def remove_network(self, net_id):
49 url = self._url("/networks/{0}", net_id)
50 res = self._delete(url)
51 self._raise_for_status(res)
52
53 @minimum_version('1.21')
54 def inspect_network(self, net_id):
55 url = self._url("/networks/{0}", net_id)
56 res = self._get(url)
57 return self._result(res, json=True)
58
59 @check_resource
60 @minimum_version('1.21')
61 def connect_container_to_network(self, container, net_id,
62 ipv4_address=None, ipv6_address=None,
63 aliases=None, links=None,
64 link_local_ips=None):
65 data = {
66 "Container": container,
67 "EndpointConfig": self.create_endpoint_config(
68 aliases=aliases, links=links, ipv4_address=ipv4_address,
69 ipv6_address=ipv6_address, link_local_ips=link_local_ips
70 ),
71 }
72
73 url = self._url("/networks/{0}/connect", net_id)
74 res = self._post_json(url, data=data)
75 self._raise_for_status(res)
76
77 @check_resource
78 @minimum_version('1.21')
79 def disconnect_container_from_network(self, container, net_id):
80 data = {"container": container}
81 url = self._url("/networks/{0}/disconnect", net_id)
82 res = self._post_json(url, data=data)
83 self._raise_for_status(res)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/api/network.py b/docker/api/network.py
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -22,7 +22,8 @@
@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
- check_duplicate=None, internal=False):
+ check_duplicate=None, internal=False, labels=None,
+ enable_ipv6=False):
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
@@ -34,6 +35,22 @@
'CheckDuplicate': check_duplicate
}
+ if labels is not None:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'network labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
+ if enable_ipv6:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'enable_ipv6 was introduced in API 1.23'
+ )
+ data['EnableIPv6'] = True
+
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
@@ -76,8 +93,15 @@
@check_resource
@minimum_version('1.21')
- def disconnect_container_from_network(self, container, net_id):
- data = {"container": container}
+ def disconnect_container_from_network(self, container, net_id,
+ force=False):
+ data = {"Container": container}
+ if force:
+ if version_lt(self._version, '1.22'):
+ raise InvalidVersion(
+ 'Forced disconnect was introduced in API 1.22'
+ )
+ data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
| {"golden_diff": "diff --git a/docker/api/network.py b/docker/api/network.py\n--- a/docker/api/network.py\n+++ b/docker/api/network.py\n@@ -22,7 +22,8 @@\n \n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n- check_duplicate=None, internal=False):\n+ check_duplicate=None, internal=False, labels=None,\n+ enable_ipv6=False):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n \n@@ -34,6 +35,22 @@\n 'CheckDuplicate': check_duplicate\n }\n \n+ if labels is not None:\n+ if version_lt(self._version, '1.23'):\n+ raise InvalidVersion(\n+ 'network labels were introduced in API 1.23'\n+ )\n+ if not isinstance(labels, dict):\n+ raise TypeError('labels must be a dictionary')\n+ data[\"Labels\"] = labels\n+\n+ if enable_ipv6:\n+ if version_lt(self._version, '1.23'):\n+ raise InvalidVersion(\n+ 'enable_ipv6 was introduced in API 1.23'\n+ )\n+ data['EnableIPv6'] = True\n+\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n@@ -76,8 +93,15 @@\n \n @check_resource\n @minimum_version('1.21')\n- def disconnect_container_from_network(self, container, net_id):\n- data = {\"container\": container}\n+ def disconnect_container_from_network(self, container, net_id,\n+ force=False):\n+ data = {\"Container\": container}\n+ if force:\n+ if version_lt(self._version, '1.22'):\n+ raise InvalidVersion(\n+ 'Forced disconnect was introduced in API 1.22'\n+ )\n+ data['Force'] = force\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "issue": "Support create network EnableIPv6 and Labels options \nCheck the remote API:\nhttps://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-network\n\nThere are two missing JSON parameters:\n\n```\nEnableIPv6 - Enable IPv6 on the network\nLabels - Labels to set on the network, specified as a map: {\"key\":\"value\" [,\"key2\":\"value2\"]}\n```\n\n", "before_files": [{"content": "import json\n\nfrom ..errors import InvalidVersion\nfrom ..utils import check_resource, minimum_version\nfrom ..utils import version_lt\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n check_duplicate=None, internal=False):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'Options': options,\n 'IPAM': ipam,\n 'CheckDuplicate': check_duplicate\n }\n\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n 'supported in API version < 1.22')\n data['Internal'] = True\n\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return 
self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n aliases=None, links=None,\n link_local_ips=None):\n data = {\n \"Container\": container,\n \"EndpointConfig\": self.create_endpoint_config(\n aliases=aliases, links=links, ipv4_address=ipv4_address,\n ipv6_address=ipv6_address, link_local_ips=link_local_ips\n ),\n }\n\n url = self._url(\"/networks/{0}/connect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n\n @check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "path": "docker/api/network.py"}], "after_files": [{"content": "import json\n\nfrom ..errors import InvalidVersion\nfrom ..utils import check_resource, minimum_version\nfrom ..utils import version_lt\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n check_duplicate=None, internal=False, labels=None,\n enable_ipv6=False):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'Options': options,\n 'IPAM': ipam,\n 'CheckDuplicate': check_duplicate\n }\n\n if labels is not None:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'network labels were introduced in API 1.23'\n )\n if not isinstance(labels, dict):\n raise TypeError('labels must be a dictionary')\n data[\"Labels\"] = labels\n\n if enable_ipv6:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'enable_ipv6 was introduced in API 1.23'\n )\n data['EnableIPv6'] = True\n\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n 'supported in API version < 1.22')\n data['Internal'] = True\n\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n aliases=None, links=None,\n link_local_ips=None):\n data = {\n \"Container\": container,\n \"EndpointConfig\": self.create_endpoint_config(\n aliases=aliases, links=links, ipv4_address=ipv4_address,\n ipv6_address=ipv6_address, link_local_ips=link_local_ips\n ),\n }\n\n url = self._url(\"/networks/{0}/connect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n\n @check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id,\n force=False):\n data = {\"Container\": 
container}\n if force:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion(\n 'Forced disconnect was introduced in API 1.22'\n )\n data['Force'] = force\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "path": "docker/api/network.py"}]} | 1,142 | 480 |
gh_patches_debug_20448 | rasdani/github-patches | git_diff | litestar-org__litestar-3454 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: Document SSE
### Summary
The SSE documentation is currently lacking:
- Docs for `ServerSentEventMessage`
- Sending messages as dicts
--- END ISSUE ---
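A short sketch of the two undocumented shapes — a plain dict and a `ServerSentEventMessage` — as they would be yielded from an SSE generator; the field values are arbitrary examples.

```python
from typing import AsyncGenerator

from litestar.response import ServerSentEvent, ServerSentEventMessage
from litestar.types import SSEData


async def my_generator() -> AsyncGenerator[SSEData, None]:
    # dicts may carry the SSE fields: data, event, id, retry, comment
    yield {"data": 42, "event": "tick", "retry": 10}
    # the same fields are available as an object
    yield ServerSentEventMessage(event="done", comment="some comment")


response = ServerSentEvent(my_generator())
```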
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/responses/sse_responses.py`
Content:
```
1 from asyncio import sleep
2 from typing import AsyncGenerator
3
4 from litestar import Litestar, get
5 from litestar.response import ServerSentEvent
6
7
8 async def my_generator() -> AsyncGenerator[bytes, None]:
9 count = 0
10 while count < 10:
11 await sleep(0.01)
12 count += 1
13 yield str(count)
14
15
16 @get(path="/count", sync_to_thread=False)
17 def sse_handler() -> ServerSentEvent:
18 return ServerSentEvent(my_generator())
19
20
21 app = Litestar(route_handlers=[sse_handler])
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/examples/responses/sse_responses.py b/docs/examples/responses/sse_responses.py
--- a/docs/examples/responses/sse_responses.py
+++ b/docs/examples/responses/sse_responses.py
@@ -2,15 +2,28 @@
from typing import AsyncGenerator
from litestar import Litestar, get
-from litestar.response import ServerSentEvent
+from litestar.response import ServerSentEvent, ServerSentEventMessage
+from litestar.types import SSEData
-async def my_generator() -> AsyncGenerator[bytes, None]:
+async def my_generator() -> AsyncGenerator[SSEData, None]:
count = 0
while count < 10:
await sleep(0.01)
count += 1
+ # In the generator you can yield integers, strings, bytes, dictionaries, or ServerSentEventMessage objects
+ # dicts can have the following keys: data, event, id, retry, comment
+
+ # here we yield an integer
+ yield count
+ # here a string
yield str(count)
+ # here bytes
+ yield str(count).encode("utf-8")
+ # here a dictionary
+ yield {"data": 2 * count, "event": "event2", "retry": 10}
+ # here a ServerSentEventMessage object
+ yield ServerSentEventMessage(event="something-with-comment", retry=1000, comment="some comment")
@get(path="/count", sync_to_thread=False)
| {"golden_diff": "diff --git a/docs/examples/responses/sse_responses.py b/docs/examples/responses/sse_responses.py\n--- a/docs/examples/responses/sse_responses.py\n+++ b/docs/examples/responses/sse_responses.py\n@@ -2,15 +2,28 @@\n from typing import AsyncGenerator\n \n from litestar import Litestar, get\n-from litestar.response import ServerSentEvent\n+from litestar.response import ServerSentEvent, ServerSentEventMessage\n+from litestar.types import SSEData\n \n \n-async def my_generator() -> AsyncGenerator[bytes, None]:\n+async def my_generator() -> AsyncGenerator[SSEData, None]:\n count = 0\n while count < 10:\n await sleep(0.01)\n count += 1\n+ # In the generator you can yield integers, strings, bytes, dictionaries, or ServerSentEventMessage objects\n+ # dicts can have the following keys: data, event, id, retry, comment\n+\n+ # here we yield an integer\n+ yield count\n+ # here a string\n yield str(count)\n+ # here bytes\n+ yield str(count).encode(\"utf-8\")\n+ # here a dictionary\n+ yield {\"data\": 2 * count, \"event\": \"event2\", \"retry\": 10}\n+ # here a ServerSentEventMessage object\n+ yield ServerSentEventMessage(event=\"something-with-comment\", retry=1000, comment=\"some comment\")\n \n \n @get(path=\"/count\", sync_to_thread=False)\n", "issue": "Docs: Document SSE\n### Summary\n\nThe SSE documentation is currently lacking:\r\n\r\n- Docs for `ServerSentEventMessage`\r\n- Sending messages a dicts\r\n\r\n\n\n<!-- POLAR PLEDGE BADGE START -->\n---\n> [!NOTE] \n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\n>\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\n> * If you would like to see an issue prioritized, make a pledge towards it!\n> * We receive the pledge once the issue is completed & verified\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\n\n<a href=\"https://polar.sh/litestar-org/litestar/issues/3011\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/3011/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from asyncio import sleep\nfrom typing import AsyncGenerator\n\nfrom litestar import Litestar, get\nfrom litestar.response import ServerSentEvent\n\n\nasync def my_generator() -> AsyncGenerator[bytes, None]:\n count = 0\n while count < 10:\n await sleep(0.01)\n count += 1\n yield str(count)\n\n\n@get(path=\"/count\", sync_to_thread=False)\ndef sse_handler() -> ServerSentEvent:\n return ServerSentEvent(my_generator())\n\n\napp = Litestar(route_handlers=[sse_handler])\n", "path": "docs/examples/responses/sse_responses.py"}], "after_files": [{"content": "from asyncio import sleep\nfrom typing import AsyncGenerator\n\nfrom litestar import Litestar, get\nfrom litestar.response import ServerSentEvent, ServerSentEventMessage\nfrom litestar.types import SSEData\n\n\nasync def my_generator() -> AsyncGenerator[SSEData, None]:\n count = 0\n while count < 10:\n await sleep(0.01)\n count += 1\n # In the generator you can yield integers, strings, bytes, dictionaries, or ServerSentEventMessage objects\n # dicts can 
have the following keys: data, event, id, retry, comment\n\n # here we yield an integer\n yield count\n # here a string\n yield str(count)\n # here bytes\n yield str(count).encode(\"utf-8\")\n # here a dictionary\n yield {\"data\": 2 * count, \"event\": \"event2\", \"retry\": 10}\n # here a ServerSentEventMessage object\n yield ServerSentEventMessage(event=\"something-with-comment\", retry=1000, comment=\"some comment\")\n\n\n@get(path=\"/count\", sync_to_thread=False)\ndef sse_handler() -> ServerSentEvent:\n return ServerSentEvent(my_generator())\n\n\napp = Litestar(route_handlers=[sse_handler])\n", "path": "docs/examples/responses/sse_responses.py"}]} | 739 | 332 |
gh_patches_debug_26741 | rasdani/github-patches | git_diff | pre-commit__pre-commit-893 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect shebang in .git/hooks/pre-commit for python3 only installations
The shebang for `.git/hooks/pre-commit` is `#!/usr/bin/env python`. I work with setups where `python3` is the only python in env.
Could the shebang be the install python instead? I.e. the installation under `INSTALL_PYTHON = '/usr/bin/python3'`
--- END ISSUE ---
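One way to read the request is that the installed hook's shebang should follow the interpreter that is actually available rather than hard-coding `python`. The sketch below is a simplified stand-in (the real project may resolve the default version differently):

```python
import sys


def shebang():
    # Keep a plain 'python' launcher on Windows; elsewhere prefer the major
    # version of the interpreter running the install (e.g. 'python3').
    if sys.platform == 'win32':
        py = 'python'
    else:
        py = 'python{}'.format(sys.version_info[0])
    return '#!/usr/bin/env {}'.format(py)
```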
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/install_uninstall.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import io
5 import logging
6 import os.path
7 import sys
8
9 from pre_commit import git
10 from pre_commit import output
11 from pre_commit.repository import repositories
12 from pre_commit.util import cmd_output
13 from pre_commit.util import make_executable
14 from pre_commit.util import mkdirp
15 from pre_commit.util import resource_text
16
17
18 logger = logging.getLogger(__name__)
19
20 # This is used to identify the hook file we install
21 PRIOR_HASHES = (
22 '4d9958c90bc262f47553e2c073f14cfe',
23 'd8ee923c46731b42cd95cc869add4062',
24 '49fd668cb42069aa1b6048464be5d395',
25 '79f09a650522a87b0da915d0d983b2de',
26 'e358c9dae00eac5d06b38dfdb1e33a8c',
27 )
28 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
29 TEMPLATE_START = '# start templated\n'
30 TEMPLATE_END = '# end templated\n'
31
32
33 def _hook_paths(git_root, hook_type):
34 pth = os.path.join(git.get_git_dir(git_root), 'hooks', hook_type)
35 return pth, '{}.legacy'.format(pth)
36
37
38 def is_our_script(filename):
39 if not os.path.exists(filename):
40 return False
41 with io.open(filename) as f:
42 contents = f.read()
43 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
44
45
46 def install(
47 runner, store, overwrite=False, hooks=False, hook_type='pre-commit',
48 skip_on_missing_conf=False,
49 ):
50 """Install the pre-commit hooks."""
51 if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
52 logger.error(
53 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
54 'hint: `git config --unset-all core.hooksPath`',
55 )
56 return 1
57
58 hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)
59
60 mkdirp(os.path.dirname(hook_path))
61
62 # If we have an existing hook, move it to pre-commit.legacy
63 if os.path.lexists(hook_path) and not is_our_script(hook_path):
64 os.rename(hook_path, legacy_path)
65
66 # If we specify overwrite, we simply delete the legacy file
67 if overwrite and os.path.exists(legacy_path):
68 os.remove(legacy_path)
69 elif os.path.exists(legacy_path):
70 output.write_line(
71 'Running in migration mode with existing hooks at {}\n'
72 'Use -f to use only pre-commit.'.format(legacy_path),
73 )
74
75 params = {
76 'CONFIG': runner.config_file,
77 'HOOK_TYPE': hook_type,
78 'INSTALL_PYTHON': sys.executable,
79 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,
80 }
81
82 with io.open(hook_path, 'w') as hook_file:
83 contents = resource_text('hook-tmpl')
84 before, rest = contents.split(TEMPLATE_START)
85 to_template, after = rest.split(TEMPLATE_END)
86
87 hook_file.write(before + TEMPLATE_START)
88 for line in to_template.splitlines():
89 var = line.split()[0]
90 hook_file.write('{} = {!r}\n'.format(var, params[var]))
91 hook_file.write(TEMPLATE_END + after)
92 make_executable(hook_path)
93
94 output.write_line('pre-commit installed at {}'.format(hook_path))
95
96 # If they requested we install all of the hooks, do so.
97 if hooks:
98 install_hooks(runner, store)
99
100 return 0
101
102
103 def install_hooks(runner, store):
104 for repository in repositories(runner.config, store):
105 repository.require_installed()
106
107
108 def uninstall(runner, hook_type='pre-commit'):
109 """Uninstall the pre-commit hooks."""
110 hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)
111
112 # If our file doesn't exist or it isn't ours, gtfo.
113 if not os.path.exists(hook_path) or not is_our_script(hook_path):
114 return 0
115
116 os.remove(hook_path)
117 output.write_line('{} uninstalled'.format(hook_type))
118
119 if os.path.exists(legacy_path):
120 os.rename(legacy_path, hook_path)
121 output.write_line('Restored previous hooks to {}'.format(hook_path))
122
123 return 0
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -8,6 +8,7 @@
from pre_commit import git
from pre_commit import output
+from pre_commit.languages import python
from pre_commit.repository import repositories
from pre_commit.util import cmd_output
from pre_commit.util import make_executable
@@ -43,6 +44,16 @@
return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
+def shebang():
+ if sys.platform == 'win32':
+ py = 'python'
+ else:
+ py = python.get_default_version()
+ if py == 'default':
+ py = 'python'
+ return '#!/usr/bin/env {}'.format(py)
+
+
def install(
runner, store, overwrite=False, hooks=False, hook_type='pre-commit',
skip_on_missing_conf=False,
@@ -84,6 +95,8 @@
before, rest = contents.split(TEMPLATE_START)
to_template, after = rest.split(TEMPLATE_END)
+ before = before.replace('#!/usr/bin/env python', shebang())
+
hook_file.write(before + TEMPLATE_START)
for line in to_template.splitlines():
var = line.split()[0]
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -8,6 +8,7 @@\n \n from pre_commit import git\n from pre_commit import output\n+from pre_commit.languages import python\n from pre_commit.repository import repositories\n from pre_commit.util import cmd_output\n from pre_commit.util import make_executable\n@@ -43,6 +44,16 @@\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n \n \n+def shebang():\n+ if sys.platform == 'win32':\n+ py = 'python'\n+ else:\n+ py = python.get_default_version()\n+ if py == 'default':\n+ py = 'python'\n+ return '#!/usr/bin/env {}'.format(py)\n+\n+\n def install(\n runner, store, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n@@ -84,6 +95,8 @@\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n \n+ before = before.replace('#!/usr/bin/env python', shebang())\n+\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n", "issue": "Incorrect shebang in .git/hooks/pre-commit for python3 only installations\nThe shebang for `.git/hooks/pre-commit` is `#!/usr/bin/env python`. I work with setups where `python3` is the only python in env.\r\n\r\nCould the shebang be the install python instead? I.e. the installation under `INSTALL_PYTHON = '/usr/bin/python3'`\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.repository import repositories\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import mkdirp\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(git_root, hook_type):\n pth = os.path.join(git.get_git_dir(git_root), 'hooks', hook_type)\n return pth, '{}.legacy'.format(pth)\n\n\ndef is_our_script(filename):\n if not os.path.exists(filename):\n return False\n with io.open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef install(\n runner, store, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n):\n \"\"\"Install the pre-commit hooks.\"\"\"\n if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)\n\n mkdirp(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n 
elif os.path.exists(legacy_path):\n output.write_line(\n 'Running in migration mode with existing hooks at {}\\n'\n 'Use -f to use only pre-commit.'.format(legacy_path),\n )\n\n params = {\n 'CONFIG': runner.config_file,\n 'HOOK_TYPE': hook_type,\n 'INSTALL_PYTHON': sys.executable,\n 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,\n }\n\n with io.open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write('{} = {!r}\\n'.format(var, params[var]))\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line('pre-commit installed at {}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n install_hooks(runner, store)\n\n return 0\n\n\ndef install_hooks(runner, store):\n for repository in repositories(runner.config, store):\n repository.require_installed()\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.languages import python\nfrom pre_commit.repository import repositories\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import mkdirp\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(git_root, hook_type):\n pth = os.path.join(git.get_git_dir(git_root), 'hooks', hook_type)\n return pth, '{}.legacy'.format(pth)\n\n\ndef is_our_script(filename):\n if not os.path.exists(filename):\n return False\n with io.open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang():\n if sys.platform == 'win32':\n py = 'python'\n else:\n py = python.get_default_version()\n if py == 'default':\n py = 'python'\n return '#!/usr/bin/env {}'.format(py)\n\n\ndef install(\n runner, store, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n):\n \"\"\"Install the pre-commit hooks.\"\"\"\n if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n hook_path, legacy_path = 
_hook_paths(runner.git_root, hook_type)\n\n mkdirp(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n 'Running in migration mode with existing hooks at {}\\n'\n 'Use -f to use only pre-commit.'.format(legacy_path),\n )\n\n params = {\n 'CONFIG': runner.config_file,\n 'HOOK_TYPE': hook_type,\n 'INSTALL_PYTHON': sys.executable,\n 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,\n }\n\n with io.open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write('{} = {!r}\\n'.format(var, params[var]))\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line('pre-commit installed at {}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n install_hooks(runner, store)\n\n return 0\n\n\ndef install_hooks(runner, store):\n for repository in repositories(runner.config, store):\n repository.require_installed()\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path, legacy_path = _hook_paths(runner.git_root, hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 1,668 | 303 |
gh_patches_debug_35746 | rasdani/github-patches | git_diff | vispy__vispy-1391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SceneGraph: HowTo view single scene in different viewboxes
Using https://github.com/vispy/vispy/blob/master/examples/basics/scene/one_scene_four_cams.py to view a single scene in four different viewboxes doesn't work.
The scene is actually generated four times, not only once. There are remnants of multi-parenting commented out in the example, but that approach no longer works (multi-parenting has been removed).

Is it possible to have one scene viewed from different angles (e.g. top view, front view and side view) without recreating the scene four times?
--- END ISSUE ---
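
Editor's note, for illustration only (not part of the original report or the repository): since a visual can no longer have multiple parents, the usual workaround is to build one visual per view from the same data. A minimal sketch of that pattern using vispy's scene API:

```python
from vispy import app, scene, io

canvas = scene.SceneCanvas(keys='interactive', show=True)
grid = canvas.central_widget.add_grid()

im = io.load_crate().astype('float32') / 255
for row, col in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    view = grid.add_view(row=row, col=col, border_color='white')
    # One Image visual per view; the pixel data is shared, the visuals are not.
    scene.visuals.Image(im, parent=view.scene)
    view.camera = 'panzoom'

if __name__ == '__main__':
    app.run()
```

Each view could instead be given a different camera (turntable, fly, ...) to obtain the top/front/side arrangement asked about above.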
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/basics/scene/one_scene_four_cams.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # -----------------------------------------------------------------------------
3 # Copyright (c) Vispy Development Team. All Rights Reserved.
4 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
5 # -----------------------------------------------------------------------------
6 # vispy: gallery 2
7
8 """
9 Demonstrating a single scene that is shown in four different viewboxes,
10 each with a different camera.
11 """
12
13 # todo: the panzoom camera sometimes work, sometimes not. Not sure why.
14 # we should probably make iterating over children deterministic, so that
15 # an error like this becomes easier to reproduce ...
16
17 import sys
18
19 from vispy import app, scene, io
20
21 canvas = scene.SceneCanvas(keys='interactive')
22 canvas.size = 800, 600
23 canvas.show()
24
25 # Create two ViewBoxes, place side-by-side
26 vb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
27 vb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
28 vb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
29 vb4 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
30 scenes = vb1.scene, vb2.scene, vb3.scene, vb4.scene
31
32 # Put viewboxes in a grid
33 grid = canvas.central_widget.add_grid()
34 grid.padding = 6
35 grid.add_widget(vb1, 0, 0)
36 grid.add_widget(vb2, 0, 1)
37 grid.add_widget(vb3, 1, 0)
38 grid.add_widget(vb4, 1, 1)
39
40 # Create some visuals to show
41 # AK: Ideally, we could just create one visual that is present in all
42 # scenes, but that results in flicker for the PanZoomCamera, I suspect
43 # due to errors in transform caching.
44 im1 = io.load_crate().astype('float32') / 255
45 #image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)
46 for par in scenes:
47 image = scene.visuals.Image(im1, grid=(20, 20), parent=par)
48
49 #vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
50 #volume1 = scene.visuals.Volume(vol1, parent=scenes)
51 #volume1.transform = scene.STTransform(translate=(0, 0, 10))
52
53 # Assign cameras
54 vb1.camera = scene.BaseCamera()
55 vb2.camera = scene.PanZoomCamera()
56 vb3.camera = scene.TurntableCamera()
57 vb4.camera = scene.FlyCamera()
58
59
60 # If True, show a cuboid at each camera
61 if False:
62 cube = scene.visuals.Cube((3, 3, 5))
63 cube.transform = scene.STTransform(translate=(0, 0, 6))
64 for vb in (vb1, vb2, vb3, vb4):
65 vb.camera.parents = scenes
66 cube.add_parent(vb.camera)
67
68 if __name__ == '__main__':
69 if sys.flags.interactive != 1:
70 app.run()
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/basics/scene/one_scene_four_cams.py b/examples/basics/scene/one_scene_four_cams.py
--- a/examples/basics/scene/one_scene_four_cams.py
+++ b/examples/basics/scene/one_scene_four_cams.py
@@ -8,11 +8,12 @@
"""
Demonstrating a single scene that is shown in four different viewboxes,
each with a different camera.
-"""
-# todo: the panzoom camera sometimes work, sometimes not. Not sure why.
-# we should probably make iterating over children deterministic, so that
-# an error like this becomes easier to reproduce ...
+Note:
+ This example just creates four scenes using the same visual.
+ Multiple views are currently not available. See #1124 how this could
+ be achieved.
+"""
import sys
@@ -22,7 +23,7 @@
canvas.size = 800, 600
canvas.show()
-# Create two ViewBoxes, place side-by-side
+# Create four ViewBoxes
vb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
vb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
vb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
@@ -38,33 +39,16 @@
grid.add_widget(vb4, 1, 1)
# Create some visuals to show
-# AK: Ideally, we could just create one visual that is present in all
-# scenes, but that results in flicker for the PanZoomCamera, I suspect
-# due to errors in transform caching.
im1 = io.load_crate().astype('float32') / 255
-#image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)
for par in scenes:
image = scene.visuals.Image(im1, grid=(20, 20), parent=par)
-#vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
-#volume1 = scene.visuals.Volume(vol1, parent=scenes)
-#volume1.transform = scene.STTransform(translate=(0, 0, 10))
-
# Assign cameras
vb1.camera = scene.BaseCamera()
vb2.camera = scene.PanZoomCamera()
vb3.camera = scene.TurntableCamera()
vb4.camera = scene.FlyCamera()
-
-# If True, show a cuboid at each camera
-if False:
- cube = scene.visuals.Cube((3, 3, 5))
- cube.transform = scene.STTransform(translate=(0, 0, 6))
- for vb in (vb1, vb2, vb3, vb4):
- vb.camera.parents = scenes
- cube.add_parent(vb.camera)
-
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
| {"golden_diff": "diff --git a/examples/basics/scene/one_scene_four_cams.py b/examples/basics/scene/one_scene_four_cams.py\n--- a/examples/basics/scene/one_scene_four_cams.py\n+++ b/examples/basics/scene/one_scene_four_cams.py\n@@ -8,11 +8,12 @@\n \"\"\"\n Demonstrating a single scene that is shown in four different viewboxes,\n each with a different camera.\n-\"\"\"\n \n-# todo: the panzoom camera sometimes work, sometimes not. Not sure why.\n-# we should probably make iterating over children deterministic, so that\n-# an error like this becomes easier to reproduce ...\n+Note:\n+ This example just creates four scenes using the same visual.\n+ Multiple views are currently not available. See #1124 how this could\n+ be achieved.\n+\"\"\"\n \n import sys\n \n@@ -22,7 +23,7 @@\n canvas.size = 800, 600\n canvas.show()\n \n-# Create two ViewBoxes, place side-by-side\n+# Create four ViewBoxes\n vb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\n vb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\n vb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\n@@ -38,33 +39,16 @@\n grid.add_widget(vb4, 1, 1)\n \n # Create some visuals to show\n-# AK: Ideally, we could just create one visual that is present in all\n-# scenes, but that results in flicker for the PanZoomCamera, I suspect\n-# due to errors in transform caching.\n im1 = io.load_crate().astype('float32') / 255\n-#image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)\n for par in scenes:\n image = scene.visuals.Image(im1, grid=(20, 20), parent=par)\n \n-#vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']\n-#volume1 = scene.visuals.Volume(vol1, parent=scenes)\n-#volume1.transform = scene.STTransform(translate=(0, 0, 10))\n-\n # Assign cameras\n vb1.camera = scene.BaseCamera()\n vb2.camera = scene.PanZoomCamera()\n vb3.camera = scene.TurntableCamera()\n vb4.camera = scene.FlyCamera()\n \n-\n-# If True, show a cuboid at each camera\n-if False:\n- cube = scene.visuals.Cube((3, 3, 5))\n- cube.transform = scene.STTransform(translate=(0, 0, 6))\n- for vb in (vb1, vb2, vb3, vb4):\n- vb.camera.parents = scenes\n- cube.add_parent(vb.camera)\n-\n if __name__ == '__main__':\n if sys.flags.interactive != 1:\n app.run()\n", "issue": "SceneGraph: HowTo view single scene in different viewboxes\nUsing https://github.com/vispy/vispy/blob/master/examples/basics/scene/one_scene_four_cams.py to view a single scene in four different viewboxes doesn't work.\n\nThe scene is actually generated four times, not only once. There are reminders of multi-parenting commented out in the example, but this won't work any more (since removal of multi-parenting).\n\nIs it possible to have one scene viewed from different angels (eg. top view, front view and side view) without recreating the scene four times?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n# vispy: gallery 2\n\n\"\"\"\nDemonstrating a single scene that is shown in four different viewboxes,\neach with a different camera.\n\"\"\"\n\n# todo: the panzoom camera sometimes work, sometimes not. 
Not sure why.\n# we should probably make iterating over children deterministic, so that\n# an error like this becomes easier to reproduce ...\n\nimport sys\n\nfrom vispy import app, scene, io\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 800, 600\ncanvas.show()\n\n# Create two ViewBoxes, place side-by-side\nvb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb4 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nscenes = vb1.scene, vb2.scene, vb3.scene, vb4.scene\n\n# Put viewboxes in a grid\ngrid = canvas.central_widget.add_grid()\ngrid.padding = 6\ngrid.add_widget(vb1, 0, 0)\ngrid.add_widget(vb2, 0, 1)\ngrid.add_widget(vb3, 1, 0)\ngrid.add_widget(vb4, 1, 1)\n\n# Create some visuals to show\n# AK: Ideally, we could just create one visual that is present in all\n# scenes, but that results in flicker for the PanZoomCamera, I suspect\n# due to errors in transform caching.\nim1 = io.load_crate().astype('float32') / 255\n#image1 = scene.visuals.Image(im1, grid=(20, 20), parent=scenes)\nfor par in scenes:\n image = scene.visuals.Image(im1, grid=(20, 20), parent=par)\n\n#vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']\n#volume1 = scene.visuals.Volume(vol1, parent=scenes)\n#volume1.transform = scene.STTransform(translate=(0, 0, 10))\n\n# Assign cameras\nvb1.camera = scene.BaseCamera()\nvb2.camera = scene.PanZoomCamera()\nvb3.camera = scene.TurntableCamera()\nvb4.camera = scene.FlyCamera()\n\n\n# If True, show a cuboid at each camera\nif False:\n cube = scene.visuals.Cube((3, 3, 5))\n cube.transform = scene.STTransform(translate=(0, 0, 6))\n for vb in (vb1, vb2, vb3, vb4):\n vb.camera.parents = scenes\n cube.add_parent(vb.camera)\n\nif __name__ == '__main__':\n if sys.flags.interactive != 1:\n app.run()\n", "path": "examples/basics/scene/one_scene_four_cams.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n# vispy: gallery 2\n\n\"\"\"\nDemonstrating a single scene that is shown in four different viewboxes,\neach with a different camera.\n\nNote:\n This example just creates four scenes using the same visual.\n Multiple views are currently not available. 
See #1124 how this could\n be achieved.\n\"\"\"\n\nimport sys\n\nfrom vispy import app, scene, io\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 800, 600\ncanvas.show()\n\n# Create four ViewBoxes\nvb1 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb2 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb3 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nvb4 = scene.widgets.ViewBox(border_color='white', parent=canvas.scene)\nscenes = vb1.scene, vb2.scene, vb3.scene, vb4.scene\n\n# Put viewboxes in a grid\ngrid = canvas.central_widget.add_grid()\ngrid.padding = 6\ngrid.add_widget(vb1, 0, 0)\ngrid.add_widget(vb2, 0, 1)\ngrid.add_widget(vb3, 1, 0)\ngrid.add_widget(vb4, 1, 1)\n\n# Create some visuals to show\nim1 = io.load_crate().astype('float32') / 255\nfor par in scenes:\n image = scene.visuals.Image(im1, grid=(20, 20), parent=par)\n\n# Assign cameras\nvb1.camera = scene.BaseCamera()\nvb2.camera = scene.PanZoomCamera()\nvb3.camera = scene.TurntableCamera()\nvb4.camera = scene.FlyCamera()\n\nif __name__ == '__main__':\n if sys.flags.interactive != 1:\n app.run()\n", "path": "examples/basics/scene/one_scene_four_cams.py"}]} | 1,181 | 653 |
gh_patches_debug_26263 | rasdani/github-patches | git_diff | pypa__pip-2303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Selfcheck failure on Windows
I get this warning all the time:
```
There was an error checking the latest version of pip
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\pip\utils\outdated.py", line 115, in pip_version_check
state.save(pypi_version, current_time)
File "C:\Python27\lib\site-packages\pip\utils\outdated.py", line 62, in save
with open(self.statefile_path) as statefile:
IOError: [Errno 2] No such file or directory: u'C:\\Users\\ionel_000\\AppData\\Local\\pip\\Cache\\selfcheck.json'
```
If I create the file, it complains about invalid JSON. I've put `{}` inside and the warning has gone away, but this seems very wrong to me.
--- END ISSUE ---
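
A minimal sketch of the general pattern for making such a cache write robust (editor's illustration only, not the patch pip actually shipped): create the cache directory first, and treat a missing or unreadable state file as empty instead of failing.

```python
import errno
import json
import os

def write_state(statefile_path, entry):
    """Merge `entry` into the JSON state file, creating its directory if needed."""
    try:
        os.makedirs(os.path.dirname(statefile_path))
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    try:
        with open(statefile_path) as f:
            state = json.load(f)
    except (IOError, ValueError):
        state = {}
    state.update(entry)
    with open(statefile_path, "w") as f:
        json.dump(state, f, sort_keys=True, separators=(",", ":"))
```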
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/utils/outdated.py`
Content:
```
1 from __future__ import absolute_import
2
3 import datetime
4 import json
5 import logging
6 import os.path
7 import sys
8
9 from pip._vendor import lockfile
10 from pip._vendor import pkg_resources
11
12 from pip.compat import total_seconds
13 from pip.index import PyPI
14 from pip.locations import USER_CACHE_DIR, running_under_virtualenv
15
16
17 SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
18
19
20 logger = logging.getLogger(__name__)
21
22
23 class VirtualenvSelfCheckState(object):
24 def __init__(self):
25 self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
26
27 # Load the existing state
28 try:
29 with open(self.statefile_path) as statefile:
30 self.state = json.load(statefile)
31 except (IOError, ValueError):
32 self.state = {}
33
34 def save(self, pypi_version, current_time):
35 # Attempt to write out our version check file
36 with open(self.statefile_path, "w") as statefile:
37 json.dump(
38 {
39 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
40 "pypi_version": pypi_version,
41 },
42 statefile,
43 sort_keys=True,
44 separators=(",", ":")
45 )
46
47
48 class GlobalSelfCheckState(object):
49 def __init__(self):
50 self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
51
52 # Load the existing state
53 try:
54 with open(self.statefile_path) as statefile:
55 self.state = json.load(statefile)[sys.prefix]
56 except (IOError, ValueError, KeyError):
57 self.state = {}
58
59 def save(self, pypi_version, current_time):
60 # Attempt to write out our version check file
61 with lockfile.LockFile(self.statefile_path):
62 with open(self.statefile_path) as statefile:
63 state = json.load(statefile)
64
65 state[sys.prefix] = {
66 "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
67 "pypi_version": pypi_version,
68 }
69
70 with open(self.statefile_path, "w") as statefile:
71 json.dump(state, statefile, sort_keys=True,
72 separators=(",", ":"))
73
74
75 def load_selfcheck_statefile():
76 if running_under_virtualenv():
77 return VirtualenvSelfCheckState()
78 else:
79 return GlobalSelfCheckState()
80
81
82 def pip_version_check(session):
83 """Check for an update for pip.
84
85 Limit the frequency of checks to once per week. State is stored either in
86 the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
87 of the pip script path.
88 """
89 import pip # imported here to prevent circular imports
90 pypi_version = None
91
92 try:
93 state = load_selfcheck_statefile()
94
95 current_time = datetime.datetime.utcnow()
96 # Determine if we need to refresh the state
97 if "last_check" in state.state and "pypi_version" in state.state:
98 last_check = datetime.datetime.strptime(
99 state.state["last_check"],
100 SELFCHECK_DATE_FMT
101 )
102 if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
103 pypi_version = state.state["pypi_version"]
104
105 # Refresh the version if we need to or just see if we need to warn
106 if pypi_version is None:
107 resp = session.get(
108 PyPI.pip_json_url,
109 headers={"Accept": "application/json"},
110 )
111 resp.raise_for_status()
112 pypi_version = resp.json()["info"]["version"]
113
114 # save that we've performed a check
115 state.save(pypi_version, current_time)
116
117 pip_version = pkg_resources.parse_version(pip.__version__)
118
119 # Determine if our pypi_version is older
120 if pip_version < pkg_resources.parse_version(pypi_version):
121 logger.warning(
122 "You are using pip version %s, however version %s is "
123 "available.\nYou should consider upgrading via the "
124 "'pip install --upgrade pip' command." % (pip.__version__,
125 pypi_version)
126 )
127
128 except Exception:
129 logger.debug(
130 "There was an error checking the latest version of pip",
131 exc_info=True,
132 )
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py
--- a/pip/utils/outdated.py
+++ b/pip/utils/outdated.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
import datetime
+import errno
import json
import logging
import os.path
@@ -12,6 +13,7 @@
from pip.compat import total_seconds
from pip.index import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
+from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
@@ -57,6 +59,19 @@
self.state = {}
def save(self, pypi_version, current_time):
+ # Check to make sure that we own the directory
+ if not check_path_owner(
+ os.path.dirname(self.statefile_path), os.geteuid()):
+ return
+
+ # Now that we've ensured the directory is owned by this user, we'll go
+ # ahead and make sure that all our directories are created.
+ try:
+ os.makedirs(os.path.dirname(self.statefile_path))
+ except OSError as exc:
+ if exc.errno != errno.EEXIST:
+ raise
+
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
with open(self.statefile_path) as statefile:
| {"golden_diff": "diff --git a/pip/utils/outdated.py b/pip/utils/outdated.py\n--- a/pip/utils/outdated.py\n+++ b/pip/utils/outdated.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import\n \n import datetime\n+import errno\n import json\n import logging\n import os.path\n@@ -12,6 +13,7 @@\n from pip.compat import total_seconds\n from pip.index import PyPI\n from pip.locations import USER_CACHE_DIR, running_under_virtualenv\n+from pip.utils.filesystem import check_path_owner\n \n \n SELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n@@ -57,6 +59,19 @@\n self.state = {}\n \n def save(self, pypi_version, current_time):\n+ # Check to make sure that we own the directory\n+ if not check_path_owner(\n+ os.path.dirname(self.statefile_path), os.geteuid()):\n+ return\n+\n+ # Now that we've ensured the directory is owned by this user, we'll go\n+ # ahead and make sure that all our directories are created.\n+ try:\n+ os.makedirs(os.path.dirname(self.statefile_path))\n+ except OSError as exc:\n+ if exc.errno != errno.EEXIST:\n+ raise\n+\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n with open(self.statefile_path) as statefile:\n", "issue": "Selfcheck failure on Windows\nI get this warning all the time:\n\n```\nThere was an error checking the latest version of pip\nTraceback (most recent call last):\n File \"C:\\Python27\\lib\\site-packages\\pip\\utils\\outdated.py\", line 115, in pip_version_check\n state.save(pypi_version, current_time)\n File \"C:\\Python27\\lib\\site-packages\\pip\\utils\\outdated.py\", line 62, in save\n with open(self.statefile_path) as statefile:\nIOError: [Errno 2] No such file or directory: u'C:\\\\Users\\\\ionel_000\\\\AppData\\\\Local\\\\pip\\\\Cache\\\\selfcheck.json'\n```\n\nIf I create the file, it complains about invalid json. 
I've put `{}` inside, the warning has gone away, but this seems very wrong to me.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile\nfrom pip._vendor import pkg_resources\n\nfrom pip.compat import total_seconds\nfrom pip.index import PyPI\nfrom pip.locations import USER_CACHE_DIR, running_under_virtualenv\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass VirtualenvSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(sys.prefix, \"pip-selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)\n except (IOError, ValueError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(\n {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n },\n statefile,\n sort_keys=True,\n separators=(\",\", \":\")\n )\n\n\nclass GlobalSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(USER_CACHE_DIR, \"selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with lockfile.LockFile(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef load_selfcheck_statefile():\n if running_under_virtualenv():\n return VirtualenvSelfCheckState()\n else:\n return GlobalSelfCheckState()\n\n\ndef pip_version_check(session):\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. 
State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n import pip # imported here to prevent circular imports\n pypi_version = None\n\n try:\n state = load_selfcheck_statefile()\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n resp = session.get(\n PyPI.pip_json_url,\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n pypi_version = resp.json()[\"info\"][\"version\"]\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n pip_version = pkg_resources.parse_version(pip.__version__)\n\n # Determine if our pypi_version is older\n if pip_version < pkg_resources.parse_version(pypi_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'pip install --upgrade pip' command.\" % (pip.__version__,\n pypi_version)\n )\n\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "pip/utils/outdated.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport datetime\nimport errno\nimport json\nimport logging\nimport os.path\nimport sys\n\nfrom pip._vendor import lockfile\nfrom pip._vendor import pkg_resources\n\nfrom pip.compat import total_seconds\nfrom pip.index import PyPI\nfrom pip.locations import USER_CACHE_DIR, running_under_virtualenv\nfrom pip.utils.filesystem import check_path_owner\n\n\nSELFCHECK_DATE_FMT = \"%Y-%m-%dT%H:%M:%SZ\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass VirtualenvSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(sys.prefix, \"pip-selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)\n except (IOError, ValueError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Attempt to write out our version check file\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(\n {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n },\n statefile,\n sort_keys=True,\n separators=(\",\", \":\")\n )\n\n\nclass GlobalSelfCheckState(object):\n def __init__(self):\n self.statefile_path = os.path.join(USER_CACHE_DIR, \"selfcheck.json\")\n\n # Load the existing state\n try:\n with open(self.statefile_path) as statefile:\n self.state = json.load(statefile)[sys.prefix]\n except (IOError, ValueError, KeyError):\n self.state = {}\n\n def save(self, pypi_version, current_time):\n # Check to make sure that we own the directory\n if not check_path_owner(\n os.path.dirname(self.statefile_path), os.geteuid()):\n return\n\n # Now that we've ensured the directory is owned by this user, we'll go\n # ahead and make sure that all our directories are created.\n try:\n os.makedirs(os.path.dirname(self.statefile_path))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n # Attempt to write out our version check file\n with 
lockfile.LockFile(self.statefile_path):\n with open(self.statefile_path) as statefile:\n state = json.load(statefile)\n\n state[sys.prefix] = {\n \"last_check\": current_time.strftime(SELFCHECK_DATE_FMT),\n \"pypi_version\": pypi_version,\n }\n\n with open(self.statefile_path, \"w\") as statefile:\n json.dump(state, statefile, sort_keys=True,\n separators=(\",\", \":\"))\n\n\ndef load_selfcheck_statefile():\n if running_under_virtualenv():\n return VirtualenvSelfCheckState()\n else:\n return GlobalSelfCheckState()\n\n\ndef pip_version_check(session):\n \"\"\"Check for an update for pip.\n\n Limit the frequency of checks to once per week. State is stored either in\n the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix\n of the pip script path.\n \"\"\"\n import pip # imported here to prevent circular imports\n pypi_version = None\n\n try:\n state = load_selfcheck_statefile()\n\n current_time = datetime.datetime.utcnow()\n # Determine if we need to refresh the state\n if \"last_check\" in state.state and \"pypi_version\" in state.state:\n last_check = datetime.datetime.strptime(\n state.state[\"last_check\"],\n SELFCHECK_DATE_FMT\n )\n if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:\n pypi_version = state.state[\"pypi_version\"]\n\n # Refresh the version if we need to or just see if we need to warn\n if pypi_version is None:\n resp = session.get(\n PyPI.pip_json_url,\n headers={\"Accept\": \"application/json\"},\n )\n resp.raise_for_status()\n pypi_version = resp.json()[\"info\"][\"version\"]\n\n # save that we've performed a check\n state.save(pypi_version, current_time)\n\n pip_version = pkg_resources.parse_version(pip.__version__)\n\n # Determine if our pypi_version is older\n if pip_version < pkg_resources.parse_version(pypi_version):\n logger.warning(\n \"You are using pip version %s, however version %s is \"\n \"available.\\nYou should consider upgrading via the \"\n \"'pip install --upgrade pip' command.\" % (pip.__version__,\n pypi_version)\n )\n\n except Exception:\n logger.debug(\n \"There was an error checking the latest version of pip\",\n exc_info=True,\n )\n", "path": "pip/utils/outdated.py"}]} | 1,665 | 319 |
gh_patches_debug_3852 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Taiwan real-time data has stopped working
Taiwan seems to have been offline recently.
It used to work correctly, so something may have changed in the data source.
Kibana error description [here](https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:'@timestamp',negate:!f,params:(query:'2019-02-13T09:56:26.971Z',type:phrase),type:phrase,value:'February%2013th%202019,%2010:56:26.971'),query:(match:('@timestamp':(query:'2019-02-13T09:56:26.971Z',type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',asc)))
--- END ISSUE ---
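
As a quick illustration (editor's note, not from the original report): the first thing to check is whether the endpoint the parser uses still answers with the expected JSON shape. The URL below is the one in the parser shown further down and may well be the part that changed:

```python
import requests

URL = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'

resp = requests.get(URL, timeout=10)
resp.raise_for_status()
data = resp.json()
print(data[''])             # dump timestamp key the parser reads
print(len(data['aaData']))  # number of generator rows
```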
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/TW.py`
Content:
```
1 #!/usr/bin/env python3
2 import arrow
3 import requests
4 import pandas
5 import dateutil
6
7
8 def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None):
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'
13 response = requests.get(url)
14 data = response.json()
15
16 dumpDate = data['']
17 prodData = data['aaData']
18
19 tz = 'Asia/Taipei'
20 dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))
21
22 objData = pandas.DataFrame(prodData)
23
24 objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',
25 'additional']
26
27 objData['fueltype'] = objData.fueltype.str.split('(').str[1]
28 objData['fueltype'] = objData.fueltype.str.split(')').str[0]
29 objData.drop('additional', axis=1, inplace=True)
30 objData.drop('percentage', axis=1, inplace=True)
31
32 objData = objData.convert_objects(convert_numeric=True)
33 production = pandas.DataFrame(objData.groupby('fueltype').sum())
34 production.columns = ['capacity', 'output']
35
36 coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity
37 gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity
38 oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity
39
40 coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output
41 gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output
42 oil_production = production.ix['Oil'].output + production.ix['Diesel'].output
43
44 # For storage, note that load will be negative, and generation positive.
45 # We require the opposite
46
47 returndata = {
48 'zoneKey': zone_key,
49 'datetime': dumpDate.datetime,
50 'production': {
51 'coal': coal_production,
52 'gas': gas_production,
53 'oil': oil_production,
54 'hydro': production.ix['Hydro'].output,
55 'nuclear': production.ix['Nuclear'].output,
56 'solar': production.ix['Solar'].output,
57 'wind': production.ix['Wind'].output,
58 'unknown': production.ix['Co-Gen'].output
59 },
60 'capacity': {
61 'coal': coal_capacity,
62 'gas': gas_capacity,
63 'oil': oil_capacity,
64 'hydro': production.ix['Hydro'].capacity,
65 'hydro storage':production.ix['Pumping Gen'].capacity,
66 'nuclear': production.ix['Nuclear'].capacity,
67 'solar': production.ix['Solar'].capacity,
68 'wind': production.ix['Wind'].capacity,
69 'unknown': production.ix['Co-Gen'].capacity
70 },
71 'storage': {
72 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output
73 },
74 'source': 'taipower.com.tw'
75 }
76
77 return returndata
78
79
80 if __name__ == '__main__':
81 print(fetch_production())
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/TW.py b/parsers/TW.py
--- a/parsers/TW.py
+++ b/parsers/TW.py
@@ -9,7 +9,7 @@
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
- url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'
+ url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'
response = requests.get(url)
data = response.json()
| {"golden_diff": "diff --git a/parsers/TW.py b/parsers/TW.py\n--- a/parsers/TW.py\n+++ b/parsers/TW.py\n@@ -9,7 +9,7 @@\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n \n- url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'\n+ url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'\n response = requests.get(url)\n data = response.json()\n", "issue": "Taiwan real-time data has stopped working\nTaiwain seems to have been offline recently\r\nIt used to work correctly, something may have changed in the data source?\r\n\r\nKibana error description [here](https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:'@timestamp',negate:!f,params:(query:'2019-02-13T09:56:26.971Z',type:phrase),type:phrase,value:'February%2013th%202019,%2010:56:26.971'),query:(match:('@timestamp':(query:'2019-02-13T09:56:26.971Z',type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',asc)))\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n url = 'http://data.taipower.com.tw/opendata01/apply/file/d006001/001.txt'\n response = requests.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData = objData.convert_objects(convert_numeric=True)\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity\n gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity\n oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity\n\n coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output\n gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output\n oil_production = production.ix['Oil'].output + production.ix['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'zoneKey': zone_key,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.ix['Hydro'].output,\n 'nuclear': 
production.ix['Nuclear'].output,\n 'solar': production.ix['Solar'].output,\n 'wind': production.ix['Wind'].output,\n 'unknown': production.ix['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.ix['Hydro'].capacity,\n 'hydro storage':production.ix['Pumping Gen'].capacity,\n 'nuclear': production.ix['Nuclear'].capacity,\n 'solar': production.ix['Solar'].capacity,\n 'wind': production.ix['Wind'].capacity,\n 'unknown': production.ix['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/TW.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'\n response = requests.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData = objData.convert_objects(convert_numeric=True)\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.ix['Coal'].capacity + production.ix['IPP-Coal'].capacity\n gas_capacity = production.ix['LNG'].capacity + production.ix['IPP-LNG'].capacity\n oil_capacity = production.ix['Oil'].capacity + production.ix['Diesel'].capacity\n\n coal_production = production.ix['Coal'].output + production.ix['IPP-Coal'].output\n gas_production = production.ix['LNG'].output + production.ix['IPP-LNG'].output\n oil_production = production.ix['Oil'].output + production.ix['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'zoneKey': zone_key,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.ix['Hydro'].output,\n 'nuclear': production.ix['Nuclear'].output,\n 'solar': production.ix['Solar'].output,\n 'wind': production.ix['Wind'].output,\n 'unknown': production.ix['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.ix['Hydro'].capacity,\n 'hydro storage':production.ix['Pumping Gen'].capacity,\n 'nuclear': production.ix['Nuclear'].capacity,\n 'solar': production.ix['Solar'].capacity,\n 'wind': production.ix['Wind'].capacity,\n 'unknown': production.ix['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.ix['Pumping Load'].output - production.ix['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": 
"parsers/TW.py"}]} | 1,620 | 136 |
gh_patches_debug_25842 | rasdani/github-patches | git_diff | amundsen-io__amundsen-1303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug Report: Glue search_tables with Filters and result tables more than 100 items
<!--- Provide a general summary of the issue in the Title above -->
<!--- Look through existing open and closed issues to see if someone has reported the issue before -->
There is a bug when applying Filters to a database that contains more than 100 tables: Glue returns 100 items per page, and to move to the next page we need to specify `NextToken`.
I have 138 tables, which means I will be iterating twice over the result.
The filter:
```python
{
'Key': 'DatabaseName',
'Value': glue_database_name
}
```
Every time I run the code I get different results: the length of the list is always the same - 138. However, the length of the set is always different. It ranges from 1 to 30.
I ran my check over 10 times.
I took a look at the documentation and found the `MaxResults` parameter for further checking. Since I know the desired table count precisely, I set it to 150 and the issue went away completely.
## Expected Behavior
Get the exact same result for filtered tables.
## Current Behavior
Query result from [`self._glue.search_tables(**kwargs)`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L78) contains duplicates
## Possible Solution
I'm not sure, but I think for the next (second) iteration (page, which contains up to 100 items) we are using a new `NextToken` with previous filters. Maybe the problem lies here.
## Steps to Reproduce
1. Have more than 100 glue tables in a single DB in AWS
2. Query it using the abovementioned `DatabaseName` filter
3. Observe duplicates in the list
## Hot-fix
1. Add `MaxResults` to [`kwargs`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L80) that is more than your actual size of overall tables
2. Observe a proper behavior
## Context
Q: How has this issue affected you?
A: It affects our production system
## Your Environment
```
amundsen-databuilder==4.5.3
amundsen-gremlin==0.0.9
Flask==1.1.4
gremlinpython==3.4.9
requests-aws4auth==1.1.1
typing-extensions==3.10.0
overrides==6.1.0
```
--- END ISSUE ---
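
A minimal pagination sketch of the mechanism described above (editor's illustration only; it assumes boto3 credentials are configured, and the database name and page size are made-up values, not taken from the report):

```python
import boto3

def search_all_tables(filters, page_size=500):
    """Collect every page returned by glue.search_tables."""
    glue = boto3.client('glue')
    kwargs = {'Filters': filters, 'MaxResults': page_size}
    tables = []
    while True:
        data = glue.search_tables(**kwargs)
        tables.extend(data['TableList'])
        if 'NextToken' not in data:
            return tables
        kwargs['NextToken'] = data['NextToken']

tables = search_all_tables([{'Key': 'DatabaseName', 'Value': 'my_database'}])
```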
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `databuilder/databuilder/extractor/glue_extractor.py`
Content:
```
1 # Copyright Contributors to the Amundsen project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from typing import (
5 Any, Dict, Iterator, List, Union,
6 )
7
8 import boto3
9 from pyhocon import ConfigFactory, ConfigTree
10
11 from databuilder.extractor.base_extractor import Extractor
12 from databuilder.models.table_metadata import ColumnMetadata, TableMetadata
13
14
15 class GlueExtractor(Extractor):
16 """
17 Extracts tables and columns metadata from AWS Glue metastore
18 """
19
20 CLUSTER_KEY = 'cluster'
21 FILTER_KEY = 'filters'
22 DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})
23
24 def init(self, conf: ConfigTree) -> None:
25 conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)
26 self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)
27 self._filters = conf.get(GlueExtractor.FILTER_KEY)
28 self._glue = boto3.client('glue')
29 self._extract_iter: Union[None, Iterator] = None
30
31 def extract(self) -> Union[TableMetadata, None]:
32 if not self._extract_iter:
33 self._extract_iter = self._get_extract_iter()
34 try:
35 return next(self._extract_iter)
36 except StopIteration:
37 return None
38
39 def get_scope(self) -> str:
40 return 'extractor.glue'
41
42 def _get_extract_iter(self) -> Iterator[TableMetadata]:
43 """
44 It gets all tables and yields TableMetadata
45 :return:
46 """
47 for row in self._get_raw_extract_iter():
48 columns, i = [], 0
49
50 for column in row['StorageDescriptor']['Columns'] \
51 + row.get('PartitionKeys', []):
52 columns.append(ColumnMetadata(
53 column['Name'],
54 column['Comment'] if 'Comment' in column else None,
55 column['Type'],
56 i
57 ))
58 i += 1
59
60 yield TableMetadata(
61 'glue',
62 self._cluster,
63 row['DatabaseName'],
64 row['Name'],
65 row.get('Description') or row.get('Parameters', {}).get('comment'),
66 columns,
67 row.get('TableType') == 'VIRTUAL_VIEW',
68 )
69
70 def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:
71 """
72 Provides iterator of results row from glue client
73 :return:
74 """
75 tables = self._search_tables()
76 return iter(tables)
77
78 def _search_tables(self) -> List[Dict[str, Any]]:
79 tables = []
80 kwargs = {}
81 if self._filters is not None:
82 kwargs['Filters'] = self._filters
83 data = self._glue.search_tables(**kwargs)
84 tables += data['TableList']
85 while 'NextToken' in data:
86 token = data['NextToken']
87 kwargs['NextToken'] = token
88 data = self._glue.search_tables(**kwargs)
89 tables += data['TableList']
90 return tables
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/databuilder/databuilder/extractor/glue_extractor.py b/databuilder/databuilder/extractor/glue_extractor.py
--- a/databuilder/databuilder/extractor/glue_extractor.py
+++ b/databuilder/databuilder/extractor/glue_extractor.py
@@ -19,12 +19,14 @@
CLUSTER_KEY = 'cluster'
FILTER_KEY = 'filters'
- DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})
+ MAX_RESULTS_KEY = 'max_results'
+ DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None, MAX_RESULTS_KEY: 500})
def init(self, conf: ConfigTree) -> None:
conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)
self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)
self._filters = conf.get(GlueExtractor.FILTER_KEY)
+ self._max_results = conf.get(GlueExtractor.MAX_RESULTS_KEY)
self._glue = boto3.client('glue')
self._extract_iter: Union[None, Iterator] = None
@@ -80,6 +82,7 @@
kwargs = {}
if self._filters is not None:
kwargs['Filters'] = self._filters
+ kwargs['MaxResults'] = self._max_results
data = self._glue.search_tables(**kwargs)
tables += data['TableList']
while 'NextToken' in data:
| {"golden_diff": "diff --git a/databuilder/databuilder/extractor/glue_extractor.py b/databuilder/databuilder/extractor/glue_extractor.py\n--- a/databuilder/databuilder/extractor/glue_extractor.py\n+++ b/databuilder/databuilder/extractor/glue_extractor.py\n@@ -19,12 +19,14 @@\n \n CLUSTER_KEY = 'cluster'\n FILTER_KEY = 'filters'\n- DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})\n+ MAX_RESULTS_KEY = 'max_results'\n+ DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None, MAX_RESULTS_KEY: 500})\n \n def init(self, conf: ConfigTree) -> None:\n conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)\n self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)\n self._filters = conf.get(GlueExtractor.FILTER_KEY)\n+ self._max_results = conf.get(GlueExtractor.MAX_RESULTS_KEY)\n self._glue = boto3.client('glue')\n self._extract_iter: Union[None, Iterator] = None\n \n@@ -80,6 +82,7 @@\n kwargs = {}\n if self._filters is not None:\n kwargs['Filters'] = self._filters\n+ kwargs['MaxResults'] = self._max_results\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n while 'NextToken' in data:\n", "issue": "Bug Report: Glue search_tables with Filters and result tables more than 100 items\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- Look through existing open and closed issues to see if someone has reported the issue before -->\r\n\r\nThere is a bug while applying Filters for the database which contains more than 100 items. Since glue returns 100 items per page and to move to the next page we need to specify `NextToken`.\r\nI have 138 tables, which means I will be iterating 2 times over the result.\r\n\r\nThe filter:\r\n```python\r\n{\r\n 'Key': 'DatabaseName',\r\n 'Value': glue_database_name\r\n}\r\n```\r\n\r\nEvery time I run the code I get different results: the length of the list is always the same - 138. However, the length of the set is always different. It ranges from 1 to 30.\r\nI run my check over 10 times.\r\n\r\nI took look at the documentation and found a proper parameter `MaxResults` for further checking. Since I know precisely desired table count, I put it as 150 and the issue has totally gone.\r\n\r\n## Expected Behavior\r\nGet the exact same result for filtered tables.\r\n\r\n## Current Behavior\r\nQuery result from [`self._glue.search_tables(**kwargs)`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L78) contains duplicates\r\n\r\n## Possible Solution\r\nI'm not sure, but I think for the next (second) iteration (page, which contains up to 100 items) we are using a new `NextToken` with previous filters. Maybe the problem lies here.\r\n\r\n## Steps to Reproduce\r\n1. Have more than 100 glue tables in a single DB in AWS\r\n2. Query it using the abovementioned `DatabaseName` filter\r\n3. Observe duplicates in the list\r\n\r\n## Hot-fix\r\n1. Add `MaxResults` to [`kwargs`](https://github.com/amundsen-io/amundsen/blob/main/databuilder/databuilder/extractor/glue_extractor.py#L80) that is more than your actual size of overall tables\r\n2. 
Observe a proper behavior\r\n\r\n## Context\r\nQ: How has this issue affected you?\r\nA: It affects our production system\r\n\r\n## Your Environment\r\n```\r\namundsen-databuilder==4.5.3\r\namundsen-gremlin==0.0.9\r\nFlask==1.1.4\r\ngremlinpython==3.4.9\r\nrequests-aws4auth==1.1.1\r\ntyping-extensions==3.10.0\r\noverrides==6.1.0\r\n```\n", "before_files": [{"content": "# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import (\n Any, Dict, Iterator, List, Union,\n)\n\nimport boto3\nfrom pyhocon import ConfigFactory, ConfigTree\n\nfrom databuilder.extractor.base_extractor import Extractor\nfrom databuilder.models.table_metadata import ColumnMetadata, TableMetadata\n\n\nclass GlueExtractor(Extractor):\n \"\"\"\n Extracts tables and columns metadata from AWS Glue metastore\n \"\"\"\n\n CLUSTER_KEY = 'cluster'\n FILTER_KEY = 'filters'\n DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None})\n\n def init(self, conf: ConfigTree) -> None:\n conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)\n self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)\n self._filters = conf.get(GlueExtractor.FILTER_KEY)\n self._glue = boto3.client('glue')\n self._extract_iter: Union[None, Iterator] = None\n\n def extract(self) -> Union[TableMetadata, None]:\n if not self._extract_iter:\n self._extract_iter = self._get_extract_iter()\n try:\n return next(self._extract_iter)\n except StopIteration:\n return None\n\n def get_scope(self) -> str:\n return 'extractor.glue'\n\n def _get_extract_iter(self) -> Iterator[TableMetadata]:\n \"\"\"\n It gets all tables and yields TableMetadata\n :return:\n \"\"\"\n for row in self._get_raw_extract_iter():\n columns, i = [], 0\n\n for column in row['StorageDescriptor']['Columns'] \\\n + row.get('PartitionKeys', []):\n columns.append(ColumnMetadata(\n column['Name'],\n column['Comment'] if 'Comment' in column else None,\n column['Type'],\n i\n ))\n i += 1\n\n yield TableMetadata(\n 'glue',\n self._cluster,\n row['DatabaseName'],\n row['Name'],\n row.get('Description') or row.get('Parameters', {}).get('comment'),\n columns,\n row.get('TableType') == 'VIRTUAL_VIEW',\n )\n\n def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:\n \"\"\"\n Provides iterator of results row from glue client\n :return:\n \"\"\"\n tables = self._search_tables()\n return iter(tables)\n\n def _search_tables(self) -> List[Dict[str, Any]]:\n tables = []\n kwargs = {}\n if self._filters is not None:\n kwargs['Filters'] = self._filters\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n while 'NextToken' in data:\n token = data['NextToken']\n kwargs['NextToken'] = token\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n return tables\n", "path": "databuilder/databuilder/extractor/glue_extractor.py"}], "after_files": [{"content": "# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import (\n Any, Dict, Iterator, List, Union,\n)\n\nimport boto3\nfrom pyhocon import ConfigFactory, ConfigTree\n\nfrom databuilder.extractor.base_extractor import Extractor\nfrom databuilder.models.table_metadata import ColumnMetadata, TableMetadata\n\n\nclass GlueExtractor(Extractor):\n \"\"\"\n Extracts tables and columns metadata from AWS Glue metastore\n \"\"\"\n\n CLUSTER_KEY = 'cluster'\n FILTER_KEY = 'filters'\n MAX_RESULTS_KEY = 'max_results'\n DEFAULT_CONFIG = ConfigFactory.from_dict({CLUSTER_KEY: 'gold', FILTER_KEY: None, 
MAX_RESULTS_KEY: 500})\n\n def init(self, conf: ConfigTree) -> None:\n conf = conf.with_fallback(GlueExtractor.DEFAULT_CONFIG)\n self._cluster = conf.get_string(GlueExtractor.CLUSTER_KEY)\n self._filters = conf.get(GlueExtractor.FILTER_KEY)\n self._max_results = conf.get(GlueExtractor.MAX_RESULTS_KEY)\n self._glue = boto3.client('glue')\n self._extract_iter: Union[None, Iterator] = None\n\n def extract(self) -> Union[TableMetadata, None]:\n if not self._extract_iter:\n self._extract_iter = self._get_extract_iter()\n try:\n return next(self._extract_iter)\n except StopIteration:\n return None\n\n def get_scope(self) -> str:\n return 'extractor.glue'\n\n def _get_extract_iter(self) -> Iterator[TableMetadata]:\n \"\"\"\n It gets all tables and yields TableMetadata\n :return:\n \"\"\"\n for row in self._get_raw_extract_iter():\n columns, i = [], 0\n\n for column in row['StorageDescriptor']['Columns'] \\\n + row.get('PartitionKeys', []):\n columns.append(ColumnMetadata(\n column['Name'],\n column['Comment'] if 'Comment' in column else None,\n column['Type'],\n i\n ))\n i += 1\n\n yield TableMetadata(\n 'glue',\n self._cluster,\n row['DatabaseName'],\n row['Name'],\n row.get('Description') or row.get('Parameters', {}).get('comment'),\n columns,\n row.get('TableType') == 'VIRTUAL_VIEW',\n )\n\n def _get_raw_extract_iter(self) -> Iterator[Dict[str, Any]]:\n \"\"\"\n Provides iterator of results row from glue client\n :return:\n \"\"\"\n tables = self._search_tables()\n return iter(tables)\n\n def _search_tables(self) -> List[Dict[str, Any]]:\n tables = []\n kwargs = {}\n if self._filters is not None:\n kwargs['Filters'] = self._filters\n kwargs['MaxResults'] = self._max_results\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n while 'NextToken' in data:\n token = data['NextToken']\n kwargs['NextToken'] = token\n data = self._glue.search_tables(**kwargs)\n tables += data['TableList']\n return tables\n", "path": "databuilder/databuilder/extractor/glue_extractor.py"}]} | 1,661 | 338 |
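A brief aside on the Glue record above: the sketch below restates the corrected `search_tables` pagination loop in one readable place, with the explicit `MaxResults` the fix introduces. It is only an illustration — the client construction, the filter value `example_db`, and the cap of 500 are assumed placeholders, not part of the record itself.

```python
import boto3

# Illustrative sketch of paginating Glue search_tables with an explicit
# MaxResults, mirroring the fix above; the database name is a placeholder.
glue = boto3.client("glue")
kwargs = {
    "Filters": [{"Key": "DatabaseName", "Value": "example_db"}],  # assumed filter
    "MaxResults": 500,  # assumed cap, matches the patched default
}

tables = []
data = glue.search_tables(**kwargs)
tables.extend(data["TableList"])
while "NextToken" in data:
    # Carry the token forward so each page continues the same search.
    kwargs["NextToken"] = data["NextToken"]
    data = glue.search_tables(**kwargs)
    tables.extend(data["TableList"])
```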
gh_patches_debug_23504 | rasdani/github-patches | git_diff | iterative__dvc-8197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
installing from Ubuntu repo does not install s3 adapter
# Bug Report
DVC version 2.6.3

DVC version 2.21.1

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/pyinstaller/build.py`
Content:
```
1 import os
2 import pathlib
3 from subprocess import STDOUT, check_call
4
5 path = pathlib.Path(__file__).parent.absolute()
6 hooks = path / "hooks"
7 dvc = path.parent.parent / "dvc"
8 entry = dvc / "__main__.py"
9
10 check_call(
11 [
12 "pyinstaller",
13 "--additional-hooks-dir",
14 os.fspath(hooks),
15 "--name",
16 "dvc",
17 "-y",
18 os.fspath(entry),
19 ],
20 cwd=path,
21 stderr=STDOUT,
22 )
23
24 check_call(
25 [
26 path / "dist" / "dvc" / "dvc",
27 "doctor",
28 ],
29 stderr=STDOUT,
30 )
31
```
Path: `scripts/pyinstaller/hooks/hook-dvc.py`
Content:
```
1 from PyInstaller.utils.hooks import ( # pylint:disable=import-error
2 copy_metadata,
3 )
4
5 # needed for `dvc doctor` to show dep versions
6 datas = copy_metadata("adlfs", recursive=True)
7 datas += copy_metadata("knack")
8 datas += copy_metadata("gcsfs")
9 datas += copy_metadata("pyarrow")
10 datas += copy_metadata("pydrive2")
11 datas += copy_metadata("s3fs", recursive=True)
12 datas += copy_metadata("boto3")
13 datas += copy_metadata("ossfs")
14 datas += copy_metadata("sshfs")
15 datas += copy_metadata("webdav4")
16 datas += copy_metadata("aiohttp")
17 datas += copy_metadata("aiohttp_retry")
18
19 # https://github.com/pypa/setuptools/issues/1963
20 hiddenimports = ["pkg_resources.py2_warn"]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/pyinstaller/build.py b/scripts/pyinstaller/build.py
--- a/scripts/pyinstaller/build.py
+++ b/scripts/pyinstaller/build.py
@@ -1,6 +1,6 @@
import os
import pathlib
-from subprocess import STDOUT, check_call
+from subprocess import STDOUT, check_call, check_output
path = pathlib.Path(__file__).parent.absolute()
hooks = path / "hooks"
@@ -21,10 +21,27 @@
stderr=STDOUT,
)
-check_call(
+out = check_output(
[
path / "dist" / "dvc" / "dvc",
"doctor",
],
stderr=STDOUT,
-)
+).decode()
+
+remotes = [
+ "s3",
+ "oss",
+ "gdrive",
+ "gs",
+ "hdfs",
+ "http",
+ "webhdfs",
+ "azure",
+ "ssh",
+ "webdav",
+]
+
+print(out)
+for remote in remotes:
+ assert f"\t{remote}" in out, f"Missing support for {remote}"
diff --git a/scripts/pyinstaller/hooks/hook-dvc.py b/scripts/pyinstaller/hooks/hook-dvc.py
--- a/scripts/pyinstaller/hooks/hook-dvc.py
+++ b/scripts/pyinstaller/hooks/hook-dvc.py
@@ -16,5 +16,15 @@
datas += copy_metadata("aiohttp")
datas += copy_metadata("aiohttp_retry")
-# https://github.com/pypa/setuptools/issues/1963
-hiddenimports = ["pkg_resources.py2_warn"]
+hiddenimports = [
+ "dvc_azure",
+ "dvc_gdrive",
+ "dvc_gs",
+ "dvc_hdfs",
+ "dvc_oss",
+ "dvc_s3",
+ "dvc_webdav",
+ "dvc_webhdfs",
+ # https://github.com/pypa/setuptools/issues/1963
+ "pkg_resources.py2_warn",
+]
| {"golden_diff": "diff --git a/scripts/pyinstaller/build.py b/scripts/pyinstaller/build.py\n--- a/scripts/pyinstaller/build.py\n+++ b/scripts/pyinstaller/build.py\n@@ -1,6 +1,6 @@\n import os\n import pathlib\n-from subprocess import STDOUT, check_call\n+from subprocess import STDOUT, check_call, check_output\n \n path = pathlib.Path(__file__).parent.absolute()\n hooks = path / \"hooks\"\n@@ -21,10 +21,27 @@\n stderr=STDOUT,\n )\n \n-check_call(\n+out = check_output(\n [\n path / \"dist\" / \"dvc\" / \"dvc\",\n \"doctor\",\n ],\n stderr=STDOUT,\n-)\n+).decode()\n+\n+remotes = [\n+ \"s3\",\n+ \"oss\",\n+ \"gdrive\",\n+ \"gs\",\n+ \"hdfs\",\n+ \"http\",\n+ \"webhdfs\",\n+ \"azure\",\n+ \"ssh\",\n+ \"webdav\",\n+]\n+\n+print(out)\n+for remote in remotes:\n+ assert f\"\\t{remote}\" in out, f\"Missing support for {remote}\"\ndiff --git a/scripts/pyinstaller/hooks/hook-dvc.py b/scripts/pyinstaller/hooks/hook-dvc.py\n--- a/scripts/pyinstaller/hooks/hook-dvc.py\n+++ b/scripts/pyinstaller/hooks/hook-dvc.py\n@@ -16,5 +16,15 @@\n datas += copy_metadata(\"aiohttp\")\n datas += copy_metadata(\"aiohttp_retry\")\n \n-# https://github.com/pypa/setuptools/issues/1963\n-hiddenimports = [\"pkg_resources.py2_warn\"]\n+hiddenimports = [\n+ \"dvc_azure\",\n+ \"dvc_gdrive\",\n+ \"dvc_gs\",\n+ \"dvc_hdfs\",\n+ \"dvc_oss\",\n+ \"dvc_s3\",\n+ \"dvc_webdav\",\n+ \"dvc_webhdfs\",\n+ # https://github.com/pypa/setuptools/issues/1963\n+ \"pkg_resources.py2_warn\",\n+]\n", "issue": "installing from Ubuntu repo does not install s3 adapter\n# Bug Report\r\n\r\nDVC version 2.6.3\r\n\r\n\r\n\r\nDVC version 2.21.1\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport pathlib\nfrom subprocess import STDOUT, check_call\n\npath = pathlib.Path(__file__).parent.absolute()\nhooks = path / \"hooks\"\ndvc = path.parent.parent / \"dvc\"\nentry = dvc / \"__main__.py\"\n\ncheck_call(\n [\n \"pyinstaller\",\n \"--additional-hooks-dir\",\n os.fspath(hooks),\n \"--name\",\n \"dvc\",\n \"-y\",\n os.fspath(entry),\n ],\n cwd=path,\n stderr=STDOUT,\n)\n\ncheck_call(\n [\n path / \"dist\" / \"dvc\" / \"dvc\",\n \"doctor\",\n ],\n stderr=STDOUT,\n)\n", "path": "scripts/pyinstaller/build.py"}, {"content": "from PyInstaller.utils.hooks import ( # pylint:disable=import-error\n copy_metadata,\n)\n\n# needed for `dvc doctor` to show dep versions\ndatas = copy_metadata(\"adlfs\", recursive=True)\ndatas += copy_metadata(\"knack\")\ndatas += copy_metadata(\"gcsfs\")\ndatas += copy_metadata(\"pyarrow\")\ndatas += copy_metadata(\"pydrive2\")\ndatas += copy_metadata(\"s3fs\", recursive=True)\ndatas += copy_metadata(\"boto3\")\ndatas += copy_metadata(\"ossfs\")\ndatas += copy_metadata(\"sshfs\")\ndatas += copy_metadata(\"webdav4\")\ndatas += copy_metadata(\"aiohttp\")\ndatas += copy_metadata(\"aiohttp_retry\")\n\n# https://github.com/pypa/setuptools/issues/1963\nhiddenimports = [\"pkg_resources.py2_warn\"]\n", "path": "scripts/pyinstaller/hooks/hook-dvc.py"}], "after_files": [{"content": "import os\nimport pathlib\nfrom subprocess import STDOUT, check_call, check_output\n\npath = pathlib.Path(__file__).parent.absolute()\nhooks = path / \"hooks\"\ndvc = path.parent.parent / \"dvc\"\nentry = dvc / \"__main__.py\"\n\ncheck_call(\n [\n \"pyinstaller\",\n \"--additional-hooks-dir\",\n os.fspath(hooks),\n \"--name\",\n \"dvc\",\n \"-y\",\n os.fspath(entry),\n ],\n cwd=path,\n stderr=STDOUT,\n)\n\nout = check_output(\n [\n path / \"dist\" / \"dvc\" / \"dvc\",\n \"doctor\",\n ],\n stderr=STDOUT,\n).decode()\n\nremotes = [\n \"s3\",\n \"oss\",\n 
\"gdrive\",\n \"gs\",\n \"hdfs\",\n \"http\",\n \"webhdfs\",\n \"azure\",\n \"ssh\",\n \"webdav\",\n]\n\nprint(out)\nfor remote in remotes:\n assert f\"\\t{remote}\" in out, f\"Missing support for {remote}\"\n", "path": "scripts/pyinstaller/build.py"}, {"content": "from PyInstaller.utils.hooks import ( # pylint:disable=import-error\n copy_metadata,\n)\n\n# needed for `dvc doctor` to show dep versions\ndatas = copy_metadata(\"adlfs\", recursive=True)\ndatas += copy_metadata(\"knack\")\ndatas += copy_metadata(\"gcsfs\")\ndatas += copy_metadata(\"pyarrow\")\ndatas += copy_metadata(\"pydrive2\")\ndatas += copy_metadata(\"s3fs\", recursive=True)\ndatas += copy_metadata(\"boto3\")\ndatas += copy_metadata(\"ossfs\")\ndatas += copy_metadata(\"sshfs\")\ndatas += copy_metadata(\"webdav4\")\ndatas += copy_metadata(\"aiohttp\")\ndatas += copy_metadata(\"aiohttp_retry\")\n\nhiddenimports = [\n \"dvc_azure\",\n \"dvc_gdrive\",\n \"dvc_gs\",\n \"dvc_hdfs\",\n \"dvc_oss\",\n \"dvc_s3\",\n \"dvc_webdav\",\n \"dvc_webhdfs\",\n # https://github.com/pypa/setuptools/issues/1963\n \"pkg_resources.py2_warn\",\n]\n", "path": "scripts/pyinstaller/hooks/hook-dvc.py"}]} | 837 | 456 |
gh_patches_debug_5606 | rasdani/github-patches | git_diff | ansible__ansible-lint-477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive EANSIBLE0014 also flags vars in shell task
# Issue Type
- Bug report
# Ansible and Ansible Lint details
```
ansible --version
ansible 2.3.0.0
ansible-lint --version
ansible-lint 3.4.13
```
- ansible installation method: pip
- ansible-lint installation method: pip
# Desired Behaviour
EANSIBLE0014 should validate only the command task, not the shell task.
# Actual Behaviour (Bug report only)
When ansible-lint validates a playbook containing shell tasks that set env vars inline
```
- hosts: "localhost"
gather_facts: no
become: no
tasks:
- shell: 'MYVAR="$(date)" env | grep MY'
```
it fails and complains that env vars shouldn't be in the command
```
test-play.yaml:5: [EANSIBLE0014] Environment variables don't work as part of command
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansiblelint/rules/EnvVarsInCommandRule.py`
Content:
```
1 # Copyright (c) 2016 Will Thames <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 from ansiblelint import AnsibleLintRule
22 from ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY, get_first_cmd_arg
23
24
25 class EnvVarsInCommandRule(AnsibleLintRule):
26 id = '304'
27 shortdesc = "Environment variables don't work as part of command"
28 description = (
29 'Environment variables should be passed to ``shell`` or ``command`` '
30 'through environment argument'
31 )
32 severity = 'VERY_HIGH'
33 tags = ['command-shell', 'bug', 'ANSIBLE0014']
34 version_added = 'historic'
35
36 expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',
37 'cmd', '__ansible_module__', '__ansible_arguments__',
38 LINE_NUMBER_KEY, FILENAME_KEY]
39
40 def matchtask(self, file, task):
41 if task["action"]["__ansible_module__"] in ['shell', 'command']:
42 first_cmd_arg = get_first_cmd_arg(task)
43 if not first_cmd_arg:
44 return
45
46 return any([arg not in self.expected_args for arg in task['action']] +
47 ["=" in first_cmd_arg])
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py
--- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py
+++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py
@@ -38,7 +38,7 @@
LINE_NUMBER_KEY, FILENAME_KEY]
def matchtask(self, file, task):
- if task["action"]["__ansible_module__"] in ['shell', 'command']:
+ if task["action"]["__ansible_module__"] in ['command']:
first_cmd_arg = get_first_cmd_arg(task)
if not first_cmd_arg:
return
| {"golden_diff": "diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n--- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n+++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n@@ -38,7 +38,7 @@\n LINE_NUMBER_KEY, FILENAME_KEY]\n \n def matchtask(self, file, task):\n- if task[\"action\"][\"__ansible_module__\"] in ['shell', 'command']:\n+ if task[\"action\"][\"__ansible_module__\"] in ['command']:\n first_cmd_arg = get_first_cmd_arg(task)\n if not first_cmd_arg:\n return\n", "issue": "False positive EANSIBLE0014 also flags vars in shell task\n# Issue Type\r\n- Bug report\r\n\r\n# Ansible and Ansible Lint details\r\n```\r\nansible --version\r\nansible 2.3.0.0\r\nansible-lint --version\r\nansible-lint 3.4.13\r\n```\r\n\r\n- ansible installation method: pip\r\n- ansible-lint installation method: pip\r\n\r\n# Desired Behaviour\r\n\r\nEANSIBLE0014 should validate only command task, not shell.\r\n\r\n# Actual Behaviour (Bug report only)\r\n\r\nWhen ansible-lint validating playbook with shell tasks with env vars\r\n```\r\n- hosts: \"localhost\"\r\n gather_facts: no\r\n become: no\r\n tasks:\r\n - shell: 'MYVAR=\"$(date)\" env | grep MY'\r\n```\r\nit fails and complains about Env vars shouldn't be in command\r\n```\r\ntest-play.yaml:5: [EANSIBLE0014] Environment variables don't work as part of command\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nfrom ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY, get_first_cmd_arg\n\n\nclass EnvVarsInCommandRule(AnsibleLintRule):\n id = '304'\n shortdesc = \"Environment variables don't work as part of command\"\n description = (\n 'Environment variables should be passed to ``shell`` or ``command`` '\n 'through environment argument'\n )\n severity = 'VERY_HIGH'\n tags = ['command-shell', 'bug', 'ANSIBLE0014']\n version_added = 'historic'\n\n expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in ['shell', 'command']:\n first_cmd_arg = get_first_cmd_arg(task)\n if not first_cmd_arg:\n return\n\n return any([arg not in self.expected_args for arg in task['action']] +\n [\"=\" in first_cmd_arg])\n", "path": "lib/ansiblelint/rules/EnvVarsInCommandRule.py"}], "after_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nfrom ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY, get_first_cmd_arg\n\n\nclass EnvVarsInCommandRule(AnsibleLintRule):\n id = '304'\n shortdesc = \"Environment variables don't work as part of command\"\n description = (\n 'Environment variables should be passed to ``shell`` or ``command`` '\n 'through environment argument'\n )\n severity = 'VERY_HIGH'\n tags = ['command-shell', 'bug', 'ANSIBLE0014']\n version_added = 'historic'\n\n expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in ['command']:\n first_cmd_arg = get_first_cmd_arg(task)\n if not first_cmd_arg:\n return\n\n return any([arg not in self.expected_args for arg in task['action']] +\n [\"=\" in first_cmd_arg])\n", "path": "lib/ansiblelint/rules/EnvVarsInCommandRule.py"}]} | 1,051 | 148 |
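As a standalone illustration of why the rule in the record above fired for a shell task: the first token of `MYVAR="$(date)" env | grep MY` contains `=`, so the `"=" in first_cmd_arg` check matches regardless of module. The tokenization below is a deliberate simplification — it is not the real `get_first_cmd_arg` helper, just a rough stand-in.

```python
# Rough sketch of the matching logic; the split() is an assumption,
# ansible-lint's get_first_cmd_arg does more careful parsing.
def first_cmd_arg(cmd: str) -> str:
    return cmd.split()[0]

shell_cmd = 'MYVAR="$(date)" env | grep MY'
print("=" in first_cmd_arg(shell_cmd))  # True -> rule 304 would match

# The patch restricts the check to the `command` module, so shell tasks
# with inline env vars are no longer flagged.
```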
gh_patches_debug_20627 | rasdani/github-patches | git_diff | ciudadanointeligente__votainteligente-portal-electoral-283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Candidate has_answered always false
How can we stop showing the Twitter link for candidates who have answered all the questions?
How can we change "pídele" to "pedile"?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elections/models.py`
Content:
```
1 # coding=utf-8
2 from django.db import models
3 from autoslug import AutoSlugField
4 from taggit.managers import TaggableManager
5 from django.core.urlresolvers import reverse
6 from popolo.models import Person, Area
7 from django.utils.translation import ugettext_lazy as _
8 from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed
9 from candidator.models import Category, Topic as CanTopic
10 from picklefield.fields import PickledObjectField
11 from django.conf import settings
12 from django.utils.encoding import python_2_unicode_compatible
13 from django.contrib.flatpages.models import FlatPage
14 import copy
15
16
17 class ExtraInfoMixin(models.Model):
18 extra_info = PickledObjectField(default={})
19
20 class Meta:
21 abstract = True
22
23 def __init__(self, *args, **kwargs):
24 super(ExtraInfoMixin, self).__init__(*args, **kwargs)
25 default_extra_info = copy.copy(self.default_extra_info)
26 default_extra_info.update(self.extra_info)
27 self.extra_info = default_extra_info
28
29
30 class Candidate(Person, ExtraInfoMixin):
31 election = models.ForeignKey('Election', related_name='candidates', null=True)
32
33 default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO
34
35 @property
36 def twitter(self):
37 links = self.contact_details.filter(contact_type="TWITTER")
38 if links:
39 return links.first()
40
41 class Meta:
42 verbose_name = _("Candidato")
43 verbose_name_plural = _("Candidatos")
44
45
46 class CandidateFlatPage(FlatPage):
47 candidate = models.ForeignKey(Candidate, related_name='flatpages')
48
49 class Meta:
50 verbose_name = _(u"Página estáticas por candidato")
51 verbose_name_plural = _(u"Páginas estáticas por candidato")
52
53 def get_absolute_url(self):
54 return reverse('candidate_flatpage', kwargs={'election_slug': self.candidate.election.slug,
55 'slug': self.candidate.id,
56 'url': self.url
57 }
58 )
59
60
61 class PersonalData(models.Model):
62 candidate = models.ForeignKey('Candidate', related_name="personal_datas")
63 label = models.CharField(max_length=512)
64 value = models.CharField(max_length=1024)
65
66
67 class Topic(CanTopic):
68 class Meta:
69 proxy = True
70 verbose_name = _(u"Pregunta")
71 verbose_name_plural = _(u"Preguntas")
72
73 @property
74 def election(self):
75 category = QuestionCategory.objects.get(category_ptr=self.category)
76 return category.election
77
78
79 @python_2_unicode_compatible
80 class QuestionCategory(Category):
81 election = models.ForeignKey('Election', related_name='categories', null=True)
82
83 def __str__(self):
84 return u'<%s> in <%s>' % (self.name, self.election.name)
85
86 class Meta:
87 verbose_name = _(u"Categoría de pregunta")
88 verbose_name_plural = _(u"Categorías de pregunta")
89
90
91 class Election(ExtraInfoMixin, models.Model):
92 name = models.CharField(max_length=255)
93 slug = AutoSlugField(populate_from='name', unique=True)
94 description = models.TextField(blank=True)
95 tags = TaggableManager(blank=True)
96 searchable = models.BooleanField(default=True)
97 highlighted = models.BooleanField(default=False)
98 extra_info_title = models.CharField(max_length=50, blank=True, null=True)
99 extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_("Puedes usar Markdown. <br/> ")
100 + markdown_allowed())
101 uses_preguntales = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar preguntales?"))
102 uses_ranking = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar ranking"))
103 uses_face_to_face = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar frente a frente"))
104 uses_soul_mate = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar 1/2 naranja"))
105 uses_questionary = models.BooleanField(default=True, help_text=_(u"Esta elección debe usar cuestionario"))
106
107 default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO
108 area = models.ForeignKey(Area, null=True, related_name="elections")
109
110 def __unicode__(self):
111 return self.name
112
113 def get_absolute_url(self):
114 return reverse('election_view', kwargs={'slug': self.slug})
115
116 def get_extra_info_url(self):
117 return reverse('election_extra_info', kwargs={'slug': self.slug})
118
119 class Meta:
120 verbose_name = _(u'Mi Elección')
121 verbose_name_plural = _(u'Mis Elecciones')
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elections/models.py b/elections/models.py
--- a/elections/models.py
+++ b/elections/models.py
@@ -6,7 +6,7 @@
from popolo.models import Person, Area
from django.utils.translation import ugettext_lazy as _
from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed
-from candidator.models import Category, Topic as CanTopic
+from candidator.models import Category, Topic as CanTopic, TakenPosition
from picklefield.fields import PickledObjectField
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
@@ -38,6 +38,11 @@
if links:
return links.first()
+ @property
+ def has_answered(self):
+ are_there_answers = TakenPosition.objects.filter(person=self, position__isnull=False).exists()
+ return are_there_answers
+
class Meta:
verbose_name = _("Candidato")
verbose_name_plural = _("Candidatos")
| {"golden_diff": "diff --git a/elections/models.py b/elections/models.py\n--- a/elections/models.py\n+++ b/elections/models.py\n@@ -6,7 +6,7 @@\n from popolo.models import Person, Area\n from django.utils.translation import ugettext_lazy as _\n from markdown_deux.templatetags.markdown_deux_tags import markdown_allowed\n-from candidator.models import Category, Topic as CanTopic\n+from candidator.models import Category, Topic as CanTopic, TakenPosition\n from picklefield.fields import PickledObjectField\n from django.conf import settings\n from django.utils.encoding import python_2_unicode_compatible\n@@ -38,6 +38,11 @@\n if links:\n return links.first()\n \n+ @property\n+ def has_answered(self):\n+ are_there_answers = TakenPosition.objects.filter(person=self, position__isnull=False).exists()\n+ return are_there_answers\n+\n class Meta:\n verbose_name = _(\"Candidato\")\n verbose_name_plural = _(\"Candidatos\")\n", "issue": "Candidate has_answered siempre en false\n\u00bfC\u00f3mo se hace para que deje de mostrar el enlace a twitter para candidatos que tienen todas las respuestas?\n\u00bfC\u00f3mo se hace para cambiar \"p\u00eddele\" por \"pedile\"?\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.db import models\nfrom autoslug import AutoSlugField\nfrom taggit.managers import TaggableManager\nfrom django.core.urlresolvers import reverse\nfrom popolo.models import Person, Area\nfrom django.utils.translation import ugettext_lazy as _\nfrom markdown_deux.templatetags.markdown_deux_tags import markdown_allowed\nfrom candidator.models import Category, Topic as CanTopic\nfrom picklefield.fields import PickledObjectField\nfrom django.conf import settings\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.contrib.flatpages.models import FlatPage\nimport copy\n\n\nclass ExtraInfoMixin(models.Model):\n extra_info = PickledObjectField(default={})\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n super(ExtraInfoMixin, self).__init__(*args, **kwargs)\n default_extra_info = copy.copy(self.default_extra_info)\n default_extra_info.update(self.extra_info)\n self.extra_info = default_extra_info\n\n\nclass Candidate(Person, ExtraInfoMixin):\n election = models.ForeignKey('Election', related_name='candidates', null=True)\n\n default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO\n\n @property\n def twitter(self):\n links = self.contact_details.filter(contact_type=\"TWITTER\")\n if links:\n return links.first()\n\n class Meta:\n verbose_name = _(\"Candidato\")\n verbose_name_plural = _(\"Candidatos\")\n\n\nclass CandidateFlatPage(FlatPage):\n candidate = models.ForeignKey(Candidate, related_name='flatpages')\n\n class Meta:\n verbose_name = _(u\"P\u00e1gina est\u00e1ticas por candidato\")\n verbose_name_plural = _(u\"P\u00e1ginas est\u00e1ticas por candidato\")\n\n def get_absolute_url(self):\n return reverse('candidate_flatpage', kwargs={'election_slug': self.candidate.election.slug,\n 'slug': self.candidate.id,\n 'url': self.url\n }\n )\n\n\nclass PersonalData(models.Model):\n candidate = models.ForeignKey('Candidate', related_name=\"personal_datas\")\n label = models.CharField(max_length=512)\n value = models.CharField(max_length=1024)\n\n\nclass Topic(CanTopic):\n class Meta:\n proxy = True\n verbose_name = _(u\"Pregunta\")\n verbose_name_plural = _(u\"Preguntas\")\n\n @property\n def election(self):\n category = QuestionCategory.objects.get(category_ptr=self.category)\n return category.election\n\n\n@python_2_unicode_compatible\nclass 
QuestionCategory(Category):\n election = models.ForeignKey('Election', related_name='categories', null=True)\n\n def __str__(self):\n return u'<%s> in <%s>' % (self.name, self.election.name)\n\n class Meta:\n verbose_name = _(u\"Categor\u00eda de pregunta\")\n verbose_name_plural = _(u\"Categor\u00edas de pregunta\")\n\n\nclass Election(ExtraInfoMixin, models.Model):\n name = models.CharField(max_length=255)\n slug = AutoSlugField(populate_from='name', unique=True)\n description = models.TextField(blank=True)\n tags = TaggableManager(blank=True)\n searchable = models.BooleanField(default=True)\n highlighted = models.BooleanField(default=False)\n extra_info_title = models.CharField(max_length=50, blank=True, null=True)\n extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_(\"Puedes usar Markdown. <br/> \")\n + markdown_allowed())\n uses_preguntales = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar preguntales?\"))\n uses_ranking = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar ranking\"))\n uses_face_to_face = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar frente a frente\"))\n uses_soul_mate = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar 1/2 naranja\"))\n uses_questionary = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar cuestionario\"))\n\n default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO\n area = models.ForeignKey(Area, null=True, related_name=\"elections\")\n\n def __unicode__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('election_view', kwargs={'slug': self.slug})\n\n def get_extra_info_url(self):\n return reverse('election_extra_info', kwargs={'slug': self.slug})\n\n class Meta:\n verbose_name = _(u'Mi Elecci\u00f3n')\n verbose_name_plural = _(u'Mis Elecciones')\n", "path": "elections/models.py"}], "after_files": [{"content": "# coding=utf-8\nfrom django.db import models\nfrom autoslug import AutoSlugField\nfrom taggit.managers import TaggableManager\nfrom django.core.urlresolvers import reverse\nfrom popolo.models import Person, Area\nfrom django.utils.translation import ugettext_lazy as _\nfrom markdown_deux.templatetags.markdown_deux_tags import markdown_allowed\nfrom candidator.models import Category, Topic as CanTopic, TakenPosition\nfrom picklefield.fields import PickledObjectField\nfrom django.conf import settings\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.contrib.flatpages.models import FlatPage\nimport copy\n\n\nclass ExtraInfoMixin(models.Model):\n extra_info = PickledObjectField(default={})\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n super(ExtraInfoMixin, self).__init__(*args, **kwargs)\n default_extra_info = copy.copy(self.default_extra_info)\n default_extra_info.update(self.extra_info)\n self.extra_info = default_extra_info\n\n\nclass Candidate(Person, ExtraInfoMixin):\n election = models.ForeignKey('Election', related_name='candidates', null=True)\n\n default_extra_info = settings.DEFAULT_CANDIDATE_EXTRA_INFO\n\n @property\n def twitter(self):\n links = self.contact_details.filter(contact_type=\"TWITTER\")\n if links:\n return links.first()\n\n @property\n def has_answered(self):\n are_there_answers = TakenPosition.objects.filter(person=self, position__isnull=False).exists()\n return are_there_answers\n\n class Meta:\n verbose_name = _(\"Candidato\")\n verbose_name_plural = 
_(\"Candidatos\")\n\n\nclass CandidateFlatPage(FlatPage):\n candidate = models.ForeignKey(Candidate, related_name='flatpages')\n\n class Meta:\n verbose_name = _(u\"P\u00e1gina est\u00e1ticas por candidato\")\n verbose_name_plural = _(u\"P\u00e1ginas est\u00e1ticas por candidato\")\n\n def get_absolute_url(self):\n return reverse('candidate_flatpage', kwargs={'election_slug': self.candidate.election.slug,\n 'slug': self.candidate.id,\n 'url': self.url\n }\n )\n\n\nclass PersonalData(models.Model):\n candidate = models.ForeignKey('Candidate', related_name=\"personal_datas\")\n label = models.CharField(max_length=512)\n value = models.CharField(max_length=1024)\n\n\nclass Topic(CanTopic):\n class Meta:\n proxy = True\n verbose_name = _(u\"Pregunta\")\n verbose_name_plural = _(u\"Preguntas\")\n\n @property\n def election(self):\n category = QuestionCategory.objects.get(category_ptr=self.category)\n return category.election\n\n\n@python_2_unicode_compatible\nclass QuestionCategory(Category):\n election = models.ForeignKey('Election', related_name='categories', null=True)\n\n def __str__(self):\n return u'<%s> in <%s>' % (self.name, self.election.name)\n\n class Meta:\n verbose_name = _(u\"Categor\u00eda de pregunta\")\n verbose_name_plural = _(u\"Categor\u00edas de pregunta\")\n\n\nclass Election(ExtraInfoMixin, models.Model):\n name = models.CharField(max_length=255)\n slug = AutoSlugField(populate_from='name', unique=True)\n description = models.TextField(blank=True)\n tags = TaggableManager(blank=True)\n searchable = models.BooleanField(default=True)\n highlighted = models.BooleanField(default=False)\n extra_info_title = models.CharField(max_length=50, blank=True, null=True)\n extra_info_content = models.TextField(max_length=3000, blank=True, null=True, help_text=_(\"Puedes usar Markdown. <br/> \")\n + markdown_allowed())\n uses_preguntales = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar preguntales?\"))\n uses_ranking = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar ranking\"))\n uses_face_to_face = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar frente a frente\"))\n uses_soul_mate = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar 1/2 naranja\"))\n uses_questionary = models.BooleanField(default=True, help_text=_(u\"Esta elecci\u00f3n debe usar cuestionario\"))\n\n default_extra_info = settings.DEFAULT_ELECTION_EXTRA_INFO\n area = models.ForeignKey(Area, null=True, related_name=\"elections\")\n\n def __unicode__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('election_view', kwargs={'slug': self.slug})\n\n def get_extra_info_url(self):\n return reverse('election_extra_info', kwargs={'slug': self.slug})\n\n class Meta:\n verbose_name = _(u'Mi Elecci\u00f3n')\n verbose_name_plural = _(u'Mis Elecciones')\n", "path": "elections/models.py"}]} | 1,574 | 221 |
gh_patches_debug_6647 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1647 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[META 555] Add automated span type/subtype checking against shared spec
Spec PR: https://github.com/elastic/apm/pull/443
To start, we would just ensure that all span types/subtypes appear in the spec. In the future we will work on cross-agent alignment.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/asyncio/aiopg.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm.contrib.asyncio.traces import async_capture_span
32 from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule
33 from elasticapm.instrumentation.packages.dbapi2 import extract_signature
34
35
36 class AioPGInstrumentation(AsyncAbstractInstrumentedModule):
37 name = "aiopg"
38
39 instrument_list = [
40 ("aiopg.cursor", "Cursor.execute"),
41 ("aiopg.cursor", "Cursor.callproc"),
42 ("aiopg.connection", "Cursor.execute"),
43 ("aiopg.connection", "Cursor.callproc"),
44 ]
45
46 async def call(self, module, method, wrapped, instance, args, kwargs):
47 if method == "Cursor.execute":
48 query = args[0] if len(args) else kwargs["operation"]
49 query = _bake_sql(instance.raw, query)
50 name = extract_signature(query)
51 context = {"db": {"type": "sql", "statement": query}}
52 action = "query"
53 elif method == "Cursor.callproc":
54 func = args[0] if len(args) else kwargs["procname"]
55 name = func + "()"
56 context = None
57 action = "exec"
58 else:
59 raise AssertionError("call from uninstrumented method")
60 async with async_capture_span(
61 name, leaf=True, span_type="db", span_subtype="postgres", span_action=action, extra=context
62 ):
63 return await wrapped(*args, **kwargs)
64
65
66 def _bake_sql(cursor, sql):
67 # if this is a Composable object, use its `as_string` method
68 # see http://initd.org/psycopg/docs/sql.html
69 if hasattr(sql, "as_string"):
70 return sql.as_string(cursor)
71 return sql
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/asyncio/aiopg.py b/elasticapm/instrumentation/packages/asyncio/aiopg.py
--- a/elasticapm/instrumentation/packages/asyncio/aiopg.py
+++ b/elasticapm/instrumentation/packages/asyncio/aiopg.py
@@ -58,7 +58,7 @@
else:
raise AssertionError("call from uninstrumented method")
async with async_capture_span(
- name, leaf=True, span_type="db", span_subtype="postgres", span_action=action, extra=context
+ name, leaf=True, span_type="db", span_subtype="postgresql", span_action=action, extra=context
):
return await wrapped(*args, **kwargs)
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/aiopg.py b/elasticapm/instrumentation/packages/asyncio/aiopg.py\n--- a/elasticapm/instrumentation/packages/asyncio/aiopg.py\n+++ b/elasticapm/instrumentation/packages/asyncio/aiopg.py\n@@ -58,7 +58,7 @@\n else:\n raise AssertionError(\"call from uninstrumented method\")\n async with async_capture_span(\n- name, leaf=True, span_type=\"db\", span_subtype=\"postgres\", span_action=action, extra=context\n+ name, leaf=True, span_type=\"db\", span_subtype=\"postgresql\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n", "issue": "[META 555] Add automated span type/subtype checking against shared spec\nSpec PR: https://github.com/elastic/apm/pull/443\r\n\r\nTo start, we would just ensure that all span types/subtypes appear in the spec. In the future we will work on cross-agent alignment.\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.dbapi2 import extract_signature\n\n\nclass AioPGInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"aiopg\"\n\n instrument_list = [\n (\"aiopg.cursor\", \"Cursor.execute\"),\n (\"aiopg.cursor\", \"Cursor.callproc\"),\n (\"aiopg.connection\", \"Cursor.execute\"),\n (\"aiopg.connection\", \"Cursor.callproc\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n if method == \"Cursor.execute\":\n query = args[0] if len(args) else kwargs[\"operation\"]\n query = _bake_sql(instance.raw, query)\n name = extract_signature(query)\n context = {\"db\": {\"type\": \"sql\", \"statement\": query}}\n action = \"query\"\n elif method == \"Cursor.callproc\":\n func = args[0] if len(args) else kwargs[\"procname\"]\n name = func + \"()\"\n context = None\n action = \"exec\"\n else:\n raise AssertionError(\"call from uninstrumented method\")\n async with async_capture_span(\n name, leaf=True, span_type=\"db\", span_subtype=\"postgres\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n\n\ndef _bake_sql(cursor, sql):\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n return sql.as_string(cursor)\n return sql\n", "path": "elasticapm/instrumentation/packages/asyncio/aiopg.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.dbapi2 import extract_signature\n\n\nclass AioPGInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"aiopg\"\n\n instrument_list = [\n (\"aiopg.cursor\", \"Cursor.execute\"),\n (\"aiopg.cursor\", \"Cursor.callproc\"),\n (\"aiopg.connection\", \"Cursor.execute\"),\n (\"aiopg.connection\", \"Cursor.callproc\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n if method == \"Cursor.execute\":\n query = args[0] if len(args) else kwargs[\"operation\"]\n query = _bake_sql(instance.raw, query)\n name = extract_signature(query)\n context = {\"db\": {\"type\": \"sql\", \"statement\": query}}\n action = \"query\"\n elif method == \"Cursor.callproc\":\n func = args[0] if len(args) else kwargs[\"procname\"]\n name = func + \"()\"\n context = None\n action = \"exec\"\n else:\n raise AssertionError(\"call from uninstrumented method\")\n async with async_capture_span(\n name, leaf=True, span_type=\"db\", span_subtype=\"postgresql\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n\n\ndef _bake_sql(cursor, sql):\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n return sql.as_string(cursor)\n return sql\n", "path": "elasticapm/instrumentation/packages/asyncio/aiopg.py"}]} | 1,184 | 171 |
gh_patches_debug_34381 | rasdani/github-patches | git_diff | facebookresearch__hydra-1560 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[callbacks] call on_*_end events in reverse order
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/core/callbacks.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import warnings
3 from typing import Any
4
5 from omegaconf import DictConfig
6
7 from hydra.core.utils import JobReturn
8 from hydra.utils import instantiate
9
10
11 class Callbacks:
12 def __init__(self, config: DictConfig) -> None:
13 self.callbacks = []
14 for params in config.hydra.callbacks.values():
15 self.callbacks.append(instantiate(params))
16
17 def _notify(self, function_name: str, **kwargs: Any) -> None:
18 for c in self.callbacks:
19 try:
20 getattr(c, function_name)(**kwargs)
21 except Exception as e:
22 warnings.warn(
23 f"Callback {type(c).__name__}.{function_name} raised {type(e).__name__}: {e}"
24 )
25
26 def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
27 self._notify(function_name="on_run_start", config=config, **kwargs)
28
29 def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
30 self._notify(function_name="on_run_end", config=config, **kwargs)
31
32 def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
33 self._notify(function_name="on_multirun_start", config=config, **kwargs)
34
35 def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
36 self._notify(function_name="on_multirun_end", config=config, **kwargs)
37
38 def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
39 self._notify(function_name="on_job_start", config=config, **kwargs)
40
41 def on_job_end(
42 self, config: DictConfig, job_return: JobReturn, **kwargs: Any
43 ) -> None:
44 self._notify(
45 function_name="on_job_end", config=config, job_return=job_return, **kwargs
46 )
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hydra/core/callbacks.py b/hydra/core/callbacks.py
--- a/hydra/core/callbacks.py
+++ b/hydra/core/callbacks.py
@@ -14,8 +14,9 @@
for params in config.hydra.callbacks.values():
self.callbacks.append(instantiate(params))
- def _notify(self, function_name: str, **kwargs: Any) -> None:
- for c in self.callbacks:
+ def _notify(self, function_name: str, reverse: bool = False, **kwargs: Any) -> None:
+ callbacks = reversed(self.callbacks) if reverse else self.callbacks
+ for c in callbacks:
try:
getattr(c, function_name)(**kwargs)
except Exception as e:
@@ -27,13 +28,15 @@
self._notify(function_name="on_run_start", config=config, **kwargs)
def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
- self._notify(function_name="on_run_end", config=config, **kwargs)
+ self._notify(function_name="on_run_end", config=config, reverse=True, **kwargs)
def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
self._notify(function_name="on_multirun_start", config=config, **kwargs)
def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
- self._notify(function_name="on_multirun_end", config=config, **kwargs)
+ self._notify(
+ function_name="on_multirun_end", reverse=True, config=config, **kwargs
+ )
def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
self._notify(function_name="on_job_start", config=config, **kwargs)
@@ -42,5 +45,9 @@
self, config: DictConfig, job_return: JobReturn, **kwargs: Any
) -> None:
self._notify(
- function_name="on_job_end", config=config, job_return=job_return, **kwargs
+ function_name="on_job_end",
+ config=config,
+ job_return=job_return,
+ reverse=True,
+ **kwargs,
)
| {"golden_diff": "diff --git a/hydra/core/callbacks.py b/hydra/core/callbacks.py\n--- a/hydra/core/callbacks.py\n+++ b/hydra/core/callbacks.py\n@@ -14,8 +14,9 @@\n for params in config.hydra.callbacks.values():\n self.callbacks.append(instantiate(params))\n \n- def _notify(self, function_name: str, **kwargs: Any) -> None:\n- for c in self.callbacks:\n+ def _notify(self, function_name: str, reverse: bool = False, **kwargs: Any) -> None:\n+ callbacks = reversed(self.callbacks) if reverse else self.callbacks\n+ for c in callbacks:\n try:\n getattr(c, function_name)(**kwargs)\n except Exception as e:\n@@ -27,13 +28,15 @@\n self._notify(function_name=\"on_run_start\", config=config, **kwargs)\n \n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n- self._notify(function_name=\"on_run_end\", config=config, **kwargs)\n+ self._notify(function_name=\"on_run_end\", config=config, reverse=True, **kwargs)\n \n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_start\", config=config, **kwargs)\n \n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n- self._notify(function_name=\"on_multirun_end\", config=config, **kwargs)\n+ self._notify(\n+ function_name=\"on_multirun_end\", reverse=True, config=config, **kwargs\n+ )\n \n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_job_start\", config=config, **kwargs)\n@@ -42,5 +45,9 @@\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n self._notify(\n- function_name=\"on_job_end\", config=config, job_return=job_return, **kwargs\n+ function_name=\"on_job_end\",\n+ config=config,\n+ job_return=job_return,\n+ reverse=True,\n+ **kwargs,\n )\n", "issue": "[callbacks] call on_*_end events in reverse order\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport warnings\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\nfrom hydra.utils import instantiate\n\n\nclass Callbacks:\n def __init__(self, config: DictConfig) -> None:\n self.callbacks = []\n for params in config.hydra.callbacks.values():\n self.callbacks.append(instantiate(params))\n\n def _notify(self, function_name: str, **kwargs: Any) -> None:\n for c in self.callbacks:\n try:\n getattr(c, function_name)(**kwargs)\n except Exception as e:\n warnings.warn(\n f\"Callback {type(c).__name__}.{function_name} raised {type(e).__name__}: {e}\"\n )\n\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_start\", config=config, **kwargs)\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_end\", config=config, **kwargs)\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_start\", config=config, **kwargs)\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_end\", config=config, **kwargs)\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_job_start\", config=config, **kwargs)\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n self._notify(\n function_name=\"on_job_end\", config=config, job_return=job_return, **kwargs\n )\n", "path": "hydra/core/callbacks.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport warnings\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\nfrom hydra.utils import instantiate\n\n\nclass Callbacks:\n def __init__(self, config: DictConfig) -> None:\n self.callbacks = []\n for params in config.hydra.callbacks.values():\n self.callbacks.append(instantiate(params))\n\n def _notify(self, function_name: str, reverse: bool = False, **kwargs: Any) -> None:\n callbacks = reversed(self.callbacks) if reverse else self.callbacks\n for c in callbacks:\n try:\n getattr(c, function_name)(**kwargs)\n except Exception as e:\n warnings.warn(\n f\"Callback {type(c).__name__}.{function_name} raised {type(e).__name__}: {e}\"\n )\n\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_start\", config=config, **kwargs)\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_run_end\", config=config, reverse=True, **kwargs)\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_multirun_start\", config=config, **kwargs)\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(\n function_name=\"on_multirun_end\", reverse=True, config=config, **kwargs\n )\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n self._notify(function_name=\"on_job_start\", config=config, **kwargs)\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n self._notify(\n function_name=\"on_job_end\",\n config=config,\n job_return=job_return,\n reverse=True,\n **kwargs,\n )\n", "path": "hydra/core/callbacks.py"}]} | 786 | 504 |
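A small standalone sketch of the ordering the Hydra patch above introduces: start hooks run in registration order while end hooks run reversed (LIFO), so the first callback to set something up is the last to tear it down. The callback names are placeholders, not Hydra API.

```python
# Placeholder callback names; only the ordering is the point.
callbacks = ["A", "B", "C"]

for name in callbacks:
    print(f"{name}.on_run_start")   # A, B, C

for name in reversed(callbacks):
    print(f"{name}.on_run_end")     # C, B, A
```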
gh_patches_debug_23761 | rasdani/github-patches | git_diff | fossasia__open-event-server-5139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add custom form for attendees
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
Add custom form for attendees
**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->
**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
**Working on it**
--- END ISSUE ---
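The request itself does not spell out a schema, but the model file shown below already defines SESSION_FORM and SPEAKER_FORM as include/require dictionaries, so an attendee form follows the same shape. A minimal sketch for orientation only; the field names here are illustrative and the full set added by the fix appears in the diff further down:

```python
# Illustrative sketch only -- see the golden diff below for the actual fields.
import json

ATTENDEE_FORM = {
    "firstname": {"include": 1, "require": 1},
    "lastname": {"include": 1, "require": 1},
    "email": {"include": 1, "require": 0},
}

# Exposed the same way as the existing session/speaker form constants:
attendee_form_str = json.dumps(ATTENDEE_FORM, separators=(',', ':'))
```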
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/models/custom_form.py`
Content:
```
1 import json
2 from sqlalchemy.schema import UniqueConstraint
3
4 from app.models import db
5 from app.models.base import SoftDeletionModel
6
7 SESSION_FORM = {
8 "title": {"include": 1, "require": 1},
9 "subtitle": {"include": 0, "require": 0},
10 "short_abstract": {"include": 1, "require": 0},
11 "long_abstract": {"include": 0, "require": 0},
12 "comments": {"include": 1, "require": 0},
13 "track": {"include": 0, "require": 0},
14 "session_type": {"include": 0, "require": 0},
15 "language": {"include": 0, "require": 0},
16 "slides": {"include": 1, "require": 0},
17 "video": {"include": 0, "require": 0},
18 "audio": {"include": 0, "require": 0}
19 }
20
21 SPEAKER_FORM = {
22 "name": {"include": 1, "require": 1},
23 "email": {"include": 1, "require": 1},
24 "photo": {"include": 1, "require": 0},
25 "organisation": {"include": 1, "require": 0},
26 "position": {"include": 1, "require": 0},
27 "country": {"include": 1, "require": 0},
28 "short_biography": {"include": 1, "require": 0},
29 "long_biography": {"include": 0, "require": 0},
30 "mobile": {"include": 0, "require": 0},
31 "website": {"include": 1, "require": 0},
32 "facebook": {"include": 0, "require": 0},
33 "twitter": {"include": 1, "require": 0},
34 "github": {"include": 0, "require": 0},
35 "linkedin": {"include": 0, "require": 0}
36 }
37
38 session_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))
39 speaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))
40
41
42 class CustomForms(SoftDeletionModel):
43 """custom form model class"""
44 __tablename__ = 'custom_forms'
45 __table_args__ = (UniqueConstraint('event_id', 'field_identifier', 'form', name='custom_form_identifier'), )
46 id = db.Column(db.Integer, primary_key=True)
47 field_identifier = db.Column(db.String, nullable=False)
48 form = db.Column(db.String, nullable=False)
49 type = db.Column(db.String, nullable=False)
50 is_required = db.Column(db.Boolean)
51 is_included = db.Column(db.Boolean)
52 is_fixed = db.Column(db.Boolean)
53 event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
54
55 def __init__(self,
56 event_id=None,
57 field_identifier=None,
58 form=None,
59 type=None,
60 is_required=None,
61 is_included=None,
62 is_fixed=None,
63 deleted_at=None):
64 self.event_id = event_id
65 self.field_identifier = field_identifier,
66 self.form = form,
67 self.type = type,
68 self.is_required = is_required,
69 self.is_included = is_included,
70 self.is_fixed = is_fixed
71 self.deleted_at = deleted_at
72
73 def __repr__(self):
74 return '<CustomForm %r>' % self.id
75
76 def __str__(self):
77 return self.__repr__()
78
79 @property
80 def serialize(self):
81 """Return object data in easily serializable format"""
82
83 return {
84 'id': self.id,
85 'field_identifier': self.field_identifier,
86 'form': self.form,
87 'type': self.type,
88 'is_required': self.is_required,
89 'is_included': self.is_included,
90 'is_fixed': self.is_fixed
91 }
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/models/custom_form.py b/app/models/custom_form.py
--- a/app/models/custom_form.py
+++ b/app/models/custom_form.py
@@ -35,8 +35,34 @@
"linkedin": {"include": 0, "require": 0}
}
+ATTENDEE_FORM = {
+ "firstname": {"include": 1, "require": 1},
+ "lastname": {"include": 1, "require": 1},
+ "email": {"include": 1, "require": 0},
+ "address": {"include": 1, "require": 0},
+ "city": {"include": 1, "require": 0},
+ "state": {"include": 1, "require": 0},
+ "country": {"include": 1, "require": 0},
+ "job_title": {"include": 1, "require": 0},
+ "phone": {"include": 1, "require": 0},
+ "tax_business_info": {"include": 0, "require": 0},
+ "billing_address": {"include": 0, "require": 0},
+ "home_address": {"include": 0, "require": 0},
+ "shipping_address": {"include": 0, "require": 0},
+ "company": {"include": 0, "require": 0},
+ "work_address": {"include": 0, "require": 0},
+ "work_phone": {"include": 0, "require": 0},
+ "website": {"include": 1, "require": 0},
+ "blog": {"include": 0, "require": 0},
+ "twitter": {"include": 1, "require": 0},
+ "facebook": {"include": 0, "require": 0},
+ "github": {"include": 1, "require": 0},
+ "gender": {"include": 0, "require": 0},
+}
+
session_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))
speaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))
+attendee_form_str = json.dumps(ATTENDEE_FORM, separators=(',', ':'))
class CustomForms(SoftDeletionModel):
| {"golden_diff": "diff --git a/app/models/custom_form.py b/app/models/custom_form.py\n--- a/app/models/custom_form.py\n+++ b/app/models/custom_form.py\n@@ -35,8 +35,34 @@\n \"linkedin\": {\"include\": 0, \"require\": 0}\n }\n \n+ATTENDEE_FORM = {\n+ \"firstname\": {\"include\": 1, \"require\": 1},\n+ \"lastname\": {\"include\": 1, \"require\": 1},\n+ \"email\": {\"include\": 1, \"require\": 0},\n+ \"address\": {\"include\": 1, \"require\": 0},\n+ \"city\": {\"include\": 1, \"require\": 0},\n+ \"state\": {\"include\": 1, \"require\": 0},\n+ \"country\": {\"include\": 1, \"require\": 0},\n+ \"job_title\": {\"include\": 1, \"require\": 0},\n+ \"phone\": {\"include\": 1, \"require\": 0},\n+ \"tax_business_info\": {\"include\": 0, \"require\": 0},\n+ \"billing_address\": {\"include\": 0, \"require\": 0},\n+ \"home_address\": {\"include\": 0, \"require\": 0},\n+ \"shipping_address\": {\"include\": 0, \"require\": 0},\n+ \"company\": {\"include\": 0, \"require\": 0},\n+ \"work_address\": {\"include\": 0, \"require\": 0},\n+ \"work_phone\": {\"include\": 0, \"require\": 0},\n+ \"website\": {\"include\": 1, \"require\": 0},\n+ \"blog\": {\"include\": 0, \"require\": 0},\n+ \"twitter\": {\"include\": 1, \"require\": 0},\n+ \"facebook\": {\"include\": 0, \"require\": 0},\n+ \"github\": {\"include\": 1, \"require\": 0},\n+ \"gender\": {\"include\": 0, \"require\": 0},\n+}\n+\n session_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))\n speaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))\n+attendee_form_str = json.dumps(ATTENDEE_FORM, separators=(',', ':'))\n \n \n class CustomForms(SoftDeletionModel):\n", "issue": "Add custom form for attendees\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\nAdd custom form for attendees\r\n\r\n**Describe the solution you'd like**\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n**Describe alternatives you've considered**\r\n<!-- A clear and concise description of any alternative solutions or features you've considered. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\r\n**Working on it**\n", "before_files": [{"content": "import json\nfrom sqlalchemy.schema import UniqueConstraint\n\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\n\nSESSION_FORM = {\n \"title\": {\"include\": 1, \"require\": 1},\n \"subtitle\": {\"include\": 0, \"require\": 0},\n \"short_abstract\": {\"include\": 1, \"require\": 0},\n \"long_abstract\": {\"include\": 0, \"require\": 0},\n \"comments\": {\"include\": 1, \"require\": 0},\n \"track\": {\"include\": 0, \"require\": 0},\n \"session_type\": {\"include\": 0, \"require\": 0},\n \"language\": {\"include\": 0, \"require\": 0},\n \"slides\": {\"include\": 1, \"require\": 0},\n \"video\": {\"include\": 0, \"require\": 0},\n \"audio\": {\"include\": 0, \"require\": 0}\n}\n\nSPEAKER_FORM = {\n \"name\": {\"include\": 1, \"require\": 1},\n \"email\": {\"include\": 1, \"require\": 1},\n \"photo\": {\"include\": 1, \"require\": 0},\n \"organisation\": {\"include\": 1, \"require\": 0},\n \"position\": {\"include\": 1, \"require\": 0},\n \"country\": {\"include\": 1, \"require\": 0},\n \"short_biography\": {\"include\": 1, \"require\": 0},\n \"long_biography\": {\"include\": 0, \"require\": 0},\n \"mobile\": {\"include\": 0, \"require\": 0},\n \"website\": {\"include\": 1, \"require\": 0},\n \"facebook\": {\"include\": 0, \"require\": 0},\n \"twitter\": {\"include\": 1, \"require\": 0},\n \"github\": {\"include\": 0, \"require\": 0},\n \"linkedin\": {\"include\": 0, \"require\": 0}\n}\n\nsession_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))\nspeaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))\n\n\nclass CustomForms(SoftDeletionModel):\n \"\"\"custom form model class\"\"\"\n __tablename__ = 'custom_forms'\n __table_args__ = (UniqueConstraint('event_id', 'field_identifier', 'form', name='custom_form_identifier'), )\n id = db.Column(db.Integer, primary_key=True)\n field_identifier = db.Column(db.String, nullable=False)\n form = db.Column(db.String, nullable=False)\n type = db.Column(db.String, nullable=False)\n is_required = db.Column(db.Boolean)\n is_included = db.Column(db.Boolean)\n is_fixed = db.Column(db.Boolean)\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n\n def __init__(self,\n event_id=None,\n field_identifier=None,\n form=None,\n type=None,\n is_required=None,\n is_included=None,\n is_fixed=None,\n deleted_at=None):\n self.event_id = event_id\n self.field_identifier = field_identifier,\n self.form = form,\n self.type = type,\n self.is_required = is_required,\n self.is_included = is_included,\n self.is_fixed = is_fixed\n self.deleted_at = deleted_at\n\n def __repr__(self):\n return '<CustomForm %r>' % self.id\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def serialize(self):\n \"\"\"Return object data in easily serializable format\"\"\"\n\n return {\n 'id': self.id,\n 'field_identifier': self.field_identifier,\n 'form': self.form,\n 'type': self.type,\n 'is_required': self.is_required,\n 'is_included': self.is_included,\n 'is_fixed': self.is_fixed\n }\n", "path": "app/models/custom_form.py"}], "after_files": [{"content": "import json\nfrom sqlalchemy.schema import UniqueConstraint\n\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\n\nSESSION_FORM = {\n \"title\": {\"include\": 1, \"require\": 1},\n \"subtitle\": {\"include\": 0, \"require\": 0},\n \"short_abstract\": {\"include\": 1, \"require\": 0},\n \"long_abstract\": {\"include\": 0, \"require\": 0},\n \"comments\": {\"include\": 1, 
\"require\": 0},\n \"track\": {\"include\": 0, \"require\": 0},\n \"session_type\": {\"include\": 0, \"require\": 0},\n \"language\": {\"include\": 0, \"require\": 0},\n \"slides\": {\"include\": 1, \"require\": 0},\n \"video\": {\"include\": 0, \"require\": 0},\n \"audio\": {\"include\": 0, \"require\": 0}\n}\n\nSPEAKER_FORM = {\n \"name\": {\"include\": 1, \"require\": 1},\n \"email\": {\"include\": 1, \"require\": 1},\n \"photo\": {\"include\": 1, \"require\": 0},\n \"organisation\": {\"include\": 1, \"require\": 0},\n \"position\": {\"include\": 1, \"require\": 0},\n \"country\": {\"include\": 1, \"require\": 0},\n \"short_biography\": {\"include\": 1, \"require\": 0},\n \"long_biography\": {\"include\": 0, \"require\": 0},\n \"mobile\": {\"include\": 0, \"require\": 0},\n \"website\": {\"include\": 1, \"require\": 0},\n \"facebook\": {\"include\": 0, \"require\": 0},\n \"twitter\": {\"include\": 1, \"require\": 0},\n \"github\": {\"include\": 0, \"require\": 0},\n \"linkedin\": {\"include\": 0, \"require\": 0}\n}\n\nATTENDEE_FORM = {\n \"firstname\": {\"include\": 1, \"require\": 1},\n \"lastname\": {\"include\": 1, \"require\": 1},\n \"email\": {\"include\": 1, \"require\": 0},\n \"address\": {\"include\": 1, \"require\": 0},\n \"city\": {\"include\": 1, \"require\": 0},\n \"state\": {\"include\": 1, \"require\": 0},\n \"country\": {\"include\": 1, \"require\": 0},\n \"job_title\": {\"include\": 1, \"require\": 0},\n \"phone\": {\"include\": 1, \"require\": 0},\n \"tax_business_info\": {\"include\": 0, \"require\": 0},\n \"billing_address\": {\"include\": 0, \"require\": 0},\n \"home_address\": {\"include\": 0, \"require\": 0},\n \"shipping_address\": {\"include\": 0, \"require\": 0},\n \"company\": {\"include\": 0, \"require\": 0},\n \"work_address\": {\"include\": 0, \"require\": 0},\n \"work_phone\": {\"include\": 0, \"require\": 0},\n \"website\": {\"include\": 1, \"require\": 0},\n \"blog\": {\"include\": 0, \"require\": 0},\n \"twitter\": {\"include\": 1, \"require\": 0},\n \"facebook\": {\"include\": 0, \"require\": 0},\n \"github\": {\"include\": 1, \"require\": 0},\n \"gender\": {\"include\": 0, \"require\": 0},\n}\n\nsession_form_str = json.dumps(SESSION_FORM, separators=(',', ':'))\nspeaker_form_str = json.dumps(SPEAKER_FORM, separators=(',', ':'))\nattendee_form_str = json.dumps(ATTENDEE_FORM, separators=(',', ':'))\n\n\nclass CustomForms(SoftDeletionModel):\n \"\"\"custom form model class\"\"\"\n __tablename__ = 'custom_forms'\n __table_args__ = (UniqueConstraint('event_id', 'field_identifier', 'form', name='custom_form_identifier'), )\n id = db.Column(db.Integer, primary_key=True)\n field_identifier = db.Column(db.String, nullable=False)\n form = db.Column(db.String, nullable=False)\n type = db.Column(db.String, nullable=False)\n is_required = db.Column(db.Boolean)\n is_included = db.Column(db.Boolean)\n is_fixed = db.Column(db.Boolean)\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n\n def __init__(self,\n event_id=None,\n field_identifier=None,\n form=None,\n type=None,\n is_required=None,\n is_included=None,\n is_fixed=None,\n deleted_at=None):\n self.event_id = event_id\n self.field_identifier = field_identifier,\n self.form = form,\n self.type = type,\n self.is_required = is_required,\n self.is_included = is_included,\n self.is_fixed = is_fixed\n self.deleted_at = deleted_at\n\n def __repr__(self):\n return '<CustomForm %r>' % self.id\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def serialize(self):\n 
\"\"\"Return object data in easily serializable format\"\"\"\n\n return {\n 'id': self.id,\n 'field_identifier': self.field_identifier,\n 'form': self.form,\n 'type': self.type,\n 'is_required': self.is_required,\n 'is_included': self.is_included,\n 'is_fixed': self.is_fixed\n }\n", "path": "app/models/custom_form.py"}]} | 1,406 | 517 |
gh_patches_debug_9114 | rasdani/github-patches | git_diff | UTNkar__moore-183 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Centre drive embeds
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
### Description
Center drive embeds. Currently it looks a bit weird.

### Steps to Reproduce
1. [First Step]
2. [Second Step]
3. [and so on...]
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/google/models.py`
Content:
```
1 from datetime import date
2
3 from django.db import models
4 from django.utils.translation import ugettext_lazy as _
5 from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel, \
6 TabbedInterface, ObjectList
7 from wagtail.wagtailcore import blocks
8 from wagtail.wagtailcore.fields import StreamField, RichTextField
9 from wagtail.wagtailcore.models import Page
10 from wagtail.wagtailsearch import index
11
12 from blocks.models import WAGTAIL_STATIC_BLOCKTYPES
13 from utils.translation import TranslatedField
14
15
16 class GoogleFormBlock(blocks.StructBlock):
17 form_id = blocks.CharBlock()
18 height = blocks.IntegerBlock()
19
20 class Meta:
21 label = _('Google Form')
22 icon = 'fa-check-square-o'
23 template = 'google/blocks/form.html'
24 group = _('Meta')
25
26
27 class GoogleFormIndex(Page):
28 title_sv = models.CharField(max_length=255)
29 translated_title = TranslatedField('title', 'title_sv')
30
31 description_en = RichTextField(
32 verbose_name=_('English description'),
33 blank=True,
34 )
35 description_sv = RichTextField(
36 verbose_name=_('Swedish description'),
37 blank=True,
38 )
39 description = TranslatedField('description_en', 'description_sv')
40
41 # Editor panels configuration
42 content_panels = Page.content_panels + [
43 FieldPanel('title_sv', classname="full title"),
44 FieldPanel('description_en'),
45 FieldPanel('description_sv'),
46 ]
47
48 # Sub-page type rules
49 subpage_types = ['google.GoogleFormPage']
50
51 def get_context(self, request, **kwargs):
52 context = super(GoogleFormIndex, self).get_context(request, **kwargs)
53
54 # Add extra variables and return the updated context
55 context['google_forms'] = GoogleFormPage.objects.child_of(self).live()\
56 .order_by('-deadline')
57 return context
58
59
60 class GoogleFormPage(Page):
61 title_sv = models.CharField(max_length=255)
62 translated_title = TranslatedField('title', 'title_sv')
63
64 # TODO: Limit to one form!
65 form_en = StreamField([('google_form', GoogleFormBlock())])
66 form_sv = StreamField([('google_form', GoogleFormBlock())])
67 form = TranslatedField('form_en', 'form_sv')
68
69 deadline = models.DateField(verbose_name=_('Form deadline'))
70
71 results_en = StreamField(
72 WAGTAIL_STATIC_BLOCKTYPES,
73 blank=True,
74 )
75 results_sv = StreamField(
76 WAGTAIL_STATIC_BLOCKTYPES,
77 blank=True,
78 )
79 results = TranslatedField('results_en', 'results_sv')
80
81 @property
82 def is_past_due(self) -> bool:
83 return date.today() > self.deadline
84
85 # Editor panels configuration
86 content_panels = Page.content_panels + [
87 FieldPanel('title_sv', classname="full title"),
88 FieldPanel('deadline'),
89 StreamFieldPanel('form_en'),
90 StreamFieldPanel('form_sv'),
91 ]
92
93 edit_handler = TabbedInterface([
94 ObjectList(content_panels, heading=_('Common')),
95 ObjectList([StreamFieldPanel('results_en')], heading=_('English')),
96 ObjectList([StreamFieldPanel('results_sv')], heading=_('Swedish')),
97 ObjectList(
98 Page.promote_panels + Page.settings_panels, heading=_('Settings')
99 ),
100 ])
101
102 # Search index configuration
103 search_fields = Page.search_fields + [
104 index.SearchField('title_sv'),
105 index.FilterField('results_en'),
106 index.FilterField('results_sv'),
107 index.FilterField('deadline'),
108 ]
109
110 # Parent page / subpage type rules
111 parent_page_types = ['google.GoogleFormIndex']
112 subpage_types = []
113
114
115 class GoogleDriveBlock(blocks.StructBlock):
116 folder_id = blocks.CharBlock()
117 view = blocks.ChoiceBlock(choices=[
118 ('list', _('List')),
119 ('grid', _('Grid')),
120 ])
121 height = blocks.IntegerBlock()
122
123 class Meta:
124 label = _('Google Drive')
125 icon = 'fa-folder-open'
126 template = 'google/blocks/drive.html'
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/google/models.py b/src/google/models.py
--- a/src/google/models.py
+++ b/src/google/models.py
@@ -21,7 +21,7 @@
label = _('Google Form')
icon = 'fa-check-square-o'
template = 'google/blocks/form.html'
- group = _('Meta')
+ group = _('Embed')
class GoogleFormIndex(Page):
@@ -124,3 +124,4 @@
label = _('Google Drive')
icon = 'fa-folder-open'
template = 'google/blocks/drive.html'
+ group = _('Embed')
| {"golden_diff": "diff --git a/src/google/models.py b/src/google/models.py\n--- a/src/google/models.py\n+++ b/src/google/models.py\n@@ -21,7 +21,7 @@\n label = _('Google Form')\n icon = 'fa-check-square-o'\n template = 'google/blocks/form.html'\n- group = _('Meta')\n+ group = _('Embed')\n \n \n class GoogleFormIndex(Page):\n@@ -124,3 +124,4 @@\n label = _('Google Drive')\n icon = 'fa-folder-open'\n template = 'google/blocks/drive.html'\n+ group = _('Embed')\n", "issue": "Centre drive embeds\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Description\r\n\r\nCenter drive embeds. Currently it looks a bit weird.\r\n\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. [and so on...]\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from datetime import date\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel, \\\n TabbedInterface, ObjectList\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.fields import StreamField, RichTextField\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch import index\n\nfrom blocks.models import WAGTAIL_STATIC_BLOCKTYPES\nfrom utils.translation import TranslatedField\n\n\nclass GoogleFormBlock(blocks.StructBlock):\n form_id = blocks.CharBlock()\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Form')\n icon = 'fa-check-square-o'\n template = 'google/blocks/form.html'\n group = _('Meta')\n\n\nclass GoogleFormIndex(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n description_en = RichTextField(\n verbose_name=_('English description'),\n blank=True,\n )\n description_sv = RichTextField(\n verbose_name=_('Swedish description'),\n blank=True,\n )\n description = TranslatedField('description_en', 'description_sv')\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('description_en'),\n FieldPanel('description_sv'),\n ]\n\n # Sub-page type rules\n subpage_types = ['google.GoogleFormPage']\n\n def get_context(self, request, **kwargs):\n context = super(GoogleFormIndex, self).get_context(request, **kwargs)\n\n # Add extra variables and return the updated context\n context['google_forms'] = GoogleFormPage.objects.child_of(self).live()\\\n .order_by('-deadline')\n return context\n\n\nclass GoogleFormPage(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n # TODO: Limit to one form!\n form_en = StreamField([('google_form', GoogleFormBlock())])\n form_sv = StreamField([('google_form', GoogleFormBlock())])\n form = TranslatedField('form_en', 'form_sv')\n\n deadline = models.DateField(verbose_name=_('Form deadline'))\n\n results_en = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results_sv = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results = TranslatedField('results_en', 'results_sv')\n\n @property\n def is_past_due(self) -> bool:\n return date.today() > self.deadline\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('deadline'),\n StreamFieldPanel('form_en'),\n 
StreamFieldPanel('form_sv'),\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(content_panels, heading=_('Common')),\n ObjectList([StreamFieldPanel('results_en')], heading=_('English')),\n ObjectList([StreamFieldPanel('results_sv')], heading=_('Swedish')),\n ObjectList(\n Page.promote_panels + Page.settings_panels, heading=_('Settings')\n ),\n ])\n\n # Search index configuration\n search_fields = Page.search_fields + [\n index.SearchField('title_sv'),\n index.FilterField('results_en'),\n index.FilterField('results_sv'),\n index.FilterField('deadline'),\n ]\n\n # Parent page / subpage type rules\n parent_page_types = ['google.GoogleFormIndex']\n subpage_types = []\n\n\nclass GoogleDriveBlock(blocks.StructBlock):\n folder_id = blocks.CharBlock()\n view = blocks.ChoiceBlock(choices=[\n ('list', _('List')),\n ('grid', _('Grid')),\n ])\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Drive')\n icon = 'fa-folder-open'\n template = 'google/blocks/drive.html'\n", "path": "src/google/models.py"}], "after_files": [{"content": "from datetime import date\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel, \\\n TabbedInterface, ObjectList\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.fields import StreamField, RichTextField\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch import index\n\nfrom blocks.models import WAGTAIL_STATIC_BLOCKTYPES\nfrom utils.translation import TranslatedField\n\n\nclass GoogleFormBlock(blocks.StructBlock):\n form_id = blocks.CharBlock()\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Form')\n icon = 'fa-check-square-o'\n template = 'google/blocks/form.html'\n group = _('Embed')\n\n\nclass GoogleFormIndex(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n description_en = RichTextField(\n verbose_name=_('English description'),\n blank=True,\n )\n description_sv = RichTextField(\n verbose_name=_('Swedish description'),\n blank=True,\n )\n description = TranslatedField('description_en', 'description_sv')\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('description_en'),\n FieldPanel('description_sv'),\n ]\n\n # Sub-page type rules\n subpage_types = ['google.GoogleFormPage']\n\n def get_context(self, request, **kwargs):\n context = super(GoogleFormIndex, self).get_context(request, **kwargs)\n\n # Add extra variables and return the updated context\n context['google_forms'] = GoogleFormPage.objects.child_of(self).live()\\\n .order_by('-deadline')\n return context\n\n\nclass GoogleFormPage(Page):\n title_sv = models.CharField(max_length=255)\n translated_title = TranslatedField('title', 'title_sv')\n\n # TODO: Limit to one form!\n form_en = StreamField([('google_form', GoogleFormBlock())])\n form_sv = StreamField([('google_form', GoogleFormBlock())])\n form = TranslatedField('form_en', 'form_sv')\n\n deadline = models.DateField(verbose_name=_('Form deadline'))\n\n results_en = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results_sv = StreamField(\n WAGTAIL_STATIC_BLOCKTYPES,\n blank=True,\n )\n results = TranslatedField('results_en', 'results_sv')\n\n @property\n def is_past_due(self) -> bool:\n return date.today() > self.deadline\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n 
FieldPanel('title_sv', classname=\"full title\"),\n FieldPanel('deadline'),\n StreamFieldPanel('form_en'),\n StreamFieldPanel('form_sv'),\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(content_panels, heading=_('Common')),\n ObjectList([StreamFieldPanel('results_en')], heading=_('English')),\n ObjectList([StreamFieldPanel('results_sv')], heading=_('Swedish')),\n ObjectList(\n Page.promote_panels + Page.settings_panels, heading=_('Settings')\n ),\n ])\n\n # Search index configuration\n search_fields = Page.search_fields + [\n index.SearchField('title_sv'),\n index.FilterField('results_en'),\n index.FilterField('results_sv'),\n index.FilterField('deadline'),\n ]\n\n # Parent page / subpage type rules\n parent_page_types = ['google.GoogleFormIndex']\n subpage_types = []\n\n\nclass GoogleDriveBlock(blocks.StructBlock):\n folder_id = blocks.CharBlock()\n view = blocks.ChoiceBlock(choices=[\n ('list', _('List')),\n ('grid', _('Grid')),\n ])\n height = blocks.IntegerBlock()\n\n class Meta:\n label = _('Google Drive')\n icon = 'fa-folder-open'\n template = 'google/blocks/drive.html'\n group = _('Embed')\n", "path": "src/google/models.py"}]} | 1,559 | 134 |
gh_patches_debug_41589 | rasdani/github-patches | git_diff | getsentry__sentry-python-851 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django 3.1 async views do not work
When using sentry versions greater than 0.16.3 (tested on 0.18.0), Django 3.1 async views do not work.
```
log.py 224 ERROR Internal Server Error: /async_ok
Traceback (most recent call last):
File "/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py", line 186, in _get_response
self.check_response(response, callback)
File "/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py", line 312, in check_response
raise ValueError(
ValueError: The view tests.integrations.django.myapp.views.async_ok didn't return an HttpResponse object. It returned an unawaited coroutine instead. You may need to add an 'await' into your view.
```
I have made a branch with a test case to demonstrate this: https://github.com/uptickmetachu/sentry-python/tree/django3.1-test-async-view
--- END ISSUE ---
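The traceback shows the view wrapper handing Django a bare coroutine, which the response check in Django 3.1 rejects. A minimal sketch of the general remedy, using illustrative names rather than the SDK's actual API, is to detect coroutine-function callbacks and wrap them in an async wrapper so the span surrounds an awaited call (the project's real patch, shown further down, splits this across helpers and guards the asyncio import):

```python
# Sketch only: wrap coroutine views with an async wrapper so the traced call
# is awaited instead of returned as an unawaited coroutine.
import functools
from asyncio import iscoroutinefunction


def wrap_view(hub, callback):
    if iscoroutinefunction(callback):
        @functools.wraps(callback)
        async def wrapped(request, *args, **kwargs):
            with hub.start_span(op="django.view",
                                description=request.resolver_match.view_name):
                return await callback(request, *args, **kwargs)
    else:
        @functools.wraps(callback)
        def wrapped(request, *args, **kwargs):
            with hub.start_span(op="django.view",
                                description=request.resolver_match.view_name):
                return callback(request, *args, **kwargs)
    return wrapped
```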
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/django/views.py`
Content:
```
1 from sentry_sdk.hub import Hub
2 from sentry_sdk._types import MYPY
3 from sentry_sdk import _functools
4
5 if MYPY:
6 from typing import Any
7
8
9 def patch_views():
10 # type: () -> None
11
12 from django.core.handlers.base import BaseHandler
13 from sentry_sdk.integrations.django import DjangoIntegration
14
15 old_make_view_atomic = BaseHandler.make_view_atomic
16
17 @_functools.wraps(old_make_view_atomic)
18 def sentry_patched_make_view_atomic(self, *args, **kwargs):
19 # type: (Any, *Any, **Any) -> Any
20 callback = old_make_view_atomic(self, *args, **kwargs)
21
22 # XXX: The wrapper function is created for every request. Find more
23 # efficient way to wrap views (or build a cache?)
24
25 hub = Hub.current
26 integration = hub.get_integration(DjangoIntegration)
27
28 if integration is not None and integration.middleware_spans:
29
30 @_functools.wraps(callback)
31 def sentry_wrapped_callback(request, *args, **kwargs):
32 # type: (Any, *Any, **Any) -> Any
33 with hub.start_span(
34 op="django.view", description=request.resolver_match.view_name
35 ):
36 return callback(request, *args, **kwargs)
37
38 else:
39 sentry_wrapped_callback = callback
40
41 return sentry_wrapped_callback
42
43 BaseHandler.make_view_atomic = sentry_patched_make_view_atomic
44
```
Path: `sentry_sdk/integrations/django/asgi.py`
Content:
```
1 """
2 Instrumentation for Django 3.0
3
4 Since this file contains `async def` it is conditionally imported in
5 `sentry_sdk.integrations.django` (depending on the existence of
6 `django.core.handlers.asgi`.
7 """
8
9 from sentry_sdk import Hub
10 from sentry_sdk._types import MYPY
11
12 from sentry_sdk.integrations.django import DjangoIntegration
13 from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
14
15 if MYPY:
16 from typing import Any
17 from typing import Union
18
19 from django.http.response import HttpResponse
20
21
22 def patch_django_asgi_handler_impl(cls):
23 # type: (Any) -> None
24 old_app = cls.__call__
25
26 async def sentry_patched_asgi_handler(self, scope, receive, send):
27 # type: (Any, Any, Any, Any) -> Any
28 if Hub.current.get_integration(DjangoIntegration) is None:
29 return await old_app(self, scope, receive, send)
30
31 middleware = SentryAsgiMiddleware(
32 old_app.__get__(self, cls), unsafe_context_data=True
33 )._run_asgi3
34 return await middleware(scope, receive, send)
35
36 cls.__call__ = sentry_patched_asgi_handler
37
38
39 def patch_get_response_async(cls, _before_get_response):
40 # type: (Any, Any) -> None
41 old_get_response_async = cls.get_response_async
42
43 async def sentry_patched_get_response_async(self, request):
44 # type: (Any, Any) -> Union[HttpResponse, BaseException]
45 _before_get_response(request)
46 return await old_get_response_async(self, request)
47
48 cls.get_response_async = sentry_patched_get_response_async
49
50
51 def patch_channels_asgi_handler_impl(cls):
52 # type: (Any) -> None
53 old_app = cls.__call__
54
55 async def sentry_patched_asgi_handler(self, receive, send):
56 # type: (Any, Any, Any) -> Any
57 if Hub.current.get_integration(DjangoIntegration) is None:
58 return await old_app(self, receive, send)
59
60 middleware = SentryAsgiMiddleware(
61 lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True
62 )
63
64 return await middleware(self.scope)(receive, send)
65
66 cls.__call__ = sentry_patched_asgi_handler
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/django/asgi.py b/sentry_sdk/integrations/django/asgi.py
--- a/sentry_sdk/integrations/django/asgi.py
+++ b/sentry_sdk/integrations/django/asgi.py
@@ -6,10 +6,9 @@
`django.core.handlers.asgi`.
"""
-from sentry_sdk import Hub
+from sentry_sdk import Hub, _functools
from sentry_sdk._types import MYPY
-from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
if MYPY:
@@ -21,6 +20,9 @@
def patch_django_asgi_handler_impl(cls):
# type: (Any) -> None
+
+ from sentry_sdk.integrations.django import DjangoIntegration
+
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, scope, receive, send):
@@ -50,6 +52,9 @@
def patch_channels_asgi_handler_impl(cls):
# type: (Any) -> None
+
+ from sentry_sdk.integrations.django import DjangoIntegration
+
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, receive, send):
@@ -64,3 +69,17 @@
return await middleware(self.scope)(receive, send)
cls.__call__ = sentry_patched_asgi_handler
+
+
+def wrap_async_view(hub, callback):
+ # type: (Hub, Any) -> Any
+ @_functools.wraps(callback)
+ async def sentry_wrapped_callback(request, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> Any
+
+ with hub.start_span(
+ op="django.view", description=request.resolver_match.view_name
+ ):
+ return await callback(request, *args, **kwargs)
+
+ return sentry_wrapped_callback
diff --git a/sentry_sdk/integrations/django/views.py b/sentry_sdk/integrations/django/views.py
--- a/sentry_sdk/integrations/django/views.py
+++ b/sentry_sdk/integrations/django/views.py
@@ -6,6 +6,18 @@
from typing import Any
+try:
+ from asyncio import iscoroutinefunction
+except ImportError:
+ iscoroutinefunction = None # type: ignore
+
+
+try:
+ from sentry_sdk.integrations.django.asgi import wrap_async_view
+except (ImportError, SyntaxError):
+ wrap_async_view = None # type: ignore
+
+
def patch_views():
# type: () -> None
@@ -27,13 +39,14 @@
if integration is not None and integration.middleware_spans:
- @_functools.wraps(callback)
- def sentry_wrapped_callback(request, *args, **kwargs):
- # type: (Any, *Any, **Any) -> Any
- with hub.start_span(
- op="django.view", description=request.resolver_match.view_name
- ):
- return callback(request, *args, **kwargs)
+ if (
+ iscoroutinefunction is not None
+ and wrap_async_view is not None
+ and iscoroutinefunction(callback)
+ ):
+ sentry_wrapped_callback = wrap_async_view(hub, callback)
+ else:
+ sentry_wrapped_callback = _wrap_sync_view(hub, callback)
else:
sentry_wrapped_callback = callback
@@ -41,3 +54,16 @@
return sentry_wrapped_callback
BaseHandler.make_view_atomic = sentry_patched_make_view_atomic
+
+
+def _wrap_sync_view(hub, callback):
+ # type: (Hub, Any) -> Any
+ @_functools.wraps(callback)
+ def sentry_wrapped_callback(request, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> Any
+ with hub.start_span(
+ op="django.view", description=request.resolver_match.view_name
+ ):
+ return callback(request, *args, **kwargs)
+
+ return sentry_wrapped_callback
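For reference, the kind of view the patched wrapper now handles is the async endpoint named in the traceback above; a minimal example of such a view:

```python
# Minimal async view, like the tests' async_ok endpoint referenced in the
# issue; with the patch the integration awaits it inside the span.
from django.http import HttpResponse


async def async_ok(request):
    return HttpResponse("ok")
```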
| {"golden_diff": "diff --git a/sentry_sdk/integrations/django/asgi.py b/sentry_sdk/integrations/django/asgi.py\n--- a/sentry_sdk/integrations/django/asgi.py\n+++ b/sentry_sdk/integrations/django/asgi.py\n@@ -6,10 +6,9 @@\n `django.core.handlers.asgi`.\n \"\"\"\n \n-from sentry_sdk import Hub\n+from sentry_sdk import Hub, _functools\n from sentry_sdk._types import MYPY\n \n-from sentry_sdk.integrations.django import DjangoIntegration\n from sentry_sdk.integrations.asgi import SentryAsgiMiddleware\n \n if MYPY:\n@@ -21,6 +20,9 @@\n \n def patch_django_asgi_handler_impl(cls):\n # type: (Any) -> None\n+\n+ from sentry_sdk.integrations.django import DjangoIntegration\n+\n old_app = cls.__call__\n \n async def sentry_patched_asgi_handler(self, scope, receive, send):\n@@ -50,6 +52,9 @@\n \n def patch_channels_asgi_handler_impl(cls):\n # type: (Any) -> None\n+\n+ from sentry_sdk.integrations.django import DjangoIntegration\n+\n old_app = cls.__call__\n \n async def sentry_patched_asgi_handler(self, receive, send):\n@@ -64,3 +69,17 @@\n return await middleware(self.scope)(receive, send)\n \n cls.__call__ = sentry_patched_asgi_handler\n+\n+\n+def wrap_async_view(hub, callback):\n+ # type: (Hub, Any) -> Any\n+ @_functools.wraps(callback)\n+ async def sentry_wrapped_callback(request, *args, **kwargs):\n+ # type: (Any, *Any, **Any) -> Any\n+\n+ with hub.start_span(\n+ op=\"django.view\", description=request.resolver_match.view_name\n+ ):\n+ return await callback(request, *args, **kwargs)\n+\n+ return sentry_wrapped_callback\ndiff --git a/sentry_sdk/integrations/django/views.py b/sentry_sdk/integrations/django/views.py\n--- a/sentry_sdk/integrations/django/views.py\n+++ b/sentry_sdk/integrations/django/views.py\n@@ -6,6 +6,18 @@\n from typing import Any\n \n \n+try:\n+ from asyncio import iscoroutinefunction\n+except ImportError:\n+ iscoroutinefunction = None # type: ignore\n+\n+\n+try:\n+ from sentry_sdk.integrations.django.asgi import wrap_async_view\n+except (ImportError, SyntaxError):\n+ wrap_async_view = None # type: ignore\n+\n+\n def patch_views():\n # type: () -> None\n \n@@ -27,13 +39,14 @@\n \n if integration is not None and integration.middleware_spans:\n \n- @_functools.wraps(callback)\n- def sentry_wrapped_callback(request, *args, **kwargs):\n- # type: (Any, *Any, **Any) -> Any\n- with hub.start_span(\n- op=\"django.view\", description=request.resolver_match.view_name\n- ):\n- return callback(request, *args, **kwargs)\n+ if (\n+ iscoroutinefunction is not None\n+ and wrap_async_view is not None\n+ and iscoroutinefunction(callback)\n+ ):\n+ sentry_wrapped_callback = wrap_async_view(hub, callback)\n+ else:\n+ sentry_wrapped_callback = _wrap_sync_view(hub, callback)\n \n else:\n sentry_wrapped_callback = callback\n@@ -41,3 +54,16 @@\n return sentry_wrapped_callback\n \n BaseHandler.make_view_atomic = sentry_patched_make_view_atomic\n+\n+\n+def _wrap_sync_view(hub, callback):\n+ # type: (Hub, Any) -> Any\n+ @_functools.wraps(callback)\n+ def sentry_wrapped_callback(request, *args, **kwargs):\n+ # type: (Any, *Any, **Any) -> Any\n+ with hub.start_span(\n+ op=\"django.view\", description=request.resolver_match.view_name\n+ ):\n+ return callback(request, *args, **kwargs)\n+\n+ return sentry_wrapped_callback\n", "issue": "Django 3.1 async views do not work\nWhen using sentry versions greater than 0.16.3, (tested on 0.18.0), Django 3.1 aysnc views do not work.\r\n\r\n```\r\nlog.py 224 ERROR Internal Server Error: /async_ok\r\nTraceback (most recent call last):\r\n File 
\"/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py\", line 186, in _get_response\r\n self.check_response(response, callback)\r\n File \"/Users/williamchu/dev/sentry-python/.tox/py3.8-django-3.1/lib/python3.8/site-packages/django/core/handlers/base.py\", line 312, in check_response\r\n raise ValueError(\r\nValueError: The view tests.integrations.django.myapp.views.async_ok didn't return an HttpResponse object. It returned an unawaited coroutine instead. You may need to add an 'await' into your view.\r\n```\r\n\r\nI have made a branch with a test case to demonstrate this: https://github.com/uptickmetachu/sentry-python/tree/django3.1-test-async-view\r\n\r\n\n", "before_files": [{"content": "from sentry_sdk.hub import Hub\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk import _functools\n\nif MYPY:\n from typing import Any\n\n\ndef patch_views():\n # type: () -> None\n\n from django.core.handlers.base import BaseHandler\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_make_view_atomic = BaseHandler.make_view_atomic\n\n @_functools.wraps(old_make_view_atomic)\n def sentry_patched_make_view_atomic(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n callback = old_make_view_atomic(self, *args, **kwargs)\n\n # XXX: The wrapper function is created for every request. Find more\n # efficient way to wrap views (or build a cache?)\n\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n\n if integration is not None and integration.middleware_spans:\n\n @_functools.wraps(callback)\n def sentry_wrapped_callback(request, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n with hub.start_span(\n op=\"django.view\", description=request.resolver_match.view_name\n ):\n return callback(request, *args, **kwargs)\n\n else:\n sentry_wrapped_callback = callback\n\n return sentry_wrapped_callback\n\n BaseHandler.make_view_atomic = sentry_patched_make_view_atomic\n", "path": "sentry_sdk/integrations/django/views.py"}, {"content": "\"\"\"\nInstrumentation for Django 3.0\n\nSince this file contains `async def` it is conditionally imported in\n`sentry_sdk.integrations.django` (depending on the existence of\n`django.core.handlers.asgi`.\n\"\"\"\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._types import MYPY\n\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\n\nif MYPY:\n from typing import Any\n from typing import Union\n\n from django.http.response import HttpResponse\n\n\ndef patch_django_asgi_handler_impl(cls):\n # type: (Any) -> None\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, scope, receive, send):\n # type: (Any, Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, scope, receive, send)\n\n middleware = SentryAsgiMiddleware(\n old_app.__get__(self, cls), unsafe_context_data=True\n )._run_asgi3\n return await middleware(scope, receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n\n\ndef patch_get_response_async(cls, _before_get_response):\n # type: (Any, Any) -> None\n old_get_response_async = cls.get_response_async\n\n async def sentry_patched_get_response_async(self, request):\n # type: (Any, Any) -> Union[HttpResponse, 
BaseException]\n _before_get_response(request)\n return await old_get_response_async(self, request)\n\n cls.get_response_async = sentry_patched_get_response_async\n\n\ndef patch_channels_asgi_handler_impl(cls):\n # type: (Any) -> None\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, receive, send):\n # type: (Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, receive, send)\n\n middleware = SentryAsgiMiddleware(\n lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True\n )\n\n return await middleware(self.scope)(receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n", "path": "sentry_sdk/integrations/django/asgi.py"}], "after_files": [{"content": "from sentry_sdk.hub import Hub\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk import _functools\n\nif MYPY:\n from typing import Any\n\n\ntry:\n from asyncio import iscoroutinefunction\nexcept ImportError:\n iscoroutinefunction = None # type: ignore\n\n\ntry:\n from sentry_sdk.integrations.django.asgi import wrap_async_view\nexcept (ImportError, SyntaxError):\n wrap_async_view = None # type: ignore\n\n\ndef patch_views():\n # type: () -> None\n\n from django.core.handlers.base import BaseHandler\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_make_view_atomic = BaseHandler.make_view_atomic\n\n @_functools.wraps(old_make_view_atomic)\n def sentry_patched_make_view_atomic(self, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n callback = old_make_view_atomic(self, *args, **kwargs)\n\n # XXX: The wrapper function is created for every request. Find more\n # efficient way to wrap views (or build a cache?)\n\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n\n if integration is not None and integration.middleware_spans:\n\n if (\n iscoroutinefunction is not None\n and wrap_async_view is not None\n and iscoroutinefunction(callback)\n ):\n sentry_wrapped_callback = wrap_async_view(hub, callback)\n else:\n sentry_wrapped_callback = _wrap_sync_view(hub, callback)\n\n else:\n sentry_wrapped_callback = callback\n\n return sentry_wrapped_callback\n\n BaseHandler.make_view_atomic = sentry_patched_make_view_atomic\n\n\ndef _wrap_sync_view(hub, callback):\n # type: (Hub, Any) -> Any\n @_functools.wraps(callback)\n def sentry_wrapped_callback(request, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n with hub.start_span(\n op=\"django.view\", description=request.resolver_match.view_name\n ):\n return callback(request, *args, **kwargs)\n\n return sentry_wrapped_callback\n", "path": "sentry_sdk/integrations/django/views.py"}, {"content": "\"\"\"\nInstrumentation for Django 3.0\n\nSince this file contains `async def` it is conditionally imported in\n`sentry_sdk.integrations.django` (depending on the existence of\n`django.core.handlers.asgi`.\n\"\"\"\n\nfrom sentry_sdk import Hub, _functools\nfrom sentry_sdk._types import MYPY\n\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\n\nif MYPY:\n from typing import Any\n from typing import Union\n\n from django.http.response import HttpResponse\n\n\ndef patch_django_asgi_handler_impl(cls):\n # type: (Any) -> None\n\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, scope, receive, send):\n # type: (Any, Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, scope, receive, send)\n\n middleware = 
SentryAsgiMiddleware(\n old_app.__get__(self, cls), unsafe_context_data=True\n )._run_asgi3\n return await middleware(scope, receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n\n\ndef patch_get_response_async(cls, _before_get_response):\n # type: (Any, Any) -> None\n old_get_response_async = cls.get_response_async\n\n async def sentry_patched_get_response_async(self, request):\n # type: (Any, Any) -> Union[HttpResponse, BaseException]\n _before_get_response(request)\n return await old_get_response_async(self, request)\n\n cls.get_response_async = sentry_patched_get_response_async\n\n\ndef patch_channels_asgi_handler_impl(cls):\n # type: (Any) -> None\n\n from sentry_sdk.integrations.django import DjangoIntegration\n\n old_app = cls.__call__\n\n async def sentry_patched_asgi_handler(self, receive, send):\n # type: (Any, Any, Any) -> Any\n if Hub.current.get_integration(DjangoIntegration) is None:\n return await old_app(self, receive, send)\n\n middleware = SentryAsgiMiddleware(\n lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True\n )\n\n return await middleware(self.scope)(receive, send)\n\n cls.__call__ = sentry_patched_asgi_handler\n\n\ndef wrap_async_view(hub, callback):\n # type: (Hub, Any) -> Any\n @_functools.wraps(callback)\n async def sentry_wrapped_callback(request, *args, **kwargs):\n # type: (Any, *Any, **Any) -> Any\n\n with hub.start_span(\n op=\"django.view\", description=request.resolver_match.view_name\n ):\n return await callback(request, *args, **kwargs)\n\n return sentry_wrapped_callback\n", "path": "sentry_sdk/integrations/django/asgi.py"}]} | 1,669 | 954 |
gh_patches_debug_797 | rasdani/github-patches | git_diff | pre-commit__pre-commit-167 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
npmrc causes npm to install to home directory instead of nodeenv
Here is what happened when I tried to get eslint installed:
```
$ pre-commit run --all-files
eslint..............................................................................................................................................................................................................................................................................................................Failed
hookid: eslint
xargs: eslint: No such file or directory
```
Moving .npmrc to nope.npmrc fixed the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages
2 from setuptools import setup
3
4
5 setup(
6 name='pre_commit',
7 description=(
8 'A framework for managing and maintaining multi-language pre-commit '
9 'hooks.'
10 ),
11 url='https://github.com/pre-commit/pre-commit',
12 version='0.2.9',
13
14 author='Anthony Sottile',
15 author_email='[email protected]',
16
17 platforms='linux',
18 classifiers=[
19 'License :: OSI Approved :: MIT License',
20 'Programming Language :: Python :: 2',
21 'Programming Language :: Python :: 2.6',
22 'Programming Language :: Python :: 2.7',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.3',
25 'Programming Language :: Python :: 3.4',
26 'Programming Language :: Python :: Implementation :: CPython',
27 'Programming Language :: Python :: Implementation :: PyPy',
28 ],
29
30 packages=find_packages('.', exclude=('tests*', 'testing*')),
31 package_data={
32 'pre_commit': [
33 'resources/pre-commit-hook',
34 'resources/rbenv.tar.gz',
35 'resources/ruby-build.tar.gz',
36 'resources/ruby-download.tar.gz',
37 ]
38 },
39 install_requires=[
40 'argparse',
41 'aspy.yaml',
42 'cached-property',
43 'jsonschema',
44 'nodeenv>=0.9.4',
45 'ordereddict',
46 'plumbum',
47 'pyyaml',
48 'simplejson',
49 'virtualenv',
50 ],
51 entry_points={
52 'console_scripts': [
53 'pre-commit = pre_commit.main:main',
54 'validate-config = pre_commit.clientlib.validate_config:run',
55 'validate-manifest = pre_commit.clientlib.validate_manifest:run',
56 ],
57 },
58 )
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@
'aspy.yaml',
'cached-property',
'jsonschema',
- 'nodeenv>=0.9.4',
+ 'nodeenv>=0.11.1',
'ordereddict',
'plumbum',
'pyyaml',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,7 @@\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n- 'nodeenv>=0.9.4',\n+ 'nodeenv>=0.11.1',\n 'ordereddict',\n 'plumbum',\n 'pyyaml',\n", "issue": "npmrc causes npm to install to home directory instead of nodeenv\nHere is what happened when I tried to get eslint installed: \n\n```\n$ pre-commit run --all-files\neslint..............................................................................................................................................................................................................................................................................................................Failed\nhookid: eslint\n\nxargs: eslint: No such file or directory\n```\n\nMoving .npmrc to nope.npmrc fixed the issue.\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2.9',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/pre-commit-hook',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.9.4',\n 'ordereddict',\n 'plumbum',\n 'pyyaml',\n 'simplejson',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'validate-config = pre_commit.clientlib.validate_config:run',\n 'validate-manifest = pre_commit.clientlib.validate_manifest:run',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.2.9',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/pre-commit-hook',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n 
'ordereddict',\n 'plumbum',\n 'pyyaml',\n 'simplejson',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'validate-config = pre_commit.clientlib.validate_config:run',\n 'validate-manifest = pre_commit.clientlib.validate_manifest:run',\n ],\n },\n)\n", "path": "setup.py"}]} | 823 | 89 |
gh_patches_debug_20356 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5615 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tools/c7n-org - azure subscription generation includes disabled subscriptions
per report on gitter.
ngibbondaimler - We used azuresubs.py from c7n-org to generate a list of our subscriptions, however it's picking up disabled subscriptions and c7n-org throws an exception when it tries to read from a disabled sub to apply policy. Is there a suggested workaround for this?
Stefan Gordon -
I believe the return from the subscription API list call includes a state attribute, something like "state": "Enabled" - So for your scenario perhaps you can just add a check on that value at https://github.com/cloud-custodian/cloud-custodian/blob/master/tools/c7n_org/scripts/azuresubs.py#L34
Additionally if you can file an issue with the error you are getting in c7n-org I would say that we should update it to handle this error properly. Generating a list without those is an easy workaround but it shouldn't fail on them.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_org/scripts/azuresubs.py`
Content:
```
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import click
16 from c7n_azure.session import Session
17 from c7n.utils import yaml_dump
18 from azure.mgmt.resource.subscriptions import SubscriptionClient
19
20
21 @click.command()
22 @click.option(
23 '-f', '--output', type=click.File('w'),
24 help="File to store the generated config (default stdout)")
25 def main(output):
26 """
27 Generate a c7n-org subscriptions config file
28 """
29
30 client = SubscriptionClient(Session().get_credentials())
31 subs = [sub.serialize(True) for sub in client.subscriptions.list()]
32 results = []
33 for sub in subs:
34 sub_info = {
35 'subscription_id': sub['subscriptionId'],
36 'name': sub['displayName']
37 }
38 results.append(sub_info)
39
40 print(yaml_dump({'subscriptions': results}), file=output)
41
42
43 if __name__ == '__main__':
44 main()
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/c7n_org/scripts/azuresubs.py b/tools/c7n_org/scripts/azuresubs.py
--- a/tools/c7n_org/scripts/azuresubs.py
+++ b/tools/c7n_org/scripts/azuresubs.py
@@ -22,7 +22,12 @@
@click.option(
'-f', '--output', type=click.File('w'),
help="File to store the generated config (default stdout)")
-def main(output):
[email protected](
+ '-s', '--state', multiple=True, type=click.Choice(
+ ['Enabled', 'Warned', 'PastDue', 'Disabled', 'Deleted']),
+ default=('Enabled',),
+ help="File to store the generated config (default stdout)")
+def main(output, state):
"""
Generate a c7n-org subscriptions config file
"""
@@ -31,6 +36,8 @@
subs = [sub.serialize(True) for sub in client.subscriptions.list()]
results = []
for sub in subs:
+ if state and sub['state'] not in state:
+ continue
sub_info = {
'subscription_id': sub['subscriptionId'],
'name': sub['displayName']
| {"golden_diff": "diff --git a/tools/c7n_org/scripts/azuresubs.py b/tools/c7n_org/scripts/azuresubs.py\n--- a/tools/c7n_org/scripts/azuresubs.py\n+++ b/tools/c7n_org/scripts/azuresubs.py\n@@ -22,7 +22,12 @@\n @click.option(\n '-f', '--output', type=click.File('w'),\n help=\"File to store the generated config (default stdout)\")\n-def main(output):\[email protected](\n+ '-s', '--state', multiple=True, type=click.Choice(\n+ ['Enabled', 'Warned', 'PastDue', 'Disabled', 'Deleted']),\n+ default=('Enabled',),\n+ help=\"File to store the generated config (default stdout)\")\n+def main(output, state):\n \"\"\"\n Generate a c7n-org subscriptions config file\n \"\"\"\n@@ -31,6 +36,8 @@\n subs = [sub.serialize(True) for sub in client.subscriptions.list()]\n results = []\n for sub in subs:\n+ if state and sub['state'] not in state:\n+ continue\n sub_info = {\n 'subscription_id': sub['subscriptionId'],\n 'name': sub['displayName']\n", "issue": "tools/c7n-org - azure subscription generation includes disabled subscriptions\n\r\nper report on gitter.\r\n\r\nngibbondaimler - We used azuresubs.py from c7n-org to generate a list of our subscriptions, however it's picking up disabled subscriptions and c7n-org throws an exception when it tries to read from a disabled sub to apply policy. Is there a suggested workaround for this?\r\n\r\n\r\nStefan Gordon -\r\nI believe the return from the subscription API list call includes a state attribute, something like \"state\": \"Enabled\" - So for your scenario perhaps you can just add a check on that value at https://github.com/cloud-custodian/cloud-custodian/blob/master/tools/c7n_org/scripts/azuresubs.py#L34\r\nAdditionally if you can file an issue with the error you are getting in c7n-org I would say that we should update it to handle this error properly. 
Generating a list without those is an easy workaround but it shouldn't fail on them.\r\n\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport click\nfrom c7n_azure.session import Session\nfrom c7n.utils import yaml_dump\nfrom azure.mgmt.resource.subscriptions import SubscriptionClient\n\n\[email protected]()\[email protected](\n '-f', '--output', type=click.File('w'),\n help=\"File to store the generated config (default stdout)\")\ndef main(output):\n \"\"\"\n Generate a c7n-org subscriptions config file\n \"\"\"\n\n client = SubscriptionClient(Session().get_credentials())\n subs = [sub.serialize(True) for sub in client.subscriptions.list()]\n results = []\n for sub in subs:\n sub_info = {\n 'subscription_id': sub['subscriptionId'],\n 'name': sub['displayName']\n }\n results.append(sub_info)\n\n print(yaml_dump({'subscriptions': results}), file=output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/c7n_org/scripts/azuresubs.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport click\nfrom c7n_azure.session import Session\nfrom c7n.utils import yaml_dump\nfrom azure.mgmt.resource.subscriptions import SubscriptionClient\n\n\[email protected]()\[email protected](\n '-f', '--output', type=click.File('w'),\n help=\"File to store the generated config (default stdout)\")\[email protected](\n '-s', '--state', multiple=True, type=click.Choice(\n ['Enabled', 'Warned', 'PastDue', 'Disabled', 'Deleted']),\n default=('Enabled',),\n help=\"File to store the generated config (default stdout)\")\ndef main(output, state):\n \"\"\"\n Generate a c7n-org subscriptions config file\n \"\"\"\n\n client = SubscriptionClient(Session().get_credentials())\n subs = [sub.serialize(True) for sub in client.subscriptions.list()]\n results = []\n for sub in subs:\n if state and sub['state'] not in state:\n continue\n sub_info = {\n 'subscription_id': sub['subscriptionId'],\n 'name': sub['displayName']\n }\n results.append(sub_info)\n\n print(yaml_dump({'subscriptions': results}), file=output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/c7n_org/scripts/azuresubs.py"}]} | 869 | 265 |
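The patch above filters on the `state` field that the Azure subscriptions API returns for each subscription. Below is a minimal, self-contained sketch of that filtering step only; the `subs` dictionaries stand in for the serialized objects that `SubscriptionClient(...).subscriptions.list()` would return, and the function name `filter_subscriptions` is illustrative rather than part of c7n-org.

```python
def filter_subscriptions(subs, allowed_states=("Enabled",)):
    """Keep only subscriptions whose 'state' is in allowed_states."""
    results = []
    for sub in subs:
        if allowed_states and sub.get("state") not in allowed_states:
            continue  # skip Disabled/Deleted/Warned/PastDue subscriptions
        results.append({
            "subscription_id": sub["subscriptionId"],
            "name": sub["displayName"],
        })
    return results


if __name__ == "__main__":
    sample = [
        {"subscriptionId": "1111", "displayName": "prod", "state": "Enabled"},
        {"subscriptionId": "2222", "displayName": "legacy", "state": "Disabled"},
    ]
    print(filter_subscriptions(sample))  # only the Enabled subscription survives
```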
gh_patches_debug_13047 | rasdani/github-patches | git_diff | doccano__doccano-1558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multi-label text classification export issues: same classes but in different orders
How to reproduce the behaviour
---------
<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->
We are two annotators on a multi-label classification project. When I export the annotations, for some examples, me and my co-annotator have put the same labels, but on the exported CSV, they do not appear in the same order:
Annotator 1:
| text | labels |
| --- | --- |
| example 1 | label1#label2#label3 |
Annotator 2:
| text | labels |
| --- | --- |
| example 1 | label2#label3#label1 |
As I try to use these CSVs for comparing our annotations, this brings more difficulty.
<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Debian
* Python Version Used: Don't know, I pulled the latest version from Docker Hub
* When you install doccano: 3 days ago
* How did you install doccano (Heroku button etc): Docker
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/api/views/download/writer.py`
Content:
```
1 import abc
2 import csv
3 import itertools
4 import json
5 import os
6 import uuid
7 import zipfile
8 from collections import defaultdict
9 from typing import Dict, Iterable, Iterator, List
10
11 from .data import Record
12
13
14 class BaseWriter:
15
16 def __init__(self, tmpdir: str):
17 self.tmpdir = tmpdir
18
19 @abc.abstractmethod
20 def write(self, records: Iterator[Record]) -> str:
21 raise NotImplementedError()
22
23 def write_zip(self, filenames: Iterable):
24 save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))
25 with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
26 for file in filenames:
27 zf.write(filename=file, arcname=os.path.basename(file))
28 return save_file
29
30
31 class LineWriter(BaseWriter):
32 extension = 'txt'
33
34 def write(self, records: Iterator[Record]) -> str:
35 files = {}
36 for record in records:
37 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
38 if filename not in files:
39 f = open(filename, mode='a')
40 files[filename] = f
41 f = files[filename]
42 line = self.create_line(record)
43 f.write(f'{line}\n')
44 for f in files.values():
45 f.close()
46 save_file = self.write_zip(files)
47 for file in files:
48 os.remove(file)
49 return save_file
50
51 @abc.abstractmethod
52 def create_line(self, record) -> str:
53 raise NotImplementedError()
54
55
56 class CsvWriter(BaseWriter):
57 extension = 'csv'
58
59 def write(self, records: Iterator[Record]) -> str:
60 writers = {}
61 file_handlers = set()
62 records = list(records)
63 header = self.create_header(records)
64 for record in records:
65 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
66 if filename not in writers:
67 f = open(filename, mode='a', encoding='utf-8')
68 writer = csv.DictWriter(f, header)
69 writer.writeheader()
70 writers[filename] = writer
71 file_handlers.add(f)
72 writer = writers[filename]
73 line = self.create_line(record)
74 writer.writerow(line)
75
76 for f in file_handlers:
77 f.close()
78 save_file = self.write_zip(writers)
79 for file in writers:
80 os.remove(file)
81 return save_file
82
83 def create_line(self, record) -> Dict:
84 return {
85 'id': record.id,
86 'data': record.data,
87 'label': '#'.join(record.label),
88 **record.metadata
89 }
90
91 def create_header(self, records: List[Record]) -> Iterable[str]:
92 header = ['id', 'data', 'label']
93 header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))
94 return header
95
96
97 class JSONWriter(BaseWriter):
98 extension = 'json'
99
100 def write(self, records: Iterator[Record]) -> str:
101 writers = {}
102 contents = defaultdict(list)
103 for record in records:
104 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
105 if filename not in writers:
106 f = open(filename, mode='a', encoding='utf-8')
107 writers[filename] = f
108 line = self.create_line(record)
109 contents[filename].append(line)
110
111 for filename, f in writers.items():
112 content = contents[filename]
113 json.dump(content, f, ensure_ascii=False)
114 f.close()
115
116 save_file = self.write_zip(writers)
117 for file in writers:
118 os.remove(file)
119 return save_file
120
121 def create_line(self, record) -> Dict:
122 return {
123 'id': record.id,
124 'data': record.data,
125 'label': record.label,
126 **record.metadata
127 }
128
129
130 class JSONLWriter(LineWriter):
131 extension = 'jsonl'
132
133 def create_line(self, record):
134 return json.dumps({
135 'id': record.id,
136 'data': record.data,
137 'label': record.label,
138 **record.metadata
139 }, ensure_ascii=False)
140
141
142 class FastTextWriter(LineWriter):
143 extension = 'txt'
144
145 def create_line(self, record):
146 line = [f'__label__{label}' for label in record.label]
147 line.append(record.data)
148 line = ' '.join(line)
149 return line
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py
--- a/backend/api/views/download/writer.py
+++ b/backend/api/views/download/writer.py
@@ -84,7 +84,7 @@
return {
'id': record.id,
'data': record.data,
- 'label': '#'.join(record.label),
+ 'label': '#'.join(sorted(record.label)),
**record.metadata
}
@@ -144,6 +144,7 @@
def create_line(self, record):
line = [f'__label__{label}' for label in record.label]
+ line.sort()
line.append(record.data)
line = ' '.join(line)
return line
| {"golden_diff": "diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py\n--- a/backend/api/views/download/writer.py\n+++ b/backend/api/views/download/writer.py\n@@ -84,7 +84,7 @@\n return {\n 'id': record.id,\n 'data': record.data,\n- 'label': '#'.join(record.label),\n+ 'label': '#'.join(sorted(record.label)),\n **record.metadata\n }\n \n@@ -144,6 +144,7 @@\n \n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n+ line.sort()\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "issue": "Mutli-label text classification export issues: same classes but in different orders\nHow to reproduce the behaviour\r\n---------\r\n<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->\r\nWe are two annotators on a multi-label classification project. When I export the annotations, for some examples, me and my co-annotator have put the same labels, but on the exported CSV, they do not appear in the same order:\r\n\r\nAnnotator 1:\r\n\r\n| text | labels |\r\n| example 1 | label1#label2#label3 |\r\n\r\nAnnotator 2:\r\n\r\n| text | labels |\r\n| example 1 | label2#label3#label1 |\r\n\r\nAs I try to use these CSVs for comparing our annotations, this brings more difficulty.\r\n\r\n<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: Debian\r\n* Python Version Used: Don't know, I pulled the latest version from Docker Hub\r\n* When you install doccano: 3 days ago\r\n* How did you install doccano (Heroku button etc): Docker\r\n\n", "before_files": [{"content": "import abc\nimport csv\nimport itertools\nimport json\nimport os\nimport uuid\nimport zipfile\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List\n\nfrom .data import Record\n\n\nclass BaseWriter:\n\n def __init__(self, tmpdir: str):\n self.tmpdir = tmpdir\n\n @abc.abstractmethod\n def write(self, records: Iterator[Record]) -> str:\n raise NotImplementedError()\n\n def write_zip(self, filenames: Iterable):\n save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))\n with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:\n for file in filenames:\n zf.write(filename=file, arcname=os.path.basename(file))\n return save_file\n\n\nclass LineWriter(BaseWriter):\n extension = 'txt'\n\n def write(self, records: Iterator[Record]) -> str:\n files = {}\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in files:\n f = open(filename, mode='a')\n files[filename] = f\n f = files[filename]\n line = self.create_line(record)\n f.write(f'{line}\\n')\n for f in files.values():\n f.close()\n save_file = self.write_zip(files)\n for file in files:\n os.remove(file)\n return save_file\n\n @abc.abstractmethod\n def create_line(self, record) -> str:\n raise NotImplementedError()\n\n\nclass CsvWriter(BaseWriter):\n extension = 'csv'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n file_handlers = set()\n records = list(records)\n header = self.create_header(records)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', 
encoding='utf-8')\n writer = csv.DictWriter(f, header)\n writer.writeheader()\n writers[filename] = writer\n file_handlers.add(f)\n writer = writers[filename]\n line = self.create_line(record)\n writer.writerow(line)\n\n for f in file_handlers:\n f.close()\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': '#'.join(record.label),\n **record.metadata\n }\n\n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))\n return header\n\n\nclass JSONWriter(BaseWriter):\n extension = 'json'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n contents = defaultdict(list)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writers[filename] = f\n line = self.create_line(record)\n contents[filename].append(line)\n\n for filename, f in writers.items():\n content = contents[filename]\n json.dump(content, f, ensure_ascii=False)\n f.close()\n\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }\n\n\nclass JSONLWriter(LineWriter):\n extension = 'jsonl'\n\n def create_line(self, record):\n return json.dumps({\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }, ensure_ascii=False)\n\n\nclass FastTextWriter(LineWriter):\n extension = 'txt'\n\n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "path": "backend/api/views/download/writer.py"}], "after_files": [{"content": "import abc\nimport csv\nimport itertools\nimport json\nimport os\nimport uuid\nimport zipfile\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List\n\nfrom .data import Record\n\n\nclass BaseWriter:\n\n def __init__(self, tmpdir: str):\n self.tmpdir = tmpdir\n\n @abc.abstractmethod\n def write(self, records: Iterator[Record]) -> str:\n raise NotImplementedError()\n\n def write_zip(self, filenames: Iterable):\n save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))\n with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:\n for file in filenames:\n zf.write(filename=file, arcname=os.path.basename(file))\n return save_file\n\n\nclass LineWriter(BaseWriter):\n extension = 'txt'\n\n def write(self, records: Iterator[Record]) -> str:\n files = {}\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in files:\n f = open(filename, mode='a')\n files[filename] = f\n f = files[filename]\n line = self.create_line(record)\n f.write(f'{line}\\n')\n for f in files.values():\n f.close()\n save_file = self.write_zip(files)\n for file in files:\n os.remove(file)\n return save_file\n\n @abc.abstractmethod\n def create_line(self, record) -> str:\n raise NotImplementedError()\n\n\nclass CsvWriter(BaseWriter):\n extension = 'csv'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n file_handlers = set()\n records = list(records)\n header = 
self.create_header(records)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writer = csv.DictWriter(f, header)\n writer.writeheader()\n writers[filename] = writer\n file_handlers.add(f)\n writer = writers[filename]\n line = self.create_line(record)\n writer.writerow(line)\n\n for f in file_handlers:\n f.close()\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': '#'.join(sorted(record.label)),\n **record.metadata\n }\n\n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))\n return header\n\n\nclass JSONWriter(BaseWriter):\n extension = 'json'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n contents = defaultdict(list)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writers[filename] = f\n line = self.create_line(record)\n contents[filename].append(line)\n\n for filename, f in writers.items():\n content = contents[filename]\n json.dump(content, f, ensure_ascii=False)\n f.close()\n\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }\n\n\nclass JSONLWriter(LineWriter):\n extension = 'jsonl'\n\n def create_line(self, record):\n return json.dumps({\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }, ensure_ascii=False)\n\n\nclass FastTextWriter(LineWriter):\n extension = 'txt'\n\n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n line.sort()\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "path": "backend/api/views/download/writer.py"}]} | 1,841 | 164 |
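The one-line changes above (`'#'.join(sorted(record.label))` for CSV and `line.sort()` for the fastText writer) are what make the exported label column independent of annotation order. A tiny self-contained illustration, using hypothetical label values rather than real doccano records:

```python
annotator_1 = ["label1", "label2", "label3"]
annotator_2 = ["label2", "label3", "label1"]  # same labels, different order

def to_csv_label(labels):
    # Sorting before joining makes the CSV cell independent of annotation order.
    return "#".join(sorted(labels))

assert to_csv_label(annotator_1) == to_csv_label(annotator_2)
print(to_csv_label(annotator_1))  # label1#label2#label3
```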
gh_patches_debug_33390 | rasdani/github-patches | git_diff | kivy__kivy-1947 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TextInput crashes while using clipboard bubble
After opening clipboard bubble, keyboard doesn't close anymore.
Moreover, when closing application, it gives:
```
I/python ( 4932): [INFO ] [Clipboard ] Provider: dummy(['clipboard_android'] ignored)
I/python ( 4932): [INFO ] [Base ] Leaving application in progress...
I/python ( 4932): Python for android ended.
W/dalvikvm( 4932): threadid=10: thread exiting with uncaught exception (group=0x4001d560)
E/AndroidRuntime( 4932): FATAL EXCEPTION: Thread-11
E/AndroidRuntime( 4932): java.lang.NoClassDefFoundError: android.content.ClipData
E/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.nativeInit(Native Method)
E/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.run(SDLSurfaceView.java:725)
E/AndroidRuntime( 4932): at java.lang.Thread.run(Thread.java:1019)
E/AndroidRuntime( 4932): Caused by: java.lang.ClassNotFoundException: android.content.ClipData in loader dalvik.system.PathClassLoader[/data/app/org.emanuele.LyricsDL-2.apk]
E/AndroidRuntime( 4932): at dalvik.system.PathClassLoader.findClass(PathClassLoader.java:240)
E/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:551)
E/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:511)
E/AndroidRuntime( 4932): ... 3 more
```
If specifing "use_bubble: False" it works correctly, but clipboard is obviously disabled.
android sdk 14
kivy 1.8.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/core/clipboard/clipboard_android.py`
Content:
```
1 '''
2 Clipboard Android
3 =================
4
5 Android implementation of Clipboard provider, using Pyjnius.
6 '''
7
8 __all__ = ('ClipboardAndroid', )
9
10 from kivy.core.clipboard import ClipboardBase
11 from jnius import autoclass
12 from android.runnable import run_on_ui_thread
13
14 AndroidString = autoclass('java.lang.String')
15 PythonActivity = autoclass('org.renpy.android.PythonActivity')
16 Context = autoclass('android.content.Context')
17 ClipData = autoclass('android.content.ClipData')
18 ClipDescription = autoclass('android.content.ClipDescription')
19
20
21 class ClipboardAndroid(ClipboardBase):
22
23 def __init__(self):
24 super(ClipboardAndroid, self).__init__()
25 self._clipboard = None
26 self._data = dict()
27 self._data['text/plain'] = None
28 self._data['application/data'] = None
29 PythonActivity._clipboard = None
30
31 def get(self, mimetype='text/plain'):
32 return self._get(mimetype)
33
34 def put(self, data, mimetype='text/plain'):
35 self._set(data, mimetype)
36
37 def get_types(self):
38 return list(self._data.keys())
39
40 @run_on_ui_thread
41 def _initialize_clipboard(self):
42 PythonActivity._clipboard = PythonActivity.getSystemService(
43 Context.CLIPBOARD_SERVICE)
44
45 def _get_clipboard(f):
46 def called(*args, **kargs):
47 self = args[0]
48 if not PythonActivity._clipboard:
49 self._initialize_clipboard()
50 import time
51 while not PythonActivity._clipboard:
52 time.sleep(.01)
53 return f(*args, **kargs)
54 return called
55
56 @_get_clipboard
57 def _get(self, mimetype='text/plain'):
58 clippy = PythonActivity._clipboard
59 primary_clip = clippy.getPrimaryClip()
60 if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
61 ClipDescription.MIMETYPE_TEXT_PLAIN):
62 data = primary_clip.getItemAt(0).getText().toString()
63 else:
64 # TODO: non text data types Not yet implemented
65 data = ''
66 return data
67
68 @_get_clipboard
69 def _set(self, data, mimetype):
70 clippy = PythonActivity._clipboard
71 new_clip = ClipData.newPlainText(AndroidString(""),
72 AndroidString(data))
73 # put text data onto clipboard
74 clippy.setPrimaryClip(new_clip)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/core/clipboard/clipboard_android.py b/kivy/core/clipboard/clipboard_android.py
--- a/kivy/core/clipboard/clipboard_android.py
+++ b/kivy/core/clipboard/clipboard_android.py
@@ -14,8 +14,8 @@
AndroidString = autoclass('java.lang.String')
PythonActivity = autoclass('org.renpy.android.PythonActivity')
Context = autoclass('android.content.Context')
-ClipData = autoclass('android.content.ClipData')
-ClipDescription = autoclass('android.content.ClipDescription')
+VER = autoclass('android.os.Build$VERSION')
+sdk = VER.SDK_INT
class ClipboardAndroid(ClipboardBase):
@@ -56,19 +56,29 @@
@_get_clipboard
def _get(self, mimetype='text/plain'):
clippy = PythonActivity._clipboard
- primary_clip = clippy.getPrimaryClip()
- if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
- ClipDescription.MIMETYPE_TEXT_PLAIN):
- data = primary_clip.getItemAt(0).getText().toString()
+ if sdk < 11:
+ data = clippy.getText().toString()
else:
- # TODO: non text data types Not yet implemented
- data = ''
+ ClipDescription = autoclass('android.content.ClipDescription')
+ primary_clip = clippy.getPrimaryClip()
+ if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(
+ ClipDescription.MIMETYPE_TEXT_PLAIN):
+ data = primary_clip.getItemAt(0).getText().toString()
+ else:
+ # TODO: non text data types Not yet implemented
+ data = ''
return data
@_get_clipboard
def _set(self, data, mimetype):
clippy = PythonActivity._clipboard
- new_clip = ClipData.newPlainText(AndroidString(""),
+
+ if sdk < 11:
+ #versions previous to honeycomb
+ clippy.setText(AndroidString(data))
+ else:
+ ClipData = autoclass('android.content.ClipData')
+ new_clip = ClipData.newPlainText(AndroidString(""),
AndroidString(data))
- # put text data onto clipboard
- clippy.setPrimaryClip(new_clip)
+ # put text data onto clipboard
+ clippy.setPrimaryClip(new_clip)
| {"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_android.py b/kivy/core/clipboard/clipboard_android.py\n--- a/kivy/core/clipboard/clipboard_android.py\n+++ b/kivy/core/clipboard/clipboard_android.py\n@@ -14,8 +14,8 @@\n AndroidString = autoclass('java.lang.String')\n PythonActivity = autoclass('org.renpy.android.PythonActivity')\n Context = autoclass('android.content.Context')\n-ClipData = autoclass('android.content.ClipData')\n-ClipDescription = autoclass('android.content.ClipDescription')\n+VER = autoclass('android.os.Build$VERSION')\n+sdk = VER.SDK_INT\n \n \n class ClipboardAndroid(ClipboardBase):\n@@ -56,19 +56,29 @@\n @_get_clipboard\n def _get(self, mimetype='text/plain'):\n clippy = PythonActivity._clipboard\n- primary_clip = clippy.getPrimaryClip()\n- if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n- ClipDescription.MIMETYPE_TEXT_PLAIN):\n- data = primary_clip.getItemAt(0).getText().toString()\n+ if sdk < 11:\n+ data = clippy.getText().toString()\n else:\n- # TODO: non text data types Not yet implemented\n- data = ''\n+ ClipDescription = autoclass('android.content.ClipDescription')\n+ primary_clip = clippy.getPrimaryClip()\n+ if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n+ ClipDescription.MIMETYPE_TEXT_PLAIN):\n+ data = primary_clip.getItemAt(0).getText().toString()\n+ else:\n+ # TODO: non text data types Not yet implemented\n+ data = ''\n return data\n \n @_get_clipboard\n def _set(self, data, mimetype):\n clippy = PythonActivity._clipboard\n- new_clip = ClipData.newPlainText(AndroidString(\"\"),\n+\n+ if sdk < 11:\n+ #versions previous to honeycomb\n+ clippy.setText(AndroidString(data))\n+ else:\n+ ClipData = autoclass('android.content.ClipData')\n+ new_clip = ClipData.newPlainText(AndroidString(\"\"),\n AndroidString(data))\n- # put text data onto clipboard\n- clippy.setPrimaryClip(new_clip)\n+ # put text data onto clipboard\n+ clippy.setPrimaryClip(new_clip)\n", "issue": "TextInput crashes while using clipboard bubble\nAfter opening clipboard bubble, keyboard doesn't close anymore.\nMoreover, when closing application, it gives:\n\n```\nI/python ( 4932): [INFO ] [Clipboard ] Provider: dummy(['clipboard_android'] ignored)\nI/python ( 4932): [INFO ] [Base ] Leaving application in progress...\nI/python ( 4932): Python for android ended.\nW/dalvikvm( 4932): threadid=10: thread exiting with uncaught exception (group=0x4001d560)\nE/AndroidRuntime( 4932): FATAL EXCEPTION: Thread-11\nE/AndroidRuntime( 4932): java.lang.NoClassDefFoundError: android.content.ClipData\nE/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.nativeInit(Native Method)\nE/AndroidRuntime( 4932): at org.renpy.android.SDLSurfaceView.run(SDLSurfaceView.java:725)\nE/AndroidRuntime( 4932): at java.lang.Thread.run(Thread.java:1019)\nE/AndroidRuntime( 4932): Caused by: java.lang.ClassNotFoundException: android.content.ClipData in loader dalvik.system.PathClassLoader[/data/app/org.emanuele.LyricsDL-2.apk]\nE/AndroidRuntime( 4932): at dalvik.system.PathClassLoader.findClass(PathClassLoader.java:240)\nE/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:551)\nE/AndroidRuntime( 4932): at java.lang.ClassLoader.loadClass(ClassLoader.java:511)\nE/AndroidRuntime( 4932): ... 3 more\n```\n\nIf specifing \"use_bubble: False\" it works correctly, but clipboard is obviously disabled.\n\nandroid sdk 14\nkivy 1.8.0\n## <bountysource-plugin>\n\nWant to back this issue? 
**[Post a bounty on it!](https://www.bountysource.com/issues/1436926-textinput-crashes-while-using-clipboard-bubble?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "'''\nClipboard Android\n=================\n\nAndroid implementation of Clipboard provider, using Pyjnius.\n'''\n\n__all__ = ('ClipboardAndroid', )\n\nfrom kivy.core.clipboard import ClipboardBase\nfrom jnius import autoclass\nfrom android.runnable import run_on_ui_thread\n\nAndroidString = autoclass('java.lang.String')\nPythonActivity = autoclass('org.renpy.android.PythonActivity')\nContext = autoclass('android.content.Context')\nClipData = autoclass('android.content.ClipData')\nClipDescription = autoclass('android.content.ClipDescription')\n\n\nclass ClipboardAndroid(ClipboardBase):\n\n def __init__(self):\n super(ClipboardAndroid, self).__init__()\n self._clipboard = None\n self._data = dict()\n self._data['text/plain'] = None\n self._data['application/data'] = None\n PythonActivity._clipboard = None\n\n def get(self, mimetype='text/plain'):\n return self._get(mimetype)\n\n def put(self, data, mimetype='text/plain'):\n self._set(data, mimetype)\n\n def get_types(self):\n return list(self._data.keys())\n\n @run_on_ui_thread\n def _initialize_clipboard(self):\n PythonActivity._clipboard = PythonActivity.getSystemService(\n Context.CLIPBOARD_SERVICE)\n\n def _get_clipboard(f):\n def called(*args, **kargs):\n self = args[0]\n if not PythonActivity._clipboard:\n self._initialize_clipboard()\n import time\n while not PythonActivity._clipboard:\n time.sleep(.01)\n return f(*args, **kargs)\n return called\n\n @_get_clipboard\n def _get(self, mimetype='text/plain'):\n clippy = PythonActivity._clipboard\n primary_clip = clippy.getPrimaryClip()\n if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n ClipDescription.MIMETYPE_TEXT_PLAIN):\n data = primary_clip.getItemAt(0).getText().toString()\n else:\n # TODO: non text data types Not yet implemented\n data = ''\n return data\n\n @_get_clipboard\n def _set(self, data, mimetype):\n clippy = PythonActivity._clipboard\n new_clip = ClipData.newPlainText(AndroidString(\"\"),\n AndroidString(data))\n # put text data onto clipboard\n clippy.setPrimaryClip(new_clip)\n", "path": "kivy/core/clipboard/clipboard_android.py"}], "after_files": [{"content": "'''\nClipboard Android\n=================\n\nAndroid implementation of Clipboard provider, using Pyjnius.\n'''\n\n__all__ = ('ClipboardAndroid', )\n\nfrom kivy.core.clipboard import ClipboardBase\nfrom jnius import autoclass\nfrom android.runnable import run_on_ui_thread\n\nAndroidString = autoclass('java.lang.String')\nPythonActivity = autoclass('org.renpy.android.PythonActivity')\nContext = autoclass('android.content.Context')\nVER = autoclass('android.os.Build$VERSION')\nsdk = VER.SDK_INT\n\n\nclass ClipboardAndroid(ClipboardBase):\n\n def __init__(self):\n super(ClipboardAndroid, self).__init__()\n self._clipboard = None\n self._data = dict()\n self._data['text/plain'] = None\n self._data['application/data'] = None\n PythonActivity._clipboard = None\n\n def get(self, mimetype='text/plain'):\n return self._get(mimetype)\n\n def put(self, data, mimetype='text/plain'):\n self._set(data, mimetype)\n\n def get_types(self):\n return list(self._data.keys())\n\n @run_on_ui_thread\n def 
_initialize_clipboard(self):\n PythonActivity._clipboard = PythonActivity.getSystemService(\n Context.CLIPBOARD_SERVICE)\n\n def _get_clipboard(f):\n def called(*args, **kargs):\n self = args[0]\n if not PythonActivity._clipboard:\n self._initialize_clipboard()\n import time\n while not PythonActivity._clipboard:\n time.sleep(.01)\n return f(*args, **kargs)\n return called\n\n @_get_clipboard\n def _get(self, mimetype='text/plain'):\n clippy = PythonActivity._clipboard\n if sdk < 11:\n data = clippy.getText().toString()\n else:\n ClipDescription = autoclass('android.content.ClipDescription')\n primary_clip = clippy.getPrimaryClip()\n if primary_clip and clippy.getPrimaryClipDescription().hasMimeType(\n ClipDescription.MIMETYPE_TEXT_PLAIN):\n data = primary_clip.getItemAt(0).getText().toString()\n else:\n # TODO: non text data types Not yet implemented\n data = ''\n return data\n\n @_get_clipboard\n def _set(self, data, mimetype):\n clippy = PythonActivity._clipboard\n\n if sdk < 11:\n #versions previous to honeycomb\n clippy.setText(AndroidString(data))\n else:\n ClipData = autoclass('android.content.ClipData')\n new_clip = ClipData.newPlainText(AndroidString(\"\"),\n AndroidString(data))\n # put text data onto clipboard\n clippy.setPrimaryClip(new_clip)\n", "path": "kivy/core/clipboard/clipboard_android.py"}]} | 1,496 | 514 |
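The fix works because `android.content.ClipData` and `ClipDescription` only exist from API level 11 (Honeycomb) onward, so the patch reads `android.os.Build.VERSION.SDK_INT`, resolves those classes lazily, and falls back to the old `setText`/`getText` clipboard API on older devices. Below is a rough, self-contained sketch of that gating pattern; `FakeClipboard` is a stand-in for the pyjnius clipboard object, since the real calls only run on an Android device:

```python
class FakeClipboard:
    """Stand-in for the Android ClipboardManager obtained via pyjnius."""
    def setText(self, text):            # pre-API-11 interface
        self.value = text
    def setPrimaryClip(self, clip):     # API-11+ interface
        self.value = clip

def put_text(clippy, data, sdk_int):
    if sdk_int < 11:
        # ClipData does not exist before Honeycomb, so never resolve it here.
        clippy.setText(data)
    else:
        # In the real code this is: ClipData = autoclass('android.content.ClipData')
        clippy.setPrimaryClip(("text/plain", data))

board = FakeClipboard()
put_text(board, "hello", sdk_int=10)    # legacy path, no ClipData lookup
print(board.value)
```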
gh_patches_debug_9553 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3064 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not really possible to override settings
## Description
I want to override the Mathesar settings in order to allow LDAP login using django-auth-ldap. I changed the `config/settings/production.py` file that mentions: `# Override default settings ` and added the needed configuration. 
This worked fine however that file is under version control so if it is changed on the origin I'll get a conflict and would need to also merge my changes. The usual way to implement this functionality is to add a *non tracked* `local.py` file that would contain any extra configuration for each environment (either dev or production) and import *that* file from the corresponding file. I.e the production.py would be changed to:
```python
# Override default settings
try:
from .local import *
except ImportError:
pass
```
This way, if the local.py file is there it will be used to override the config, but if it isn't there it will be ignored. 
## Expected behavior
Being able to override django settings for my environment *without* keeping a fork.
## To Reproduce
Change the production.py file and you'll see that it's version controlled so it can't be easily changed!
## Environment
Not needed
## Additional context
I'd be happy to provide a PR implementing the functionality described here, i.e allow an untracked local.py file to override django settings for each *user/environment*.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings/development.py`
Content:
```
1 from config.settings.common_settings import * # noqa
2
3 # Override default settings
4
```
Path: `config/settings/production.py`
Content:
```
1 from config.settings.common_settings import * # noqa
2
3 # Override default settings
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/config/settings/development.py b/config/settings/development.py
--- a/config/settings/development.py
+++ b/config/settings/development.py
@@ -1,3 +1,10 @@
from config.settings.common_settings import * # noqa
# Override default settings
+
+
+# Use a local.py module for settings that shouldn't be version tracked
+try:
+ from .local import * # noqa
+except ImportError:
+ pass
diff --git a/config/settings/production.py b/config/settings/production.py
--- a/config/settings/production.py
+++ b/config/settings/production.py
@@ -1,3 +1,10 @@
from config.settings.common_settings import * # noqa
# Override default settings
+
+
+# Use a local.py module for settings that shouldn't be version tracked
+try:
+ from .local import * # noqa
+except ImportError:
+ pass
| {"golden_diff": "diff --git a/config/settings/development.py b/config/settings/development.py\n--- a/config/settings/development.py\n+++ b/config/settings/development.py\n@@ -1,3 +1,10 @@\n from config.settings.common_settings import * # noqa\n \n # Override default settings\n+\n+\n+# Use a local.py module for settings that shouldn't be version tracked\n+try:\n+ from .local import * # noqa\n+except ImportError:\n+ pass\ndiff --git a/config/settings/production.py b/config/settings/production.py\n--- a/config/settings/production.py\n+++ b/config/settings/production.py\n@@ -1,3 +1,10 @@\n from config.settings.common_settings import * # noqa\n \n # Override default settings\n+\n+\n+# Use a local.py module for settings that shouldn't be version tracked\n+try:\n+ from .local import * # noqa \n+except ImportError:\n+ pass\n", "issue": "Not really possible to override settings\n## Description\r\nI want to override the mathsar settings in order to allow LDAP login using django-auth-ldap. I changed the `config/settings/production.py` file that mentions: `# Override default settings ` and added the needed configuration. \r\n\r\nThis worked fine however that file is under version control so if it is changed on the origin I'll get a conflict and would need to also merge my changes. The usual way to implement this functionality is to add a *non tracked* `local.py` file that would contain any extra configuration for each environment (either dev or production) and import *that* file from the corresponding file. I.e the production.py would be changed to:\r\n\r\n```python\r\n# Override default settings \r\n\r\ntry:\r\n from .local import *\r\nexcept ImportError:\r\n pass\r\n```\r\n\r\nThis way, if the local.py file is there it will be used to override the config but if it isnt' there it will be ignored. \r\n\r\n## Expected behavior\r\nBeing able to override django settings for my environment *without* keeping a fork.\r\n\r\n## To Reproduce\r\nChange the production.py file and you'll see that it's version controlled so it can't be easily changed!\r\n\r\n## Environment\r\nNot needed\r\n\r\n## Additional context\r\nI'd be happy to provide a PR implementing the functionality described here, i.e allow an untracked local.py file to override django settings for each *user/environment*.\n", "before_files": [{"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n", "path": "config/settings/development.py"}, {"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n", "path": "config/settings/production.py"}], "after_files": [{"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n\n\n# Use a local.py module for settings that shouldn't be version tracked\ntry:\n from .local import * # noqa\nexcept ImportError:\n pass\n", "path": "config/settings/development.py"}, {"content": "from config.settings.common_settings import * # noqa\n\n# Override default settings\n\n\n# Use a local.py module for settings that shouldn't be version tracked\ntry:\n from .local import * # noqa \nexcept ImportError:\n pass\n", "path": "config/settings/production.py"}]} | 608 | 192 |
gh_patches_debug_4675 | rasdani/github-patches | git_diff | pypa__pip-5931 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip uses deprecated SafeConfigParser
* Pip version: 9.0.1
* Python version: 3.6.1
* Operating system: Mac OS X 10.12.4
### Description:
With `error::DeprecationWarning` in `PYTHONWARNINGS`:
```
pip uninstall -y faker
/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/pep425tags.py:260: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
import imp
Exception:
Traceback (most recent call last):
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/basecommand.py", line 215, in main
status = self.run(options, args)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/commands/uninstall.py", line 76, in run
requirement_set.uninstall(auto_confirm=options.yes)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_set.py", line 346, in uninstall
req.uninstall(auto_confirm=auto_confirm)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_install.py", line 732, in uninstall
config = configparser.SafeConfigParser(**options)
File "/Users/davidchudzicki/.cache/hypothesis-build-runtimes/versions/python3.6/lib/python3.6/configparser.py", line 1212, in __init__
DeprecationWarning, stacklevel=2
DeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/vcs/mercurial.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import os
5
6 from pip._vendor.six.moves import configparser
7
8 from pip._internal.download import path_to_url
9 from pip._internal.utils.misc import display_path, make_vcs_requirement_url
10 from pip._internal.utils.temp_dir import TempDirectory
11 from pip._internal.vcs import VersionControl, vcs
12
13 logger = logging.getLogger(__name__)
14
15
16 class Mercurial(VersionControl):
17 name = 'hg'
18 dirname = '.hg'
19 repo_name = 'clone'
20 schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
21
22 def get_base_rev_args(self, rev):
23 return [rev]
24
25 def export(self, location):
26 """Export the Hg repository at the url to the destination location"""
27 with TempDirectory(kind="export") as temp_dir:
28 self.unpack(temp_dir.path)
29
30 self.run_command(
31 ['archive', location], show_stdout=False, cwd=temp_dir.path
32 )
33
34 def fetch_new(self, dest, url, rev_options):
35 rev_display = rev_options.to_display()
36 logger.info(
37 'Cloning hg %s%s to %s',
38 url,
39 rev_display,
40 display_path(dest),
41 )
42 self.run_command(['clone', '--noupdate', '-q', url, dest])
43 cmd_args = ['update', '-q'] + rev_options.to_args()
44 self.run_command(cmd_args, cwd=dest)
45
46 def switch(self, dest, url, rev_options):
47 repo_config = os.path.join(dest, self.dirname, 'hgrc')
48 config = configparser.SafeConfigParser()
49 try:
50 config.read(repo_config)
51 config.set('paths', 'default', url)
52 with open(repo_config, 'w') as config_file:
53 config.write(config_file)
54 except (OSError, configparser.NoSectionError) as exc:
55 logger.warning(
56 'Could not switch Mercurial repository to %s: %s', url, exc,
57 )
58 else:
59 cmd_args = ['update', '-q'] + rev_options.to_args()
60 self.run_command(cmd_args, cwd=dest)
61
62 def update(self, dest, url, rev_options):
63 self.run_command(['pull', '-q'], cwd=dest)
64 cmd_args = ['update', '-q'] + rev_options.to_args()
65 self.run_command(cmd_args, cwd=dest)
66
67 def get_url(self, location):
68 url = self.run_command(
69 ['showconfig', 'paths.default'],
70 show_stdout=False, cwd=location).strip()
71 if self._is_local_repository(url):
72 url = path_to_url(url)
73 return url.strip()
74
75 def get_revision(self, location):
76 current_revision = self.run_command(
77 ['parents', '--template={rev}'],
78 show_stdout=False, cwd=location).strip()
79 return current_revision
80
81 def get_revision_hash(self, location):
82 current_rev_hash = self.run_command(
83 ['parents', '--template={node}'],
84 show_stdout=False, cwd=location).strip()
85 return current_rev_hash
86
87 def get_src_requirement(self, dist, location):
88 repo = self.get_url(location)
89 if not repo.lower().startswith('hg:'):
90 repo = 'hg+' + repo
91 current_rev_hash = self.get_revision_hash(location)
92 egg_project_name = dist.egg_name().split('-', 1)[0]
93 return make_vcs_requirement_url(repo, current_rev_hash,
94 egg_project_name)
95
96 def is_commit_id_equal(self, dest, name):
97 """Always assume the versions don't match"""
98 return False
99
100
101 vcs.register(Mercurial)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/vcs/mercurial.py b/src/pip/_internal/vcs/mercurial.py
--- a/src/pip/_internal/vcs/mercurial.py
+++ b/src/pip/_internal/vcs/mercurial.py
@@ -45,7 +45,7 @@
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
- config = configparser.SafeConfigParser()
+ config = configparser.RawConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
| {"golden_diff": "diff --git a/src/pip/_internal/vcs/mercurial.py b/src/pip/_internal/vcs/mercurial.py\n--- a/src/pip/_internal/vcs/mercurial.py\n+++ b/src/pip/_internal/vcs/mercurial.py\n@@ -45,7 +45,7 @@\n \n def switch(self, dest, url, rev_options):\n repo_config = os.path.join(dest, self.dirname, 'hgrc')\n- config = configparser.SafeConfigParser()\n+ config = configparser.RawConfigParser()\n try:\n config.read(repo_config)\n config.set('paths', 'default', url)\n", "issue": "pip uses deprecated SafeConfigParser\n* Pip version: 9.0.1\r\n* Python version: 3.6.1\r\n* Operating system: Mac OS X 10.12.4\r\n\r\n### Description:\r\n\r\nWith `error::DeprecationWarning` in `PYTHONWARNINGS`:\r\n\r\n```\r\npip uninstall -y faker\r\n/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/pep425tags.py:260: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses\r\n import imp\r\nException:\r\nTraceback (most recent call last):\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/basecommand.py\", line 215, in main\r\n status = self.run(options, args)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/commands/uninstall.py\", line 76, in run\r\n requirement_set.uninstall(auto_confirm=options.yes)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_set.py\", line 346, in uninstall\r\n req.uninstall(auto_confirm=auto_confirm)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/.tox/py36-full/lib/python3.6/site-packages/pip/req/req_install.py\", line 732, in uninstall\r\n config = configparser.SafeConfigParser(**options)\r\n File \"/Users/davidchudzicki/.cache/hypothesis-build-runtimes/versions/python3.6/lib/python3.6/configparser.py\", line 1212, in __init__\r\n DeprecationWarning, stacklevel=2\r\nDeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. 
Use ConfigParser directly instead.\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\n\nfrom pip._vendor.six.moves import configparser\n\nfrom pip._internal.download import path_to_url\nfrom pip._internal.utils.misc import display_path, make_vcs_requirement_url\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.vcs import VersionControl, vcs\n\nlogger = logging.getLogger(__name__)\n\n\nclass Mercurial(VersionControl):\n name = 'hg'\n dirname = '.hg'\n repo_name = 'clone'\n schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')\n\n def get_base_rev_args(self, rev):\n return [rev]\n\n def export(self, location):\n \"\"\"Export the Hg repository at the url to the destination location\"\"\"\n with TempDirectory(kind=\"export\") as temp_dir:\n self.unpack(temp_dir.path)\n\n self.run_command(\n ['archive', location], show_stdout=False, cwd=temp_dir.path\n )\n\n def fetch_new(self, dest, url, rev_options):\n rev_display = rev_options.to_display()\n logger.info(\n 'Cloning hg %s%s to %s',\n url,\n rev_display,\n display_path(dest),\n )\n self.run_command(['clone', '--noupdate', '-q', url, dest])\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def switch(self, dest, url, rev_options):\n repo_config = os.path.join(dest, self.dirname, 'hgrc')\n config = configparser.SafeConfigParser()\n try:\n config.read(repo_config)\n config.set('paths', 'default', url)\n with open(repo_config, 'w') as config_file:\n config.write(config_file)\n except (OSError, configparser.NoSectionError) as exc:\n logger.warning(\n 'Could not switch Mercurial repository to %s: %s', url, exc,\n )\n else:\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def update(self, dest, url, rev_options):\n self.run_command(['pull', '-q'], cwd=dest)\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def get_url(self, location):\n url = self.run_command(\n ['showconfig', 'paths.default'],\n show_stdout=False, cwd=location).strip()\n if self._is_local_repository(url):\n url = path_to_url(url)\n return url.strip()\n\n def get_revision(self, location):\n current_revision = self.run_command(\n ['parents', '--template={rev}'],\n show_stdout=False, cwd=location).strip()\n return current_revision\n\n def get_revision_hash(self, location):\n current_rev_hash = self.run_command(\n ['parents', '--template={node}'],\n show_stdout=False, cwd=location).strip()\n return current_rev_hash\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('hg:'):\n repo = 'hg+' + repo\n current_rev_hash = self.get_revision_hash(location)\n egg_project_name = dist.egg_name().split('-', 1)[0]\n return make_vcs_requirement_url(repo, current_rev_hash,\n egg_project_name)\n\n def is_commit_id_equal(self, dest, name):\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Mercurial)\n", "path": "src/pip/_internal/vcs/mercurial.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\n\nfrom pip._vendor.six.moves import configparser\n\nfrom pip._internal.download import path_to_url\nfrom pip._internal.utils.misc import display_path, make_vcs_requirement_url\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.vcs import VersionControl, vcs\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass Mercurial(VersionControl):\n name = 'hg'\n dirname = '.hg'\n repo_name = 'clone'\n schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')\n\n def get_base_rev_args(self, rev):\n return [rev]\n\n def export(self, location):\n \"\"\"Export the Hg repository at the url to the destination location\"\"\"\n with TempDirectory(kind=\"export\") as temp_dir:\n self.unpack(temp_dir.path)\n\n self.run_command(\n ['archive', location], show_stdout=False, cwd=temp_dir.path\n )\n\n def fetch_new(self, dest, url, rev_options):\n rev_display = rev_options.to_display()\n logger.info(\n 'Cloning hg %s%s to %s',\n url,\n rev_display,\n display_path(dest),\n )\n self.run_command(['clone', '--noupdate', '-q', url, dest])\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def switch(self, dest, url, rev_options):\n repo_config = os.path.join(dest, self.dirname, 'hgrc')\n config = configparser.RawConfigParser()\n try:\n config.read(repo_config)\n config.set('paths', 'default', url)\n with open(repo_config, 'w') as config_file:\n config.write(config_file)\n except (OSError, configparser.NoSectionError) as exc:\n logger.warning(\n 'Could not switch Mercurial repository to %s: %s', url, exc,\n )\n else:\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def update(self, dest, url, rev_options):\n self.run_command(['pull', '-q'], cwd=dest)\n cmd_args = ['update', '-q'] + rev_options.to_args()\n self.run_command(cmd_args, cwd=dest)\n\n def get_url(self, location):\n url = self.run_command(\n ['showconfig', 'paths.default'],\n show_stdout=False, cwd=location).strip()\n if self._is_local_repository(url):\n url = path_to_url(url)\n return url.strip()\n\n def get_revision(self, location):\n current_revision = self.run_command(\n ['parents', '--template={rev}'],\n show_stdout=False, cwd=location).strip()\n return current_revision\n\n def get_revision_hash(self, location):\n current_rev_hash = self.run_command(\n ['parents', '--template={node}'],\n show_stdout=False, cwd=location).strip()\n return current_rev_hash\n\n def get_src_requirement(self, dist, location):\n repo = self.get_url(location)\n if not repo.lower().startswith('hg:'):\n repo = 'hg+' + repo\n current_rev_hash = self.get_revision_hash(location)\n egg_project_name = dist.egg_name().split('-', 1)[0]\n return make_vcs_requirement_url(repo, current_rev_hash,\n egg_project_name)\n\n def is_commit_id_equal(self, dest, name):\n \"\"\"Always assume the versions don't match\"\"\"\n return False\n\n\nvcs.register(Mercurial)\n", "path": "src/pip/_internal/vcs/mercurial.py"}]} | 1,756 | 140 |
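A minimal sketch of the parser swap made in the pip record above, using only the standard-library configparser (the section contents and URLs are placeholders). RawConfigParser supports the same read/set cycle that switch() relies on, without the SafeConfigParser alias that triggers the DeprecationWarning quoted in the issue:

```python
import configparser

# Non-interpolating parser that the patch switches to; it handles the same
# 'paths' section read/update cycle without touching the deprecated alias.
config = configparser.RawConfigParser()
config.read_string("[paths]\ndefault = https://example.org/old-repo\n")
config.set("paths", "default", "https://example.org/new-repo")
assert config.get("paths", "default") == "https://example.org/new-repo"
```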
gh_patches_debug_3922 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hide TensorBoard service REST call logs
Currently these logs appear on master pod's log which is not necessary at user level:
```
I0624 15:50:54.834580 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:54] "GET /data/plugin/scalars/tags HTTP/1.1" 200 -
W0624 15:50:55.150964 140556554397440 servicer.py:195] Task result for outdated version 30 dropped
I0624 15:50:57.245738 140272633558784 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] "GET /data/environment HTTP/1.1" 200 -
I0624 15:50:57.250612 140271752836864 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] "GET /data/experiments HTTP/1.1" 200 -
I0624 15:50:57.252254 140272105903872 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] "GET /data/runs HTTP/1.1" 200 -
I0624 15:50:57.255696 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] "GET /data/plugins_listing HTTP/1.1" 200 -
I0624 15:50:57.430979 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] "GET /data/plugin/scalars/tags HTTP/1.1" 200 -
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/master/tensorboard_service.py`
Content:
```
1 import datetime
2 import subprocess
3 import time
4
5 import tensorflow as tf
6
7 import numpy as np
8
9
10 class TensorboardService(object):
11 """Tensorboard Service implementation"""
12
13 def __init__(self, tensorboard_log_dir):
14 """
15 Arguments:
16 tensorboard_log_dir: The log directory for Tensorboard.
17 """
18 _current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
19 self._tensorboard_log_dir = tensorboard_log_dir + _current_time
20 self._initialize_summary_writer()
21 self.tb_process = None
22
23 def _initialize_summary_writer(self):
24 self.summary_writer = tf.summary.create_file_writer(
25 self._tensorboard_log_dir
26 )
27
28 def write_dict_to_summary(self, dictionary, version):
29 with self.summary_writer.as_default():
30 for k, v in dictionary.items():
31 if isinstance(v, np.ndarray) and len(v) == 1:
32 v = v[0]
33 tf.summary.scalar(k, v, step=version)
34
35 def start(self):
36 self.tb_process = subprocess.Popen(
37 ["tensorboard --logdir " + self._tensorboard_log_dir], shell=True
38 )
39
40 def keep_running(self):
41 while self.tb_process.poll() is None:
42 time.sleep(10)
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/master/tensorboard_service.py b/elasticdl/python/elasticdl/master/tensorboard_service.py
--- a/elasticdl/python/elasticdl/master/tensorboard_service.py
+++ b/elasticdl/python/elasticdl/master/tensorboard_service.py
@@ -34,7 +34,10 @@
def start(self):
self.tb_process = subprocess.Popen(
- ["tensorboard --logdir " + self._tensorboard_log_dir], shell=True
+ ["tensorboard --logdir " + self._tensorboard_log_dir],
+ shell=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.STDOUT,
)
def keep_running(self):
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/master/tensorboard_service.py b/elasticdl/python/elasticdl/master/tensorboard_service.py\n--- a/elasticdl/python/elasticdl/master/tensorboard_service.py\n+++ b/elasticdl/python/elasticdl/master/tensorboard_service.py\n@@ -34,7 +34,10 @@\n \n def start(self):\n self.tb_process = subprocess.Popen(\n- [\"tensorboard --logdir \" + self._tensorboard_log_dir], shell=True\n+ [\"tensorboard --logdir \" + self._tensorboard_log_dir],\n+ shell=True,\n+ stdout=subprocess.DEVNULL,\n+ stderr=subprocess.STDOUT,\n )\n \n def keep_running(self):\n", "issue": "Hide TensorBoard service REST call logs\nCurrently these logs appear on master pod's log which is not necessary at user level:\r\n```\r\nI0624 15:50:54.834580 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:54] \"GET /data/plugin/scalars/tags HTTP/1.1\" 200 -\r\nW0624 15:50:55.150964 140556554397440 servicer.py:195] Task result for outdated version 30 dropped\r\nI0624 15:50:57.245738 140272633558784 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] \"GET /data/environment HTTP/1.1\" 200 -\r\nI0624 15:50:57.250612 140271752836864 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] \"GET /data/experiments HTTP/1.1\" 200 -\r\nI0624 15:50:57.252254 140272105903872 _internal.py:122] ::ffff:10.36.1.1 - - [24/Jun/2019 15:50:57] \"GET /data/runs HTTP/1.1\" 200 -\r\nI0624 15:50:57.255696 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] \"GET /data/plugins_listing HTTP/1.1\" 200 -\r\nI0624 15:50:57.430979 140272641951488 _internal.py:122] ::ffff:10.138.0.35 - - [24/Jun/2019 15:50:57] \"GET /data/plugin/scalars/tags HTTP/1.1\" 200 -\r\n```\n", "before_files": [{"content": "import datetime\nimport subprocess\nimport time\n\nimport tensorflow as tf\n\nimport numpy as np\n\n\nclass TensorboardService(object):\n \"\"\"Tensorboard Service implementation\"\"\"\n\n def __init__(self, tensorboard_log_dir):\n \"\"\"\n Arguments:\n tensorboard_log_dir: The log directory for Tensorboard.\n \"\"\"\n _current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n self._tensorboard_log_dir = tensorboard_log_dir + _current_time\n self._initialize_summary_writer()\n self.tb_process = None\n\n def _initialize_summary_writer(self):\n self.summary_writer = tf.summary.create_file_writer(\n self._tensorboard_log_dir\n )\n\n def write_dict_to_summary(self, dictionary, version):\n with self.summary_writer.as_default():\n for k, v in dictionary.items():\n if isinstance(v, np.ndarray) and len(v) == 1:\n v = v[0]\n tf.summary.scalar(k, v, step=version)\n\n def start(self):\n self.tb_process = subprocess.Popen(\n [\"tensorboard --logdir \" + self._tensorboard_log_dir], shell=True\n )\n\n def keep_running(self):\n while self.tb_process.poll() is None:\n time.sleep(10)\n", "path": "elasticdl/python/elasticdl/master/tensorboard_service.py"}], "after_files": [{"content": "import datetime\nimport subprocess\nimport time\n\nimport tensorflow as tf\n\nimport numpy as np\n\n\nclass TensorboardService(object):\n \"\"\"Tensorboard Service implementation\"\"\"\n\n def __init__(self, tensorboard_log_dir):\n \"\"\"\n Arguments:\n tensorboard_log_dir: The log directory for Tensorboard.\n \"\"\"\n _current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n self._tensorboard_log_dir = tensorboard_log_dir + _current_time\n self._initialize_summary_writer()\n self.tb_process = None\n\n def _initialize_summary_writer(self):\n self.summary_writer = 
tf.summary.create_file_writer(\n self._tensorboard_log_dir\n )\n\n def write_dict_to_summary(self, dictionary, version):\n with self.summary_writer.as_default():\n for k, v in dictionary.items():\n if isinstance(v, np.ndarray) and len(v) == 1:\n v = v[0]\n tf.summary.scalar(k, v, step=version)\n\n def start(self):\n self.tb_process = subprocess.Popen(\n [\"tensorboard --logdir \" + self._tensorboard_log_dir],\n shell=True,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n\n def keep_running(self):\n while self.tb_process.poll() is None:\n time.sleep(10)\n", "path": "elasticdl/python/elasticdl/master/tensorboard_service.py"}]} | 1,293 | 158 |
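A small, runnable sketch of the silencing pattern adopted in the elasticdl diff above, assuming only the standard library; the shell command is a placeholder rather than the real TensorBoard invocation:

```python
import subprocess

def start_quiet(cmd):
    # Discard the child's stdout and fold stderr into it, so the
    # subprocess's per-request logs never reach the parent's log stream.
    return subprocess.Popen(
        cmd,
        shell=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.STDOUT,
    )

if __name__ == "__main__":
    proc = start_quiet("echo discarded; echo 'also discarded' >&2")
    proc.wait()
```

Because stderr is pointed at stdout and stdout at DEVNULL, both streams are dropped, which is what hides the access-log lines quoted in the issue from the master pod's log.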
gh_patches_debug_32860 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3702 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash with invalid dictionary filenames
Looks like this doesn't get caught properly on init:
```
16:39:44 DEBUG init app:_init_modules:468 Initializing websettings...
16:39:45 ERROR misc crashsignal:exception_hook:216 Uncaught exception
Traceback (most recent call last):
File "/bin/qutebrowser", line 11, in <module>
load_entry_point('qutebrowser==1.1.1', 'gui_scripts', 'qutebrowser')()
File "/usr/lib/python3.6/site-packages/qutebrowser/qutebrowser.py", line 188, in main
return app.run(args)
File "/usr/lib/python3.6/site-packages/qutebrowser/app.py", line 137, in run
init(args, crash_handler)
File "/usr/lib/python3.6/site-packages/qutebrowser/app.py", line 163, in init
_init_modules(args, crash_handler)
File "/usr/lib/python3.6/site-packages/qutebrowser/app.py", line 469, in _init_modules
websettings.init(args)
File "/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py", line 215, in init
webenginesettings.init(args)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 297, in init
websettings.init_mappings(MAPPINGS)
File "/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py", line 198, in init_mappings
mapping.set(value)
File "/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py", line 72, in set
self._set(value, settings=settings)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 154, in _set
filenames = [self._find_installed(code) for code in value]
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 154, in <listcomp>
filenames = [self._find_installed(code) for code in value]
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py", line 143, in _find_installed
local_filename = spell.local_filename(code)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py", line 64, in local_filename
all_installed = local_files(code)
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py", line 51, in local_files
for matching_dict in sorted(matching_dicts, key=version, reverse=True):
File "/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py", line 36, in version
.format(filename))
ValueError: the given dictionary file name is malformed: /usr/share/qt/qtwebengine_dictionaries/en-US.bdic
```
https://crashes.qutebrowser.org/view/63cd0d83
cc @elshize
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/browser/webengine/spell.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017-2018 Michal Siedlaczek <[email protected]>
4
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Installing and configuring spell-checking for QtWebEngine."""
21
22 import glob
23 import os
24 import re
25
26 from PyQt5.QtCore import QLibraryInfo
27 from qutebrowser.utils import log
28
29
30 def version(filename):
31 """Extract the version number from the dictionary file name."""
32 version_re = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")
33 match = version_re.fullmatch(filename)
34 if match is None:
35 raise ValueError('the given dictionary file name is malformed: {}'
36 .format(filename))
37 return tuple(int(n) for n in match.group('version').split('-'))
38
39
40 def dictionary_dir():
41 """Return the path (str) to the QtWebEngine's dictionaries directory."""
42 datapath = QLibraryInfo.location(QLibraryInfo.DataPath)
43 return os.path.join(datapath, 'qtwebengine_dictionaries')
44
45
46 def local_files(code):
47 """Return all installed dictionaries for the given code."""
48 pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))
49 matching_dicts = glob.glob(pathname)
50 files = []
51 for matching_dict in sorted(matching_dicts, key=version, reverse=True):
52 filename = os.path.basename(matching_dict)
53 log.config.debug('Found file for dict {}: {}'.format(code, filename))
54 files.append(filename)
55 return files
56
57
58 def local_filename(code):
59 """Return the newest installed dictionary for the given code.
60
61 Return the filename of the installed dictionary with the highest version
62 number or None if the dictionary is not installed.
63 """
64 all_installed = local_files(code)
65 return os.path.splitext(all_installed[0])[0] if all_installed else None
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/browser/webengine/spell.py b/qutebrowser/browser/webengine/spell.py
--- a/qutebrowser/browser/webengine/spell.py
+++ b/qutebrowser/browser/webengine/spell.py
@@ -24,16 +24,18 @@
import re
from PyQt5.QtCore import QLibraryInfo
-from qutebrowser.utils import log
+from qutebrowser.utils import log, message
+
+dict_version_re = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")
def version(filename):
"""Extract the version number from the dictionary file name."""
- version_re = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")
- match = version_re.fullmatch(filename)
+ match = dict_version_re.match(filename)
if match is None:
- raise ValueError('the given dictionary file name is malformed: {}'
- .format(filename))
+ message.warning(
+ "Found a dictionary with a malformed name: {}".format(filename))
+ return None
return tuple(int(n) for n in match.group('version').split('-'))
@@ -44,15 +46,23 @@
def local_files(code):
- """Return all installed dictionaries for the given code."""
+ """Return all installed dictionaries for the given code.
+
+ The returned dictionaries are sorted by version, therefore the latest will
+ be the first element. The list will be empty if no dictionaries are found.
+ """
pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))
matching_dicts = glob.glob(pathname)
- files = []
- for matching_dict in sorted(matching_dicts, key=version, reverse=True):
- filename = os.path.basename(matching_dict)
- log.config.debug('Found file for dict {}: {}'.format(code, filename))
- files.append(filename)
- return files
+ versioned_dicts = []
+ for matching_dict in matching_dicts:
+ parsed_version = version(matching_dict)
+ if parsed_version is not None:
+ filename = os.path.basename(matching_dict)
+ log.config.debug('Found file for dict {}: {}'
+ .format(code, filename))
+ versioned_dicts.append((parsed_version, filename))
+ return [filename for version, filename
+ in sorted(versioned_dicts, reverse=True)]
def local_filename(code):
| {"golden_diff": "diff --git a/qutebrowser/browser/webengine/spell.py b/qutebrowser/browser/webengine/spell.py\n--- a/qutebrowser/browser/webengine/spell.py\n+++ b/qutebrowser/browser/webengine/spell.py\n@@ -24,16 +24,18 @@\n import re\n \n from PyQt5.QtCore import QLibraryInfo\n-from qutebrowser.utils import log\n+from qutebrowser.utils import log, message\n+\n+dict_version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n \n \n def version(filename):\n \"\"\"Extract the version number from the dictionary file name.\"\"\"\n- version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n- match = version_re.fullmatch(filename)\n+ match = dict_version_re.match(filename)\n if match is None:\n- raise ValueError('the given dictionary file name is malformed: {}'\n- .format(filename))\n+ message.warning(\n+ \"Found a dictionary with a malformed name: {}\".format(filename))\n+ return None\n return tuple(int(n) for n in match.group('version').split('-'))\n \n \n@@ -44,15 +46,23 @@\n \n \n def local_files(code):\n- \"\"\"Return all installed dictionaries for the given code.\"\"\"\n+ \"\"\"Return all installed dictionaries for the given code.\n+\n+ The returned dictionaries are sorted by version, therefore the latest will\n+ be the first element. The list will be empty if no dictionaries are found.\n+ \"\"\"\n pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))\n matching_dicts = glob.glob(pathname)\n- files = []\n- for matching_dict in sorted(matching_dicts, key=version, reverse=True):\n- filename = os.path.basename(matching_dict)\n- log.config.debug('Found file for dict {}: {}'.format(code, filename))\n- files.append(filename)\n- return files\n+ versioned_dicts = []\n+ for matching_dict in matching_dicts:\n+ parsed_version = version(matching_dict)\n+ if parsed_version is not None:\n+ filename = os.path.basename(matching_dict)\n+ log.config.debug('Found file for dict {}: {}'\n+ .format(code, filename))\n+ versioned_dicts.append((parsed_version, filename))\n+ return [filename for version, filename\n+ in sorted(versioned_dicts, reverse=True)]\n \n \n def local_filename(code):\n", "issue": "Crash with invalid dictionary filenames\nLooks like this doesn't get caught properly on init:\r\n\r\n```\r\n16:39:44 DEBUG init app:_init_modules:468 Initializing websettings...\r\n16:39:45 ERROR misc crashsignal:exception_hook:216 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/bin/qutebrowser\", line 11, in <module>\r\n load_entry_point('qutebrowser==1.1.1', 'gui_scripts', 'qutebrowser')()\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/qutebrowser.py\", line 188, in main\r\n return app.run(args)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/app.py\", line 137, in run\r\n init(args, crash_handler)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/app.py\", line 163, in init\r\n _init_modules(args, crash_handler)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/app.py\", line 469, in _init_modules\r\n websettings.init(args)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py\", line 215, in init\r\n webenginesettings.init(args)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 297, in init\r\n websettings.init_mappings(MAPPINGS)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py\", line 198, in init_mappings\r\n mapping.set(value)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/config/websettings.py\", line 72, in 
set\r\n self._set(value, settings=settings)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 154, in _set\r\n filenames = [self._find_installed(code) for code in value]\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 154, in <listcomp>\r\n filenames = [self._find_installed(code) for code in value]\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/webenginesettings.py\", line 143, in _find_installed\r\n local_filename = spell.local_filename(code)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py\", line 64, in local_filename\r\n all_installed = local_files(code)\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py\", line 51, in local_files\r\n for matching_dict in sorted(matching_dicts, key=version, reverse=True):\r\n File \"/usr/lib/python3.6/site-packages/qutebrowser/browser/webengine/spell.py\", line 36, in version\r\n .format(filename))\r\nValueError: the given dictionary file name is malformed: /usr/share/qt/qtwebengine_dictionaries/en-US.bdic\r\n```\r\n\r\nhttps://crashes.qutebrowser.org/view/63cd0d83\r\n\r\ncc @elshize \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Michal Siedlaczek <[email protected]>\n\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Installing and configuring spell-checking for QtWebEngine.\"\"\"\n\nimport glob\nimport os\nimport re\n\nfrom PyQt5.QtCore import QLibraryInfo\nfrom qutebrowser.utils import log\n\n\ndef version(filename):\n \"\"\"Extract the version number from the dictionary file name.\"\"\"\n version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n match = version_re.fullmatch(filename)\n if match is None:\n raise ValueError('the given dictionary file name is malformed: {}'\n .format(filename))\n return tuple(int(n) for n in match.group('version').split('-'))\n\n\ndef dictionary_dir():\n \"\"\"Return the path (str) to the QtWebEngine's dictionaries directory.\"\"\"\n datapath = QLibraryInfo.location(QLibraryInfo.DataPath)\n return os.path.join(datapath, 'qtwebengine_dictionaries')\n\n\ndef local_files(code):\n \"\"\"Return all installed dictionaries for the given code.\"\"\"\n pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))\n matching_dicts = glob.glob(pathname)\n files = []\n for matching_dict in sorted(matching_dicts, key=version, reverse=True):\n filename = os.path.basename(matching_dict)\n log.config.debug('Found file for dict {}: {}'.format(code, filename))\n files.append(filename)\n return files\n\n\ndef local_filename(code):\n \"\"\"Return the newest installed dictionary for the given code.\n\n Return the filename of the installed dictionary with the highest version\n number or None if the dictionary is not installed.\n \"\"\"\n all_installed = local_files(code)\n return os.path.splitext(all_installed[0])[0] if all_installed else None\n", "path": "qutebrowser/browser/webengine/spell.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Michal Siedlaczek <[email protected]>\n\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Installing and configuring spell-checking for QtWebEngine.\"\"\"\n\nimport glob\nimport os\nimport re\n\nfrom PyQt5.QtCore import QLibraryInfo\nfrom qutebrowser.utils import log, message\n\ndict_version_re = re.compile(r\".+-(?P<version>[0-9]+-[0-9]+?)\\.bdic\")\n\n\ndef version(filename):\n \"\"\"Extract the version number from the dictionary file name.\"\"\"\n match = dict_version_re.match(filename)\n if match is None:\n message.warning(\n \"Found a dictionary with a malformed name: {}\".format(filename))\n return None\n return tuple(int(n) for n in match.group('version').split('-'))\n\n\ndef dictionary_dir():\n \"\"\"Return the path (str) to the QtWebEngine's dictionaries directory.\"\"\"\n datapath = QLibraryInfo.location(QLibraryInfo.DataPath)\n return os.path.join(datapath, 'qtwebengine_dictionaries')\n\n\ndef local_files(code):\n \"\"\"Return all installed dictionaries for the given code.\n\n The returned dictionaries are sorted by version, therefore the latest will\n be the first element. 
The list will be empty if no dictionaries are found.\n \"\"\"\n pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))\n matching_dicts = glob.glob(pathname)\n versioned_dicts = []\n for matching_dict in matching_dicts:\n parsed_version = version(matching_dict)\n if parsed_version is not None:\n filename = os.path.basename(matching_dict)\n log.config.debug('Found file for dict {}: {}'\n .format(code, filename))\n versioned_dicts.append((parsed_version, filename))\n return [filename for version, filename\n in sorted(versioned_dicts, reverse=True)]\n\n\ndef local_filename(code):\n \"\"\"Return the newest installed dictionary for the given code.\n\n Return the filename of the installed dictionary with the highest version\n number or None if the dictionary is not installed.\n \"\"\"\n all_installed = local_files(code)\n return os.path.splitext(all_installed[0])[0] if all_installed else None\n", "path": "qutebrowser/browser/webengine/spell.py"}]} | 1,669 | 537 |
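A compact sketch of the tolerant version parsing that the qutebrowser diff above introduces, assuming the same hyphenated .bdic naming convention; a malformed file name now yields None instead of raising the ValueError seen in the traceback:

```python
import re

dict_version_re = re.compile(r".+-(?P<version>[0-9]+-[0-9]+?)\.bdic")

def parse_version(filename):
    match = dict_version_re.match(filename)
    if match is None:
        return None  # tolerated; the old code raised ValueError here
    return tuple(int(n) for n in match.group("version").split("-"))

print(parse_version("en-US-8-0.bdic"))  # (8, 0)
print(parse_version("en-US.bdic"))      # None -- the crashing case from the issue
```

Callers can then skip the None entries, which is what the patched local_files() does by sorting (version, filename) tuples only for names that parsed.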
gh_patches_debug_4226 | rasdani/github-patches | git_diff | mlflow__mlflow-3598 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unpin sqlalchemy
Hi,
Currently mlflow pins sqlalchemy to `<= 1.3.13`. I wanted to use this package on a project that requires a more updated version of sqlalchemy.
Would it be possible to unpin sqlalchemy, or to change the constraint to disallow specific versions of it? (like `!=1.3.14` etc.?)
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from importlib.machinery import SourceFileLoader
3 from setuptools import setup, find_packages
4
5 version = (
6 SourceFileLoader("mlflow.version", os.path.join("mlflow", "version.py")).load_module().VERSION
7 )
8
9
10 # Get a list of all files in the JS directory to include in our module
11 def package_files(directory):
12 paths = []
13 for (path, _, filenames) in os.walk(directory):
14 for filename in filenames:
15 paths.append(os.path.join("..", path, filename))
16 return paths
17
18
19 # Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build
20 # to include in the wheel, e.g. "../mlflow/server/js/build/index.html"
21 js_files = package_files("mlflow/server/js/build")
22 models_container_server_files = package_files("mlflow/models/container")
23 alembic_files = [
24 "../mlflow/store/db_migrations/alembic.ini",
25 "../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini",
26 ]
27
28 setup(
29 name="mlflow",
30 version=version,
31 packages=find_packages(exclude=["tests", "tests.*"]),
32 package_data={"mlflow": js_files + models_container_server_files + alembic_files},
33 install_requires=[
34 "alembic<=1.4.1",
35 # Required
36 "azure-storage-blob",
37 "click>=7.0",
38 "cloudpickle",
39 "databricks-cli>=0.8.7",
40 "requests>=2.17.3",
41 "six>=1.10.0",
42 'waitress; platform_system == "Windows"',
43 'gunicorn; platform_system != "Windows"',
44 "Flask",
45 "numpy",
46 "pandas",
47 "python-dateutil",
48 "protobuf>=3.6.0",
49 "gitpython>=2.1.0",
50 "pyyaml",
51 "querystring_parser",
52 "docker>=4.0.0",
53 "entrypoints",
54 # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433
55 "sqlparse>=0.3.1",
56 # Required to run the MLflow server against SQL-backed storage
57 "sqlalchemy<=1.3.13",
58 "gorilla",
59 "prometheus-flask-exporter",
60 ],
61 extras_require={
62 "extras": [
63 "scikit-learn",
64 # Required to log artifacts and models to HDFS artifact locations
65 "pyarrow",
66 # Required to log artifacts and models to AWS S3 artifact locations
67 "boto3",
68 "mleap",
69 # Required to log artifacts and models to GCS artifact locations
70 "google-cloud-storage",
71 "azureml-core>=1.2.0",
72 # Required to log artifacts to SFTP artifact locations
73 "pysftp",
74 # Required by the mlflow.projects module, when running projects against
75 # a remote Kubernetes cluster
76 "kubernetes",
77 ],
78 "sqlserver": ["mlflow-dbstore",],
79 "aliyun-oss": ["aliyunstoreplugin",],
80 },
81 entry_points="""
82 [console_scripts]
83 mlflow=mlflow.cli:cli
84 """,
85 zip_safe=False,
86 author="Databricks",
87 description="MLflow: A Platform for ML Development and Productionization",
88 long_description=open("README.rst").read(),
89 license="Apache License 2.0",
90 classifiers=["Intended Audience :: Developers", "Programming Language :: Python :: 3.6",],
91 keywords="ml ai databricks",
92 url="https://mlflow.org/",
93 python_requires=">=3.5",
94 project_urls={
95 "Bug Tracker": "https://github.com/mlflow/mlflow/issues",
96 "Documentation": "https://mlflow.org/docs/latest/index.html",
97 "Source Code": "https://github.com/mlflow/mlflow",
98 },
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,7 +54,7 @@
# Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433
"sqlparse>=0.3.1",
# Required to run the MLflow server against SQL-backed storage
- "sqlalchemy<=1.3.13",
+ "sqlalchemy",
"gorilla",
"prometheus-flask-exporter",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,7 +54,7 @@\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n- \"sqlalchemy<=1.3.13\",\n+ \"sqlalchemy\",\n \"gorilla\",\n \"prometheus-flask-exporter\",\n ],\n", "issue": "Unpin sqlalchemy\nHi,\r\n\r\nCurrently mlflow pins sqlalchemy to `<= 1.3.13`. I wanted to use this package on a projects that requires a more updated version of sqlalchemy.\r\nWould it be possible to unpit sqlalchemy or to change the constraint to disallow specific version of it? (like `!=1.3.14` etc?)\r\n\r\nThanks\n", "before_files": [{"content": "import os\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\n\nsetup(\n name=\"mlflow\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files},\n install_requires=[\n \"alembic<=1.4.1\",\n # Required\n \"azure-storage-blob\",\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"requests>=2.17.3\",\n \"six>=1.10.0\",\n 'waitress; platform_system == \"Windows\"',\n 'gunicorn; platform_system != \"Windows\"',\n \"Flask\",\n \"numpy\",\n \"pandas\",\n \"python-dateutil\",\n \"protobuf>=3.6.0\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"querystring_parser\",\n \"docker>=4.0.0\",\n \"entrypoints\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy<=1.3.13\",\n \"gorilla\",\n \"prometheus-flask-exporter\",\n ],\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\",],\n \"aliyun-oss\": [\"aliyunstoreplugin\",],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read(),\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", 
\"Programming Language :: Python :: 3.6\",],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.5\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\n\nsetup(\n name=\"mlflow\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files},\n install_requires=[\n \"alembic<=1.4.1\",\n # Required\n \"azure-storage-blob\",\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"requests>=2.17.3\",\n \"six>=1.10.0\",\n 'waitress; platform_system == \"Windows\"',\n 'gunicorn; platform_system != \"Windows\"',\n \"Flask\",\n \"numpy\",\n \"pandas\",\n \"python-dateutil\",\n \"protobuf>=3.6.0\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"querystring_parser\",\n \"docker>=4.0.0\",\n \"entrypoints\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"gorilla\",\n \"prometheus-flask-exporter\",\n ],\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\",],\n \"aliyun-oss\": [\"aliyunstoreplugin\",],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read(),\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\",],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.5\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": 
\"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}]} | 1,377 | 114 |
gh_patches_debug_5864 | rasdani/github-patches | git_diff | pyca__cryptography-1575 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build automation fixes for 8th release
When triggering the wheel build the release automation does not appropriately wait for the build to complete but instead grabs the previous build. The previous attempted fix of adding a `sleep(3)` did not work around this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tasks.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import getpass
8 import os
9 import time
10
11 import invoke
12
13 import requests
14
15
16 JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder"
17
18
19 def wait_for_build_completed(session):
20 # Wait 3 seconds before actually checking if the build is complete, to
21 # ensure that it had time to really start.
22 time.sleep(3)
23 while True:
24 response = session.get(
25 "{0}/lastBuild/api/json/".format(JENKINS_URL),
26 headers={
27 "Accept": "application/json",
28 }
29 )
30 response.raise_for_status()
31 if not response.json()["building"]:
32 assert response.json()["result"] == "SUCCESS"
33 break
34 time.sleep(0.1)
35
36
37 def download_artifacts(session):
38 response = session.get(
39 "{0}/lastBuild/api/json/".format(JENKINS_URL),
40 headers={
41 "Accept": "application/json"
42 }
43 )
44 response.raise_for_status()
45 assert not response.json()["building"]
46 assert response.json()["result"] == "SUCCESS"
47
48 paths = []
49
50 for run in response.json()["runs"]:
51 response = session.get(
52 run["url"] + "api/json/",
53 headers={
54 "Accept": "application/json",
55 }
56 )
57 response.raise_for_status()
58 for artifact in response.json()["artifacts"]:
59 response = session.get(
60 "{0}artifact/{1}".format(run["url"], artifact["relativePath"])
61 )
62 out_path = os.path.join(
63 os.path.dirname(__file__),
64 "dist",
65 artifact["fileName"],
66 )
67 with open(out_path, "wb") as f:
68 f.write(response.content)
69 paths.append(out_path)
70 return paths
71
72
73 @invoke.task
74 def release(version):
75 """
76 ``version`` should be a string like '0.4' or '1.0'.
77 """
78 invoke.run("git tag -s {0} -m '{0} release'".format(version))
79 invoke.run("git push --tags")
80
81 invoke.run("python setup.py sdist")
82 invoke.run("cd vectors/ && python setup.py sdist bdist_wheel")
83
84 invoke.run(
85 "twine upload -s dist/cryptography-{0}* "
86 "vectors/dist/cryptography_vectors-{0}*".format(version)
87 )
88
89 session = requests.Session()
90
91 # This tells the CDN to delete the cached response for the URL. We do this
92 # so that the Jenkins builders will see the new sdist immediately when they
93 # go to build the wheels.
94 response = session.request(
95 "PURGE", "https://pypi.python.org/simple/cryptography/"
96 )
97 response.raise_for_status()
98
99 username = getpass.getpass("Input the GitHub/Jenkins username: ")
100 token = getpass.getpass("Input the Jenkins token: ")
101 response = session.post(
102 "{0}/build".format(JENKINS_URL),
103 auth=requests.auth.HTTPBasicAuth(
104 username, token
105 ),
106 params={
107 "cause": "Building wheels for {0}".format(version)
108 }
109 )
110 response.raise_for_status()
111 wait_for_build_completed(session)
112 paths = download_artifacts(session)
113 invoke.run("twine upload {0}".format(" ".join(paths)))
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tasks.py b/tasks.py
--- a/tasks.py
+++ b/tasks.py
@@ -17,9 +17,9 @@
def wait_for_build_completed(session):
- # Wait 3 seconds before actually checking if the build is complete, to
+ # Wait 20 seconds before actually checking if the build is complete, to
# ensure that it had time to really start.
- time.sleep(3)
+ time.sleep(20)
while True:
response = session.get(
"{0}/lastBuild/api/json/".format(JENKINS_URL),
| {"golden_diff": "diff --git a/tasks.py b/tasks.py\n--- a/tasks.py\n+++ b/tasks.py\n@@ -17,9 +17,9 @@\n \n \n def wait_for_build_completed(session):\n- # Wait 3 seconds before actually checking if the build is complete, to\n+ # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n- time.sleep(3)\n+ time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n", "issue": "Build automation fixes for 8th release\nWhen triggering the wheel build the release automation does not appropriately wait for the build to complete but instead grabs the previous build. The previous attempted fix of adding a `sleep(3)` did not work around this issue.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport os\nimport time\n\nimport invoke\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef wait_for_build_completed(session):\n # Wait 3 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(3)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for run in response.json()[\"runs\"]:\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"])\n )\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(response.content)\n paths.append(out_path)\n return paths\n\n\[email protected]\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n invoke.run(\"git tag -s {0} -m '{0} release'\".format(version))\n invoke.run(\"git push --tags\")\n\n invoke.run(\"python setup.py sdist\")\n invoke.run(\"cd vectors/ && python setup.py sdist bdist_wheel\")\n\n invoke.run(\n \"twine upload -s dist/cryptography-{0}* \"\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. 
We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n invoke.run(\"twine upload {0}\".format(\" \".join(paths)))\n", "path": "tasks.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport os\nimport time\n\nimport invoke\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef wait_for_build_completed(session):\n # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n for run in response.json()[\"runs\"]:\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"])\n )\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(response.content)\n paths.append(out_path)\n return paths\n\n\[email protected]\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n invoke.run(\"git tag -s {0} -m '{0} release'\".format(version))\n invoke.run(\"git push --tags\")\n\n invoke.run(\"python setup.py sdist\")\n invoke.run(\"cd vectors/ && python setup.py sdist bdist_wheel\")\n\n invoke.run(\n \"twine upload -s dist/cryptography-{0}* \"\n \"vectors/dist/cryptography_vectors-{0}*\".format(version)\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. 
We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n invoke.run(\"twine upload {0}\".format(\" \".join(paths)))\n", "path": "tasks.py"}]} | 1,310 | 130 |
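A parameterized sketch of the wait-then-poll pattern that the cryptography diff above tunes; the URL layout and JSON fields mirror tasks.py, while the delay arguments are the values the patch moves between:

```python
import time

def wait_for_build_completed(session, jenkins_url, initial_delay=20, poll_interval=0.1):
    # Give Jenkins time to register the newly triggered build before the first
    # poll, so lastBuild does not still point at the previous (stale) run.
    time.sleep(initial_delay)
    while True:
        response = session.get(
            "{0}/lastBuild/api/json/".format(jenkins_url),
            headers={"Accept": "application/json"},
        )
        response.raise_for_status()
        data = response.json()
        if not data["building"]:
            assert data["result"] == "SUCCESS"
            return data
        time.sleep(poll_interval)
```

The merged fix is simply the longer initial delay, so that the freshly triggered build is the one being polled rather than the previous run.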
gh_patches_debug_37607 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1525 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFE] Results to include positive NACK - CVEs to which a system is NOT vulnerable
Use case: VMaaS provides list of CVEs to which a system is vulnerable, based on list of packages' NEVRAs installed on the system. This is overridden by results from the rules engine evaluation, which can check if a certain port is closed, or a configuration value prevents exploit of a vulnerability. Unless the rules engine results includes "we evaluated for vulnerability to CVE-x and the system is NOT vulnerable", we (vulnerability-engine) won't know to override the VMaaS results that report the system is vulnerable based on the version of the package installed.
Vulnerability-engine will be able to assume that a system is no longer vulnerable to a rules engine-reported CVE if it was reported as vulnerable in previous evaluation. However, a new system checking in with a fix already in place... vulnerability-engine won't know that the vulnerability is mitigated by a config setting, closed port, etc. unless rules engine explicitly reports a CVE that was evaluated and found the system NOT vulnerable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/core/evaluators.py`
Content:
```
1 import logging
2 import sys
3
4 from ..formats import Formatter
5 from ..specs import Specs
6 from ..combiners.hostname import hostname as combiner_hostname
7 from ..parsers.branch_info import BranchInfo
8 from . import dr, plugins
9
10 log = logging.getLogger(__name__)
11
12
13 def get_simple_module_name(obj):
14 return dr.BASE_MODULE_NAMES.get(obj, None)
15
16
17 class Evaluator(Formatter):
18 def __init__(self, broker=None, stream=sys.stdout, incremental=False):
19 super(Evaluator, self).__init__(broker or dr.Broker(), stream)
20 self.rule_skips = []
21 self.rule_results = []
22 self.fingerprint_results = []
23 self.hostname = None
24 self.metadata = {}
25 self.metadata_keys = {}
26 self.incremental = incremental
27
28 def observer(self, comp, broker):
29 if comp is combiner_hostname and comp in broker:
30 self.hostname = broker[comp].fqdn
31
32 if plugins.is_rule(comp) and comp in broker:
33 self.handle_result(comp, broker[comp])
34
35 def preprocess(self):
36 self.broker.add_observer(self.observer)
37
38 def run_serial(self, graph=None):
39 dr.run(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker)
40
41 def run_incremental(self, graph=None):
42 for _ in dr.run_incremental(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker):
43 pass
44
45 def format_response(self, response):
46 """
47 To be overridden by subclasses to format the response sent back to the
48 client.
49 """
50 return response
51
52 def format_result(self, result):
53 """
54 To be overridden by subclasses to format individual rule results.
55 """
56 return result
57
58 def process(self, graph=None):
59 with self:
60 if self.incremental:
61 self.run_incremental(graph)
62 else:
63 self.run_serial(graph)
64 return self.get_response()
65
66
67 class SingleEvaluator(Evaluator):
68 def append_metadata(self, r):
69 for k, v in r.items():
70 if k != "type":
71 self.metadata[k] = v
72
73 def format_response(self, response):
74 return response
75
76 def get_response(self):
77 r = dict(self.metadata_keys)
78 r.update({
79 "system": {
80 "metadata": self.metadata,
81 "hostname": self.hostname
82 },
83 "reports": self.rule_results,
84 "fingerprints": self.fingerprint_results,
85 "skips": self.rule_skips,
86 })
87 return self.format_response(r)
88
89 def handle_result(self, plugin, r):
90 type_ = r["type"]
91 if type_ == "metadata":
92 self.append_metadata(r)
93 elif type_ == "rule":
94 self.rule_results.append(self.format_result({
95 "rule_id": "{0}|{1}".format(get_simple_module_name(plugin), r["error_key"]),
96 "details": r
97 }))
98 elif type_ == "fingerprint":
99 self.fingerprint_results.append(self.format_result({
100 "fingerprint_id": "{0}|{1}".format(get_simple_module_name(plugin), r["fingerprint_key"]),
101 "details": r
102 }))
103 elif type_ == "skip":
104 self.rule_skips.append(r)
105 elif type_ == "metadata_key":
106 self.metadata_keys[r["key"]] = r["value"]
107
108
109 class InsightsEvaluator(SingleEvaluator):
110 def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=False):
111 super(InsightsEvaluator, self).__init__(broker, stream=sys.stdout, incremental=incremental)
112 self.system_id = system_id
113 self.branch_info = {}
114 self.product = "rhel"
115 self.type = "host"
116 self.release = None
117
118 def observer(self, comp, broker):
119 super(InsightsEvaluator, self).observer(comp, broker)
120 if comp is Specs.machine_id and comp in broker:
121 self.system_id = broker[Specs.machine_id].content[0].strip()
122
123 if comp is Specs.redhat_release and comp in broker:
124 self.release = broker[comp].content[0].strip()
125
126 if comp is BranchInfo and BranchInfo in broker:
127 self.branch_info = broker[comp].data
128
129 if comp is Specs.metadata_json and comp in broker:
130 md = broker[comp]
131 self.product = md.get("product_code")
132 self.type = md.get("role")
133
134 def format_result(self, result):
135 result["system_id"] = self.system_id
136 return result
137
138 def format_response(self, response):
139 system = response["system"]
140 system["remote_branch"] = self.branch_info.get("remote_branch")
141 system["remote_leaf"] = self.branch_info.get("remote_leaf")
142 system["system_id"] = self.system_id
143 system["product"] = self.product
144 system["type"] = self.type
145 if self.release:
146 system["metadata"]["release"] = self.release
147
148 return response
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/core/evaluators.py b/insights/core/evaluators.py
--- a/insights/core/evaluators.py
+++ b/insights/core/evaluators.py
@@ -1,6 +1,9 @@
import logging
+import six
import sys
+from collections import defaultdict
+
from ..formats import Formatter
from ..specs import Specs
from ..combiners.hostname import hostname as combiner_hostname
@@ -17,9 +20,8 @@
class Evaluator(Formatter):
def __init__(self, broker=None, stream=sys.stdout, incremental=False):
super(Evaluator, self).__init__(broker or dr.Broker(), stream)
+ self.results = defaultdict(list)
self.rule_skips = []
- self.rule_results = []
- self.fingerprint_results = []
self.hostname = None
self.metadata = {}
self.metadata_keys = {}
@@ -80,30 +82,32 @@
"metadata": self.metadata,
"hostname": self.hostname
},
- "reports": self.rule_results,
- "fingerprints": self.fingerprint_results,
+ "reports": self.results["rule"],
+ "fingerprints": self.results["fingerprint"],
"skips": self.rule_skips,
})
+
+ for k, v in six.iteritems(self.results):
+ if k not in ("rule", "fingerprint"):
+ r[k] = v
+
return self.format_response(r)
def handle_result(self, plugin, r):
type_ = r["type"]
- if type_ == "metadata":
+
+ if type_ == "skip":
+ self.rule_skips.append(r)
+ elif type_ == "metadata":
self.append_metadata(r)
- elif type_ == "rule":
- self.rule_results.append(self.format_result({
- "rule_id": "{0}|{1}".format(get_simple_module_name(plugin), r["error_key"]),
- "details": r
- }))
- elif type_ == "fingerprint":
- self.fingerprint_results.append(self.format_result({
- "fingerprint_id": "{0}|{1}".format(get_simple_module_name(plugin), r["fingerprint_key"]),
+ elif type_ == "metadata_key":
+ self.metadata_keys[r.get_key()] = r["value"]
+ else:
+ response_id = "%s_id" % r.response_type
+ self.results[type_].append(self.format_result({
+ response_id: "{0}|{1}".format(get_simple_module_name(plugin), r.get_key()),
"details": r
}))
- elif type_ == "skip":
- self.rule_skips.append(r)
- elif type_ == "metadata_key":
- self.metadata_keys[r["key"]] = r["value"]
class InsightsEvaluator(SingleEvaluator):
| {"golden_diff": "diff --git a/insights/core/evaluators.py b/insights/core/evaluators.py\n--- a/insights/core/evaluators.py\n+++ b/insights/core/evaluators.py\n@@ -1,6 +1,9 @@\n import logging\n+import six\n import sys\n \n+from collections import defaultdict\n+\n from ..formats import Formatter\n from ..specs import Specs\n from ..combiners.hostname import hostname as combiner_hostname\n@@ -17,9 +20,8 @@\n class Evaluator(Formatter):\n def __init__(self, broker=None, stream=sys.stdout, incremental=False):\n super(Evaluator, self).__init__(broker or dr.Broker(), stream)\n+ self.results = defaultdict(list)\n self.rule_skips = []\n- self.rule_results = []\n- self.fingerprint_results = []\n self.hostname = None\n self.metadata = {}\n self.metadata_keys = {}\n@@ -80,30 +82,32 @@\n \"metadata\": self.metadata,\n \"hostname\": self.hostname\n },\n- \"reports\": self.rule_results,\n- \"fingerprints\": self.fingerprint_results,\n+ \"reports\": self.results[\"rule\"],\n+ \"fingerprints\": self.results[\"fingerprint\"],\n \"skips\": self.rule_skips,\n })\n+\n+ for k, v in six.iteritems(self.results):\n+ if k not in (\"rule\", \"fingerprint\"):\n+ r[k] = v\n+\n return self.format_response(r)\n \n def handle_result(self, plugin, r):\n type_ = r[\"type\"]\n- if type_ == \"metadata\":\n+\n+ if type_ == \"skip\":\n+ self.rule_skips.append(r)\n+ elif type_ == \"metadata\":\n self.append_metadata(r)\n- elif type_ == \"rule\":\n- self.rule_results.append(self.format_result({\n- \"rule_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"error_key\"]),\n- \"details\": r\n- }))\n- elif type_ == \"fingerprint\":\n- self.fingerprint_results.append(self.format_result({\n- \"fingerprint_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"fingerprint_key\"]),\n+ elif type_ == \"metadata_key\":\n+ self.metadata_keys[r.get_key()] = r[\"value\"]\n+ else:\n+ response_id = \"%s_id\" % r.response_type\n+ self.results[type_].append(self.format_result({\n+ response_id: \"{0}|{1}\".format(get_simple_module_name(plugin), r.get_key()),\n \"details\": r\n }))\n- elif type_ == \"skip\":\n- self.rule_skips.append(r)\n- elif type_ == \"metadata_key\":\n- self.metadata_keys[r[\"key\"]] = r[\"value\"]\n \n \n class InsightsEvaluator(SingleEvaluator):\n", "issue": "[RFE] Results to include positive NACK - CVEs to which a system is NOT vulnerable\nUse case: VMaaS provides list of CVEs to which a system is vulnerable, based on list of packages' NEVRAs installed on the system. This is overridden by results from the rules engine evaluation, which can check if a certain port is closed, or a configuration value prevents exploit of a vulnerability. Unless the rules engine results includes \"we evaluated for vulnerability to CVE-x and the system is NOT vulnerable\", we (vulnerability-engine) won't know to override the VMaaS results that report the system is vulnerable based on the version of the package installed.\r\n\r\nVulnerability-engine will be able to assume that a system is no longer vulnerable to a rules engine-reported CVE if it was reported as vulnerable in previous evaluation. However, a new system checking in with a fix already in place... vulnerability-engine won't know that the vulnerability is mitigated by a config setting, closed port, etc. 
unless rules engine explicitly reports a CVE that was evaluated and found the system NOT vulnerable.\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom ..formats import Formatter\nfrom ..specs import Specs\nfrom ..combiners.hostname import hostname as combiner_hostname\nfrom ..parsers.branch_info import BranchInfo\nfrom . import dr, plugins\n\nlog = logging.getLogger(__name__)\n\n\ndef get_simple_module_name(obj):\n return dr.BASE_MODULE_NAMES.get(obj, None)\n\n\nclass Evaluator(Formatter):\n def __init__(self, broker=None, stream=sys.stdout, incremental=False):\n super(Evaluator, self).__init__(broker or dr.Broker(), stream)\n self.rule_skips = []\n self.rule_results = []\n self.fingerprint_results = []\n self.hostname = None\n self.metadata = {}\n self.metadata_keys = {}\n self.incremental = incremental\n\n def observer(self, comp, broker):\n if comp is combiner_hostname and comp in broker:\n self.hostname = broker[comp].fqdn\n\n if plugins.is_rule(comp) and comp in broker:\n self.handle_result(comp, broker[comp])\n\n def preprocess(self):\n self.broker.add_observer(self.observer)\n\n def run_serial(self, graph=None):\n dr.run(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker)\n\n def run_incremental(self, graph=None):\n for _ in dr.run_incremental(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker):\n pass\n\n def format_response(self, response):\n \"\"\"\n To be overridden by subclasses to format the response sent back to the\n client.\n \"\"\"\n return response\n\n def format_result(self, result):\n \"\"\"\n To be overridden by subclasses to format individual rule results.\n \"\"\"\n return result\n\n def process(self, graph=None):\n with self:\n if self.incremental:\n self.run_incremental(graph)\n else:\n self.run_serial(graph)\n return self.get_response()\n\n\nclass SingleEvaluator(Evaluator):\n def append_metadata(self, r):\n for k, v in r.items():\n if k != \"type\":\n self.metadata[k] = v\n\n def format_response(self, response):\n return response\n\n def get_response(self):\n r = dict(self.metadata_keys)\n r.update({\n \"system\": {\n \"metadata\": self.metadata,\n \"hostname\": self.hostname\n },\n \"reports\": self.rule_results,\n \"fingerprints\": self.fingerprint_results,\n \"skips\": self.rule_skips,\n })\n return self.format_response(r)\n\n def handle_result(self, plugin, r):\n type_ = r[\"type\"]\n if type_ == \"metadata\":\n self.append_metadata(r)\n elif type_ == \"rule\":\n self.rule_results.append(self.format_result({\n \"rule_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"error_key\"]),\n \"details\": r\n }))\n elif type_ == \"fingerprint\":\n self.fingerprint_results.append(self.format_result({\n \"fingerprint_id\": \"{0}|{1}\".format(get_simple_module_name(plugin), r[\"fingerprint_key\"]),\n \"details\": r\n }))\n elif type_ == \"skip\":\n self.rule_skips.append(r)\n elif type_ == \"metadata_key\":\n self.metadata_keys[r[\"key\"]] = r[\"value\"]\n\n\nclass InsightsEvaluator(SingleEvaluator):\n def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=False):\n super(InsightsEvaluator, self).__init__(broker, stream=sys.stdout, incremental=incremental)\n self.system_id = system_id\n self.branch_info = {}\n self.product = \"rhel\"\n self.type = \"host\"\n self.release = None\n\n def observer(self, comp, broker):\n super(InsightsEvaluator, self).observer(comp, broker)\n if comp is Specs.machine_id and comp in broker:\n self.system_id = broker[Specs.machine_id].content[0].strip()\n\n if comp is 
Specs.redhat_release and comp in broker:\n self.release = broker[comp].content[0].strip()\n\n if comp is BranchInfo and BranchInfo in broker:\n self.branch_info = broker[comp].data\n\n if comp is Specs.metadata_json and comp in broker:\n md = broker[comp]\n self.product = md.get(\"product_code\")\n self.type = md.get(\"role\")\n\n def format_result(self, result):\n result[\"system_id\"] = self.system_id\n return result\n\n def format_response(self, response):\n system = response[\"system\"]\n system[\"remote_branch\"] = self.branch_info.get(\"remote_branch\")\n system[\"remote_leaf\"] = self.branch_info.get(\"remote_leaf\")\n system[\"system_id\"] = self.system_id\n system[\"product\"] = self.product\n system[\"type\"] = self.type\n if self.release:\n system[\"metadata\"][\"release\"] = self.release\n\n return response\n", "path": "insights/core/evaluators.py"}], "after_files": [{"content": "import logging\nimport six\nimport sys\n\nfrom collections import defaultdict\n\nfrom ..formats import Formatter\nfrom ..specs import Specs\nfrom ..combiners.hostname import hostname as combiner_hostname\nfrom ..parsers.branch_info import BranchInfo\nfrom . import dr, plugins\n\nlog = logging.getLogger(__name__)\n\n\ndef get_simple_module_name(obj):\n return dr.BASE_MODULE_NAMES.get(obj, None)\n\n\nclass Evaluator(Formatter):\n def __init__(self, broker=None, stream=sys.stdout, incremental=False):\n super(Evaluator, self).__init__(broker or dr.Broker(), stream)\n self.results = defaultdict(list)\n self.rule_skips = []\n self.hostname = None\n self.metadata = {}\n self.metadata_keys = {}\n self.incremental = incremental\n\n def observer(self, comp, broker):\n if comp is combiner_hostname and comp in broker:\n self.hostname = broker[comp].fqdn\n\n if plugins.is_rule(comp) and comp in broker:\n self.handle_result(comp, broker[comp])\n\n def preprocess(self):\n self.broker.add_observer(self.observer)\n\n def run_serial(self, graph=None):\n dr.run(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker)\n\n def run_incremental(self, graph=None):\n for _ in dr.run_incremental(graph or dr.COMPONENTS[dr.GROUPS.single], broker=self.broker):\n pass\n\n def format_response(self, response):\n \"\"\"\n To be overridden by subclasses to format the response sent back to the\n client.\n \"\"\"\n return response\n\n def format_result(self, result):\n \"\"\"\n To be overridden by subclasses to format individual rule results.\n \"\"\"\n return result\n\n def process(self, graph=None):\n with self:\n if self.incremental:\n self.run_incremental(graph)\n else:\n self.run_serial(graph)\n return self.get_response()\n\n\nclass SingleEvaluator(Evaluator):\n def append_metadata(self, r):\n for k, v in r.items():\n if k != \"type\":\n self.metadata[k] = v\n\n def format_response(self, response):\n return response\n\n def get_response(self):\n r = dict(self.metadata_keys)\n r.update({\n \"system\": {\n \"metadata\": self.metadata,\n \"hostname\": self.hostname\n },\n \"reports\": self.results[\"rule\"],\n \"fingerprints\": self.results[\"fingerprint\"],\n \"skips\": self.rule_skips,\n })\n\n for k, v in six.iteritems(self.results):\n if k not in (\"rule\", \"fingerprint\"):\n r[k] = v\n\n return self.format_response(r)\n\n def handle_result(self, plugin, r):\n type_ = r[\"type\"]\n\n if type_ == \"skip\":\n self.rule_skips.append(r)\n elif type_ == \"metadata\":\n self.append_metadata(r)\n elif type_ == \"metadata_key\":\n self.metadata_keys[r.get_key()] = r[\"value\"]\n else:\n response_id = \"%s_id\" % r.response_type\n 
self.results[type_].append(self.format_result({\n response_id: \"{0}|{1}\".format(get_simple_module_name(plugin), r.get_key()),\n \"details\": r\n }))\n\n\nclass InsightsEvaluator(SingleEvaluator):\n def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=False):\n super(InsightsEvaluator, self).__init__(broker, stream=sys.stdout, incremental=incremental)\n self.system_id = system_id\n self.branch_info = {}\n self.product = \"rhel\"\n self.type = \"host\"\n self.release = None\n\n def observer(self, comp, broker):\n super(InsightsEvaluator, self).observer(comp, broker)\n if comp is Specs.machine_id and comp in broker:\n self.system_id = broker[Specs.machine_id].content[0].strip()\n\n if comp is Specs.redhat_release and comp in broker:\n self.release = broker[comp].content[0].strip()\n\n if comp is BranchInfo and BranchInfo in broker:\n self.branch_info = broker[comp].data\n\n if comp is Specs.metadata_json and comp in broker:\n md = broker[comp]\n self.product = md.get(\"product_code\")\n self.type = md.get(\"role\")\n\n def format_result(self, result):\n result[\"system_id\"] = self.system_id\n return result\n\n def format_response(self, response):\n system = response[\"system\"]\n system[\"remote_branch\"] = self.branch_info.get(\"remote_branch\")\n system[\"remote_leaf\"] = self.branch_info.get(\"remote_leaf\")\n system[\"system_id\"] = self.system_id\n system[\"product\"] = self.product\n system[\"type\"] = self.type\n if self.release:\n system[\"metadata\"][\"release\"] = self.release\n\n return response\n", "path": "insights/core/evaluators.py"}]} | 1,885 | 621 |
gh_patches_debug_4530 | rasdani/github-patches | git_diff | ivy-llc__ivy-16060 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cross — add the `cross` (vector cross product) function to Ivy's Paddle frontend (`ivy/functional/frontends/paddle/tensor/linalg.py`).
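For context, this is one of the Paddle-frontend ToDo items. A minimal sketch of the intended semantics, leaning on the `ivy.cross` call that the patch below uses — the sample values and the `axis=-1` choice are illustrative only:

```python
import ivy

x = ivy.array([1.0, 0.0, 0.0])
y = ivy.array([0.0, 1.0, 0.0])

# Cross product taken along the axis holding the 3 components.
print(ivy.cross(x, y, axis=-1))  # expected: [0., 0., 1.]
```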
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/linalg.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle import promote_types_of_paddle_inputs
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 # matmul
11 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
12 @to_ivy_arrays_and_back
13 def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
14 x, y = promote_types_of_paddle_inputs(x, y)
15 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
16
17
18 # norm
19 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
20 @to_ivy_arrays_and_back
21 def norm(x, p="fro", axis=None, keepdim=False, name=None):
22 if axis is None and p is not None:
23 if p == "fro":
24 p = 2
25 ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)
26 if keepdim:
27 ret = ret.reshape([1] * len(x.shape))
28 if len(ret.shape) == 0:
29 return ivy.array([ret])
30 return ret
31
32 if isinstance(axis, tuple):
33 axis = list(axis)
34 if isinstance(axis, list) and len(axis) == 1:
35 axis = axis[0]
36
37 if isinstance(axis, int):
38 if p == "fro":
39 p = 2
40 if p in [0, 1, 2, ivy.inf, -ivy.inf]:
41 ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)
42 elif isinstance(p, (int, float)):
43 ret = ivy.pow(
44 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
45 float(1.0 / p),
46 )
47
48 elif isinstance(axis, list) and len(axis) == 2:
49 if p == 0:
50 raise ValueError
51 elif p == 1:
52 ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)
53 elif p == 2 or p == "fro":
54 ret = ivy.matrix_norm(x, ord="fro", axis=axis, keepdims=keepdim)
55 elif p == ivy.inf:
56 ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)
57 elif p == -ivy.inf:
58 ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)
59 elif isinstance(p, (int, float)) and p > 0:
60 ret = ivy.pow(
61 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
62 float(1.0 / p),
63 )
64 else:
65 raise ValueError
66
67 else:
68 raise ValueError
69
70 if len(ret.shape) == 0:
71 ret = ivy.array(
72 [ret]
73 ) # this is done so as to match shape of output from paddle
74 return ret
75
76
77 # eig
78 @to_ivy_arrays_and_back
79 def eig(x, name=None):
80 return ivy.eig(x)
81
82
83 # eigvals
84 @to_ivy_arrays_and_back
85 def eigvals(x, name=None):
86 return ivy.eigvals(x)
87
88
89 # eigvalsh
90 @to_ivy_arrays_and_back
91 def eigvalsh(x, UPLO="L", name=None):
92 return ivy.eigvalsh(x, UPLO=UPLO)
93
94
95 # eigh
96 @to_ivy_arrays_and_back
97 def eigh(x, UPLO="L", name=None):
98 return ivy.eigh(x, UPLO=UPLO)
99
100
101 # pinv
102 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
103 @to_ivy_arrays_and_back
104 def pinv(x, rcond=1e-15, hermitian=False, name=None):
105 # TODO: Add hermitian functionality
106 return ivy.pinv(x, rtol=rcond)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py
--- a/ivy/functional/frontends/paddle/tensor/linalg.py
+++ b/ivy/functional/frontends/paddle/tensor/linalg.py
@@ -7,6 +7,15 @@
)
+@with_supported_dtypes(
+ {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle"
+)
+@to_ivy_arrays_and_back
+def cross(x, y, /, *, axis=9, name=None):
+ x, y = promote_types_of_paddle_inputs(x, y)
+ return ivy.cross(x, y, axis=axis)
+
+
# matmul
@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py\n--- a/ivy/functional/frontends/paddle/tensor/linalg.py\n+++ b/ivy/functional/frontends/paddle/tensor/linalg.py\n@@ -7,6 +7,15 @@\n )\n \n \n+@with_supported_dtypes(\n+ {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n+)\n+@to_ivy_arrays_and_back\n+def cross(x, y, /, *, axis=9, name=None):\n+ x, y = promote_types_of_paddle_inputs(x, y)\n+ return ivy.cross(x, y, axis=axis)\n+\n+\n # matmul\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n", "issue": "cross\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n", 
"path": "ivy/functional/frontends/paddle/tensor/linalg.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.4.2 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}]} | 1,438 | 223 |
gh_patches_debug_22157 | rasdani/github-patches | git_diff | lutris__lutris-1197 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change "Import Games" to something more clear (like "Configure library importing")
I personally feel like the current name for that menu is confusing, misleading and doesn't represent its actual purpose. I think something like "Configure library importing" would describe the menu much better, but if you disagree, any suggestions are appreciated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/gui/sync.py`
Content:
```
1 import gi
2 gi.require_version('Gtk', '3.0')
3 from gi.repository import Gtk, Gio
4
5 from lutris.gui.widgets.utils import get_runner_icon
6 from lutris.gui.dialogs import NoticeDialog
7 from lutris.services import get_services
8 from lutris.settings import read_setting, write_setting
9 from lutris.util.jobs import AsyncCall
10
11
12 class ServiceSyncRow(Gtk.HBox):
13
14 def __init__(self, service):
15 super(ServiceSyncRow, self).__init__()
16 self.set_spacing(20)
17
18 self.identifier = service.__name__.split('.')[-1]
19 name = service.NAME
20
21 icon = get_runner_icon(self.identifier)
22 self.pack_start(icon, False, False, 0)
23
24 label = Gtk.Label(xalign=0)
25 label.set_markup("<b>{}</b>".format(name))
26 self.pack_start(label, True, True, 0)
27
28 actions = Gtk.VBox()
29 self.pack_start(actions, False, False, 0)
30
31 sync_switch = Gtk.Switch()
32 sync_switch.set_tooltip_text("Sync when Lutris starts")
33 sync_switch.props.valign = Gtk.Align.CENTER
34 sync_switch.connect('notify::active', self.on_switch_changed)
35 if read_setting('sync_at_startup', self.identifier) == 'True':
36 sync_switch.set_state(True)
37 actions.pack_start(sync_switch, False, False, 0)
38
39 sync_button = Gtk.Button("Sync")
40 sync_button.set_tooltip_text("Sync now")
41 sync_button.connect('clicked', self.on_sync_button_clicked, service.sync_with_lutris)
42 actions.pack_start(sync_button, False, False, 0)
43
44 def on_sync_button_clicked(self, button, sync_method):
45 AsyncCall(sync_method, callback=self.on_service_synced)
46
47 def on_service_synced(self, caller, data):
48 parent = self.get_toplevel()
49 if not isinstance(parent, Gtk.Window):
50 # The sync dialog may have closed
51 parent = Gio.Application.get_default().props.active_window
52 NoticeDialog("Games synced", parent=parent)
53
54 def on_switch_changed(self, switch, data):
55 state = switch.get_active()
56 write_setting('sync_at_startup', state, self.identifier)
57
58
59 class SyncServiceDialog(Gtk.Dialog):
60
61 def __init__(self, parent=None):
62 Gtk.Dialog.__init__(self, title="Import local games", parent=parent)
63 self.connect("delete-event", lambda *x: self.destroy())
64 self.set_border_width(10)
65 self.set_size_request(512, 0)
66
67 box_outer = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
68 self.get_content_area().add(box_outer)
69
70 description_label = Gtk.Label()
71 description_label.set_markup("You can import games from local game sources, \n"
72 "you can also choose to sync everytime Lutris starts")
73 box_outer.pack_start(description_label, False, False, 5)
74
75 separator = Gtk.Separator()
76 box_outer.pack_start(separator, False, False, 0)
77
78 for service in get_services():
79 sync_row = ServiceSyncRow(service)
80 box_outer.pack_start(sync_row, False, True, 0)
81 box_outer.show_all()
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/gui/sync.py b/lutris/gui/sync.py
--- a/lutris/gui/sync.py
+++ b/lutris/gui/sync.py
@@ -59,7 +59,7 @@
class SyncServiceDialog(Gtk.Dialog):
def __init__(self, parent=None):
- Gtk.Dialog.__init__(self, title="Import local games", parent=parent)
+ Gtk.Dialog.__init__(self, title="Configure local game import", parent=parent)
self.connect("delete-event", lambda *x: self.destroy())
self.set_border_width(10)
self.set_size_request(512, 0)
@@ -68,8 +68,8 @@
self.get_content_area().add(box_outer)
description_label = Gtk.Label()
- description_label.set_markup("You can import games from local game sources, \n"
- "you can also choose to sync everytime Lutris starts")
+ description_label.set_markup("You can choose which local game sources will get synced each\n"
+ "time Lutris starts, or launch an immediate import of games.")
box_outer.pack_start(description_label, False, False, 5)
separator = Gtk.Separator()
| {"golden_diff": "diff --git a/lutris/gui/sync.py b/lutris/gui/sync.py\n--- a/lutris/gui/sync.py\n+++ b/lutris/gui/sync.py\n@@ -59,7 +59,7 @@\n class SyncServiceDialog(Gtk.Dialog):\n \n def __init__(self, parent=None):\n- Gtk.Dialog.__init__(self, title=\"Import local games\", parent=parent)\n+ Gtk.Dialog.__init__(self, title=\"Configure local game import\", parent=parent)\n self.connect(\"delete-event\", lambda *x: self.destroy())\n self.set_border_width(10)\n self.set_size_request(512, 0)\n@@ -68,8 +68,8 @@\n self.get_content_area().add(box_outer)\n \n description_label = Gtk.Label()\n- description_label.set_markup(\"You can import games from local game sources, \\n\"\n- \"you can also choose to sync everytime Lutris starts\")\n+ description_label.set_markup(\"You can choose which local game sources will get synced each\\n\"\n+ \"time Lutris starts, or launch an immediate import of games.\")\n box_outer.pack_start(description_label, False, False, 5)\n \n separator = Gtk.Separator()\n", "issue": "Change \"Import Games\" to something more clear (like \"Configure library importing\")\nI personally feel like the current name for that menu is confusing, misleading and does't represent its actual purpose. I personally think something like \"Configure library importing\" will describe the menu much better, but if you disagree, any suggestions are appreciated.\n", "before_files": [{"content": "import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio\n\nfrom lutris.gui.widgets.utils import get_runner_icon\nfrom lutris.gui.dialogs import NoticeDialog\nfrom lutris.services import get_services\nfrom lutris.settings import read_setting, write_setting\nfrom lutris.util.jobs import AsyncCall\n\n\nclass ServiceSyncRow(Gtk.HBox):\n\n def __init__(self, service):\n super(ServiceSyncRow, self).__init__()\n self.set_spacing(20)\n\n self.identifier = service.__name__.split('.')[-1]\n name = service.NAME\n\n icon = get_runner_icon(self.identifier)\n self.pack_start(icon, False, False, 0)\n\n label = Gtk.Label(xalign=0)\n label.set_markup(\"<b>{}</b>\".format(name))\n self.pack_start(label, True, True, 0)\n\n actions = Gtk.VBox()\n self.pack_start(actions, False, False, 0)\n\n sync_switch = Gtk.Switch()\n sync_switch.set_tooltip_text(\"Sync when Lutris starts\")\n sync_switch.props.valign = Gtk.Align.CENTER\n sync_switch.connect('notify::active', self.on_switch_changed)\n if read_setting('sync_at_startup', self.identifier) == 'True':\n sync_switch.set_state(True)\n actions.pack_start(sync_switch, False, False, 0)\n\n sync_button = Gtk.Button(\"Sync\")\n sync_button.set_tooltip_text(\"Sync now\")\n sync_button.connect('clicked', self.on_sync_button_clicked, service.sync_with_lutris)\n actions.pack_start(sync_button, False, False, 0)\n\n def on_sync_button_clicked(self, button, sync_method):\n AsyncCall(sync_method, callback=self.on_service_synced)\n\n def on_service_synced(self, caller, data):\n parent = self.get_toplevel()\n if not isinstance(parent, Gtk.Window):\n # The sync dialog may have closed\n parent = Gio.Application.get_default().props.active_window\n NoticeDialog(\"Games synced\", parent=parent)\n\n def on_switch_changed(self, switch, data):\n state = switch.get_active()\n write_setting('sync_at_startup', state, self.identifier)\n\n\nclass SyncServiceDialog(Gtk.Dialog):\n\n def __init__(self, parent=None):\n Gtk.Dialog.__init__(self, title=\"Import local games\", parent=parent)\n self.connect(\"delete-event\", lambda *x: self.destroy())\n self.set_border_width(10)\n 
self.set_size_request(512, 0)\n\n box_outer = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n self.get_content_area().add(box_outer)\n\n description_label = Gtk.Label()\n description_label.set_markup(\"You can import games from local game sources, \\n\"\n \"you can also choose to sync everytime Lutris starts\")\n box_outer.pack_start(description_label, False, False, 5)\n\n separator = Gtk.Separator()\n box_outer.pack_start(separator, False, False, 0)\n\n for service in get_services():\n sync_row = ServiceSyncRow(service)\n box_outer.pack_start(sync_row, False, True, 0)\n box_outer.show_all()\n", "path": "lutris/gui/sync.py"}], "after_files": [{"content": "import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio\n\nfrom lutris.gui.widgets.utils import get_runner_icon\nfrom lutris.gui.dialogs import NoticeDialog\nfrom lutris.services import get_services\nfrom lutris.settings import read_setting, write_setting\nfrom lutris.util.jobs import AsyncCall\n\n\nclass ServiceSyncRow(Gtk.HBox):\n\n def __init__(self, service):\n super(ServiceSyncRow, self).__init__()\n self.set_spacing(20)\n\n self.identifier = service.__name__.split('.')[-1]\n name = service.NAME\n\n icon = get_runner_icon(self.identifier)\n self.pack_start(icon, False, False, 0)\n\n label = Gtk.Label(xalign=0)\n label.set_markup(\"<b>{}</b>\".format(name))\n self.pack_start(label, True, True, 0)\n\n actions = Gtk.VBox()\n self.pack_start(actions, False, False, 0)\n\n sync_switch = Gtk.Switch()\n sync_switch.set_tooltip_text(\"Sync when Lutris starts\")\n sync_switch.props.valign = Gtk.Align.CENTER\n sync_switch.connect('notify::active', self.on_switch_changed)\n if read_setting('sync_at_startup', self.identifier) == 'True':\n sync_switch.set_state(True)\n actions.pack_start(sync_switch, False, False, 0)\n\n sync_button = Gtk.Button(\"Sync\")\n sync_button.set_tooltip_text(\"Sync now\")\n sync_button.connect('clicked', self.on_sync_button_clicked, service.sync_with_lutris)\n actions.pack_start(sync_button, False, False, 0)\n\n def on_sync_button_clicked(self, button, sync_method):\n AsyncCall(sync_method, callback=self.on_service_synced)\n\n def on_service_synced(self, caller, data):\n parent = self.get_toplevel()\n if not isinstance(parent, Gtk.Window):\n # The sync dialog may have closed\n parent = Gio.Application.get_default().props.active_window\n NoticeDialog(\"Games synced\", parent=parent)\n\n def on_switch_changed(self, switch, data):\n state = switch.get_active()\n write_setting('sync_at_startup', state, self.identifier)\n\n\nclass SyncServiceDialog(Gtk.Dialog):\n\n def __init__(self, parent=None):\n Gtk.Dialog.__init__(self, title=\"Configure local game import\", parent=parent)\n self.connect(\"delete-event\", lambda *x: self.destroy())\n self.set_border_width(10)\n self.set_size_request(512, 0)\n\n box_outer = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n self.get_content_area().add(box_outer)\n\n description_label = Gtk.Label()\n description_label.set_markup(\"You can choose which local game sources will get synced each\\n\"\n \"time Lutris starts, or launch an immediate import of games.\")\n box_outer.pack_start(description_label, False, False, 5)\n\n separator = Gtk.Separator()\n box_outer.pack_start(separator, False, False, 0)\n\n for service in get_services():\n sync_row = ServiceSyncRow(service)\n box_outer.pack_start(sync_row, False, True, 0)\n box_outer.show_all()\n", "path": "lutris/gui/sync.py"}]} | 1,168 | 269 |
gh_patches_debug_38060 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5102 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Costa Coffee (US) which has switched from Amasty to a JSON API
In the latest weekly run, Costa Coffee (costacoffee_us) is now broken because the Amasty "amlocator" AJAX endpoint no longer exists. In its place appears to be a JSON API that returns store details (locations and addresses):
https://us.costacoffee.com/api/cf/?locale=en-US&include=2&content_type=storeLocatorStore&limit=500&fields.location[near]=33.77804102,-84.38068933
This appears to be a fairly easy fix: rewrite the spider to use the new JSON API (with a higher limit than 500).
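A minimal sketch of how the new endpoint could be paged through — the `items` / `sys.id` / `fields` / `total` / `skip` response shape is inferred from typical Contentful-style APIs and should be verified against a live response:

```python
import requests

BASE = "https://us.costacoffee.com/api/cf/?content_type=storeLocatorStore"

def iter_stores(page_size=1000):
    offset = 0
    while True:
        data = requests.get(f"{BASE}&limit={page_size}&skip={offset}").json()
        for entry in data.get("items", []):
            fields = entry["fields"]
            yield entry["sys"]["id"], fields.get("storeAddress"), fields.get("location")
        offset += data.get("limit", page_size)
        if offset >= data.get("total", 0):
            break
```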
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/costacoffee_us.py`
Content:
```
1 import json
2 import re
3
4 import scrapy
5 from scrapy import Selector
6
7 from locations.categories import Categories, apply_category
8 from locations.items import Feature
9
10
11 class CostaCoffeeUSSpider(scrapy.Spider):
12 name = "costacoffee_us"
13 item_attributes = {"brand": "Costa Coffee", "brand_wikidata": "Q608845"}
14 allowed_domains = ["us.costacoffee.com"]
15 start_urls = ["https://us.costacoffee.com/amlocator/index/ajax"]
16
17 def parse(self, response):
18 script = response.xpath('//script[contains(text(), "amLocator")]/text()').extract_first()
19
20 start = script.index("jsonLocations: ") + len("jsonLocations: ")
21 stop = script.index("imageLocations")
22
23 locations = script[start:stop].strip().strip(",")
24 items = json.loads(locations)["items"]
25
26 for store in items:
27 item = Feature()
28 item["ref"] = store["id"]
29 item["lat"] = store["lat"]
30 item["lon"] = store["lng"]
31
32 html = Selector(text=store["popup_html"])
33
34 item["name"] = html.xpath('//*[@class="amlocator-title"]/text()').get()
35
36 for line in html.xpath('//div[@class="amlocator-info-popup"]/text()').getall():
37 line = line.strip()
38 if m := re.match(r"City: (.*)", line):
39 item["city"] = m.group(1)
40 elif m := re.match(r"Zip: (.*)", line):
41 item["postcode"] = m.group(1)
42 elif m := re.match(r"Address: (.*)", line):
43 item["street_address"] = m.group(1)
44 elif m := re.match(r"State: (.*)", line):
45 item["state"] = m.group(1)
46
47 apply_category(Categories.COFFEE_SHOP, item)
48
49 yield item
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/costacoffee_us.py b/locations/spiders/costacoffee_us.py
--- a/locations/spiders/costacoffee_us.py
+++ b/locations/spiders/costacoffee_us.py
@@ -1,49 +1,38 @@
-import json
-import re
-
-import scrapy
-from scrapy import Selector
+from scrapy import Spider
+from scrapy.http import JsonRequest
from locations.categories import Categories, apply_category
-from locations.items import Feature
+from locations.dict_parser import DictParser
+from locations.hours import DAYS_FULL, OpeningHours
-class CostaCoffeeUSSpider(scrapy.Spider):
+class CostaCoffeeUSSpider(Spider):
name = "costacoffee_us"
item_attributes = {"brand": "Costa Coffee", "brand_wikidata": "Q608845"}
allowed_domains = ["us.costacoffee.com"]
- start_urls = ["https://us.costacoffee.com/amlocator/index/ajax"]
-
- def parse(self, response):
- script = response.xpath('//script[contains(text(), "amLocator")]/text()').extract_first()
-
- start = script.index("jsonLocations: ") + len("jsonLocations: ")
- stop = script.index("imageLocations")
-
- locations = script[start:stop].strip().strip(",")
- items = json.loads(locations)["items"]
-
- for store in items:
- item = Feature()
- item["ref"] = store["id"]
- item["lat"] = store["lat"]
- item["lon"] = store["lng"]
+ start_urls = ["https://us.costacoffee.com/api/cf/?content_type=storeLocatorStore"]
+ page_size = 1000
- html = Selector(text=store["popup_html"])
-
- item["name"] = html.xpath('//*[@class="amlocator-title"]/text()').get()
-
- for line in html.xpath('//div[@class="amlocator-info-popup"]/text()').getall():
- line = line.strip()
- if m := re.match(r"City: (.*)", line):
- item["city"] = m.group(1)
- elif m := re.match(r"Zip: (.*)", line):
- item["postcode"] = m.group(1)
- elif m := re.match(r"Address: (.*)", line):
- item["street_address"] = m.group(1)
- elif m := re.match(r"State: (.*)", line):
- item["state"] = m.group(1)
+ def start_requests(self):
+ for url in self.start_urls:
+ yield JsonRequest(url=f"{url}&limit={self.page_size}")
+ def parse(self, response):
+ for location in response.json()["items"]:
+ item = DictParser.parse(location["fields"])
+ item["ref"] = location["sys"]["id"]
+ item["addr_full"] = location["fields"]["storeAddress"]
+ item["opening_hours"] = OpeningHours()
+ for day_name in [s.lower() for s in DAYS_FULL]:
+ open_time = location["fields"].get(f"{day_name}Opening")
+ close_time = location["fields"].get(f"{day_name}Closing")
+ if open_time and "24 HOURS" in open_time.upper():
+ item["opening_hours"].add_range(day_name, "00:00", "24:00")
+ elif open_time and close_time:
+ item["opening_hours"].add_range(day_name, open_time, close_time)
apply_category(Categories.COFFEE_SHOP, item)
-
yield item
+
+ offset = response.json()["skip"]
+ if offset + response.json()["limit"] < response.json()["total"]:
+ yield JsonRequest(url=f"{response.request.url}&limit={self.page_size}&offset={offset}")
| {"golden_diff": "diff --git a/locations/spiders/costacoffee_us.py b/locations/spiders/costacoffee_us.py\n--- a/locations/spiders/costacoffee_us.py\n+++ b/locations/spiders/costacoffee_us.py\n@@ -1,49 +1,38 @@\n-import json\n-import re\n-\n-import scrapy\n-from scrapy import Selector\n+from scrapy import Spider\n+from scrapy.http import JsonRequest\n \n from locations.categories import Categories, apply_category\n-from locations.items import Feature\n+from locations.dict_parser import DictParser\n+from locations.hours import DAYS_FULL, OpeningHours\n \n \n-class CostaCoffeeUSSpider(scrapy.Spider):\n+class CostaCoffeeUSSpider(Spider):\n name = \"costacoffee_us\"\n item_attributes = {\"brand\": \"Costa Coffee\", \"brand_wikidata\": \"Q608845\"}\n allowed_domains = [\"us.costacoffee.com\"]\n- start_urls = [\"https://us.costacoffee.com/amlocator/index/ajax\"]\n-\n- def parse(self, response):\n- script = response.xpath('//script[contains(text(), \"amLocator\")]/text()').extract_first()\n-\n- start = script.index(\"jsonLocations: \") + len(\"jsonLocations: \")\n- stop = script.index(\"imageLocations\")\n-\n- locations = script[start:stop].strip().strip(\",\")\n- items = json.loads(locations)[\"items\"]\n-\n- for store in items:\n- item = Feature()\n- item[\"ref\"] = store[\"id\"]\n- item[\"lat\"] = store[\"lat\"]\n- item[\"lon\"] = store[\"lng\"]\n+ start_urls = [\"https://us.costacoffee.com/api/cf/?content_type=storeLocatorStore\"]\n+ page_size = 1000\n \n- html = Selector(text=store[\"popup_html\"])\n-\n- item[\"name\"] = html.xpath('//*[@class=\"amlocator-title\"]/text()').get()\n-\n- for line in html.xpath('//div[@class=\"amlocator-info-popup\"]/text()').getall():\n- line = line.strip()\n- if m := re.match(r\"City: (.*)\", line):\n- item[\"city\"] = m.group(1)\n- elif m := re.match(r\"Zip: (.*)\", line):\n- item[\"postcode\"] = m.group(1)\n- elif m := re.match(r\"Address: (.*)\", line):\n- item[\"street_address\"] = m.group(1)\n- elif m := re.match(r\"State: (.*)\", line):\n- item[\"state\"] = m.group(1)\n+ def start_requests(self):\n+ for url in self.start_urls:\n+ yield JsonRequest(url=f\"{url}&limit={self.page_size}\")\n \n+ def parse(self, response):\n+ for location in response.json()[\"items\"]:\n+ item = DictParser.parse(location[\"fields\"])\n+ item[\"ref\"] = location[\"sys\"][\"id\"]\n+ item[\"addr_full\"] = location[\"fields\"][\"storeAddress\"]\n+ item[\"opening_hours\"] = OpeningHours()\n+ for day_name in [s.lower() for s in DAYS_FULL]:\n+ open_time = location[\"fields\"].get(f\"{day_name}Opening\")\n+ close_time = location[\"fields\"].get(f\"{day_name}Closing\")\n+ if open_time and \"24 HOURS\" in open_time.upper():\n+ item[\"opening_hours\"].add_range(day_name, \"00:00\", \"24:00\")\n+ elif open_time and close_time:\n+ item[\"opening_hours\"].add_range(day_name, open_time, close_time)\n apply_category(Categories.COFFEE_SHOP, item)\n-\n yield item\n+\n+ offset = response.json()[\"skip\"]\n+ if offset + response.json()[\"limit\"] < response.json()[\"total\"]:\n+ yield JsonRequest(url=f\"{response.request.url}&limit={self.page_size}&offset={offset}\")\n", "issue": "Fix Costa Coffee (US) which has switched from Amasty to a JSON API\nIn the latest weekly run, Costa Coffee (costacoffee_us) is now broken with the Amasty \"amlocator\" AJAX endpoint no longer existing. 
In it's place appears to be a JSON API that returns store details (locations and addresses):\r\n\r\nhttps://us.costacoffee.com/api/cf/?locale=en-US&include=2&content_type=storeLocatorStore&limit=500&fields.location[near]=33.77804102,-84.38068933\r\n\r\nThis appears to be a fairly easy fix to rewrite the spider to use the new JSON API (with a higher limit than 500).\n", "before_files": [{"content": "import json\nimport re\n\nimport scrapy\nfrom scrapy import Selector\n\nfrom locations.categories import Categories, apply_category\nfrom locations.items import Feature\n\n\nclass CostaCoffeeUSSpider(scrapy.Spider):\n name = \"costacoffee_us\"\n item_attributes = {\"brand\": \"Costa Coffee\", \"brand_wikidata\": \"Q608845\"}\n allowed_domains = [\"us.costacoffee.com\"]\n start_urls = [\"https://us.costacoffee.com/amlocator/index/ajax\"]\n\n def parse(self, response):\n script = response.xpath('//script[contains(text(), \"amLocator\")]/text()').extract_first()\n\n start = script.index(\"jsonLocations: \") + len(\"jsonLocations: \")\n stop = script.index(\"imageLocations\")\n\n locations = script[start:stop].strip().strip(\",\")\n items = json.loads(locations)[\"items\"]\n\n for store in items:\n item = Feature()\n item[\"ref\"] = store[\"id\"]\n item[\"lat\"] = store[\"lat\"]\n item[\"lon\"] = store[\"lng\"]\n\n html = Selector(text=store[\"popup_html\"])\n\n item[\"name\"] = html.xpath('//*[@class=\"amlocator-title\"]/text()').get()\n\n for line in html.xpath('//div[@class=\"amlocator-info-popup\"]/text()').getall():\n line = line.strip()\n if m := re.match(r\"City: (.*)\", line):\n item[\"city\"] = m.group(1)\n elif m := re.match(r\"Zip: (.*)\", line):\n item[\"postcode\"] = m.group(1)\n elif m := re.match(r\"Address: (.*)\", line):\n item[\"street_address\"] = m.group(1)\n elif m := re.match(r\"State: (.*)\", line):\n item[\"state\"] = m.group(1)\n\n apply_category(Categories.COFFEE_SHOP, item)\n\n yield item\n", "path": "locations/spiders/costacoffee_us.py"}], "after_files": [{"content": "from scrapy import Spider\nfrom scrapy.http import JsonRequest\n\nfrom locations.categories import Categories, apply_category\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS_FULL, OpeningHours\n\n\nclass CostaCoffeeUSSpider(Spider):\n name = \"costacoffee_us\"\n item_attributes = {\"brand\": \"Costa Coffee\", \"brand_wikidata\": \"Q608845\"}\n allowed_domains = [\"us.costacoffee.com\"]\n start_urls = [\"https://us.costacoffee.com/api/cf/?content_type=storeLocatorStore\"]\n page_size = 1000\n\n def start_requests(self):\n for url in self.start_urls:\n yield JsonRequest(url=f\"{url}&limit={self.page_size}\")\n\n def parse(self, response):\n for location in response.json()[\"items\"]:\n item = DictParser.parse(location[\"fields\"])\n item[\"ref\"] = location[\"sys\"][\"id\"]\n item[\"addr_full\"] = location[\"fields\"][\"storeAddress\"]\n item[\"opening_hours\"] = OpeningHours()\n for day_name in [s.lower() for s in DAYS_FULL]:\n open_time = location[\"fields\"].get(f\"{day_name}Opening\")\n close_time = location[\"fields\"].get(f\"{day_name}Closing\")\n if open_time and \"24 HOURS\" in open_time.upper():\n item[\"opening_hours\"].add_range(day_name, \"00:00\", \"24:00\")\n elif open_time and close_time:\n item[\"opening_hours\"].add_range(day_name, open_time, close_time)\n apply_category(Categories.COFFEE_SHOP, item)\n yield item\n\n offset = response.json()[\"skip\"]\n if offset + response.json()[\"limit\"] < response.json()[\"total\"]:\n yield 
JsonRequest(url=f\"{response.request.url}&limit={self.page_size}&offset={offset}\")\n", "path": "locations/spiders/costacoffee_us.py"}]} | 926 | 846 |
gh_patches_debug_6880 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid f-string in _warnings
### Description:
```
f4978b1149 skimage/_shared/_warnings.py (Jarrod Millman 2022-10-11 17:14:49 -0700 145) msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
c0a0490eed skimage/_shared/_warnings.py (Steven Silvester 2014-12-23 10:59:47 -0600 146) raise ValueError(msg)
```
That f-string cannot render correctly — the doubled braces turn the `join` expression into literal text, so `remaining` is never interpolated into the message.
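To make it concrete — and to show why a fix has to hoist the join out of the f-string (before Python 3.12 a backslash is not allowed inside an f-string expression, which is presumably why the braces were doubled in the first place):

```python
remaining = ["foo", "bar"]

# Current behaviour: doubled braces escape to literal "{" and "}", nothing is interpolated.
msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
print(msg)
# No warning raised matching:
# {'
# '.join(remaining)}

# Possible fix: build the joined string first, then interpolate it.
joined = "\n".join(remaining)
msg = f"No warning raised matching:\n{joined}"
print(msg)
# No warning raised matching:
# foo
# bar
```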
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/_shared/_warnings.py`
Content:
```
1 from contextlib import contextmanager
2 import sys
3 import warnings
4 import re
5 import functools
6 import os
7
8 __all__ = ['all_warnings', 'expected_warnings', 'warn']
9
10
11 # A version of `warnings.warn` with a default stacklevel of 2.
12 # functool is used so as not to increase the call stack accidentally
13 warn = functools.partial(warnings.warn, stacklevel=2)
14
15
16 @contextmanager
17 def all_warnings():
18 """
19 Context for use in testing to ensure that all warnings are raised.
20
21 Examples
22 --------
23 >>> import warnings
24 >>> def foo():
25 ... warnings.warn(RuntimeWarning("bar"), stacklevel=2)
26
27 We raise the warning once, while the warning filter is set to "once".
28 Hereafter, the warning is invisible, even with custom filters:
29
30 >>> with warnings.catch_warnings():
31 ... warnings.simplefilter('once')
32 ... foo() # doctest: +SKIP
33
34 We can now run ``foo()`` without a warning being raised:
35
36 >>> from numpy.testing import assert_warns
37 >>> foo() # doctest: +SKIP
38
39 To catch the warning, we call in the help of ``all_warnings``:
40
41 >>> with all_warnings():
42 ... assert_warns(RuntimeWarning, foo)
43 """
44 # _warnings.py is on the critical import path.
45 # Since this is a testing only function, we lazy import inspect.
46 import inspect
47 # Whenever a warning is triggered, Python adds a __warningregistry__
48 # member to the *calling* module. The exercise here is to find
49 # and eradicate all those breadcrumbs that were left lying around.
50 #
51 # We proceed by first searching all parent calling frames and explicitly
52 # clearing their warning registries (necessary for the doctests above to
53 # pass). Then, we search for all submodules of skimage and clear theirs
54 # as well (necessary for the skimage test suite to pass).
55
56 frame = inspect.currentframe()
57 if frame:
58 for f in inspect.getouterframes(frame):
59 f[0].f_locals['__warningregistry__'] = {}
60 del frame
61
62 for mod_name, mod in list(sys.modules.items()):
63 try:
64 mod.__warningregistry__.clear()
65 except AttributeError:
66 pass
67
68 with warnings.catch_warnings(record=True) as w:
69 warnings.simplefilter("always")
70 yield w
71
72
73 @contextmanager
74 def expected_warnings(matching):
75 r"""Context for use in testing to catch known warnings matching regexes
76
77 Parameters
78 ----------
79 matching : None or a list of strings or compiled regexes
80 Regexes for the desired warning to catch
81 If matching is None, this behaves as a no-op.
82
83 Examples
84 --------
85 >>> import numpy as np
86 >>> rng = np.random.default_rng()
87 >>> image = rng.integers(0, 2**16, size=(100, 100), dtype=np.uint16)
88 >>> # rank filters are slow when bit-depth exceeds 10 bits
89 >>> from skimage import filters
90 >>> with expected_warnings(['Bad rank filter performance']):
91 ... median_filtered = filters.rank.median(image)
92
93 Notes
94 -----
95 Uses `all_warnings` to ensure all warnings are raised.
96 Upon exiting, it checks the recorded warnings for the desired matching
97 pattern(s).
98 Raises a ValueError if any match was not found or an unexpected
99 warning was raised.
100 Allows for three types of behaviors: `and`, `or`, and `optional` matches.
101 This is done to accommodate different build environments or loop conditions
102 that may produce different warnings. The behaviors can be combined.
103 If you pass multiple patterns, you get an orderless `and`, where all of the
104 warnings must be raised.
105 If you use the `|` operator in a pattern, you can catch one of several
106 warnings.
107 Finally, you can use `|\A\Z` in a pattern to signify it as optional.
108
109 """
110 if isinstance(matching, str):
111 raise ValueError('``matching`` should be a list of strings and not '
112 'a string itself.')
113
114 # Special case for disabling the context manager
115 if matching is None:
116 yield None
117 return
118
119 strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')
120 if strict_warnings.lower() == 'true':
121 strict_warnings = True
122 elif strict_warnings.lower() == 'false':
123 strict_warnings = False
124 else:
125 strict_warnings = bool(int(strict_warnings))
126
127 with all_warnings() as w:
128 # enter context
129 yield w
130 # exited user context, check the recorded warnings
131 # Allow users to provide None
132 while None in matching:
133 matching.remove(None)
134 remaining = [m for m in matching if r'\A\Z' not in m.split('|')]
135 for warn in w:
136 found = False
137 for match in matching:
138 if re.search(match, str(warn.message)) is not None:
139 found = True
140 if match in remaining:
141 remaining.remove(match)
142 if strict_warnings and not found:
143 raise ValueError(f'Unexpected warning: {str(warn.message)}')
144 if strict_warnings and (len(remaining) > 0):
145 msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
146 raise ValueError(msg)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py
--- a/skimage/_shared/_warnings.py
+++ b/skimage/_shared/_warnings.py
@@ -142,5 +142,6 @@
if strict_warnings and not found:
raise ValueError(f'Unexpected warning: {str(warn.message)}')
if strict_warnings and (len(remaining) > 0):
- msg = f"No warning raised matching:\n{{'\n'.join(remaining)}}"
+ newline = "\n"
+ msg = f"No warning raised matching:{newline}{newline.join(remaining)}"
raise ValueError(msg)
| {"golden_diff": "diff --git a/skimage/_shared/_warnings.py b/skimage/_shared/_warnings.py\n--- a/skimage/_shared/_warnings.py\n+++ b/skimage/_shared/_warnings.py\n@@ -142,5 +142,6 @@\n if strict_warnings and not found:\n raise ValueError(f'Unexpected warning: {str(warn.message)}')\n if strict_warnings and (len(remaining) > 0):\n- msg = f\"No warning raised matching:\\n{{'\\n'.join(remaining)}}\"\n+ newline = \"\\n\"\n+ msg = f\"No warning raised matching:{newline}{newline.join(remaining)}\"\n raise ValueError(msg)\n", "issue": "Invalid f-string in _warnings\n### Description:\r\n\r\n```\r\nf4978b1149 skimage/_shared/_warnings.py (Jarrod Millman 2022-10-11 17:14:49 -0700 145) msg = f\"No warning raised matching:\\n{{'\\n'.join(remaining)}}\"\r\nc0a0490eed skimage/_shared/_warnings.py (Steven Silvester 2014-12-23 10:59:47 -0600 146) raise ValueError(msg)\r\n```\r\n\r\nThat f-string cannot render correctly.\r\n\r\n\n", "before_files": [{"content": "from contextlib import contextmanager\nimport sys\nimport warnings\nimport re\nimport functools\nimport os\n\n__all__ = ['all_warnings', 'expected_warnings', 'warn']\n\n\n# A version of `warnings.warn` with a default stacklevel of 2.\n# functool is used so as not to increase the call stack accidentally\nwarn = functools.partial(warnings.warn, stacklevel=2)\n\n\n@contextmanager\ndef all_warnings():\n \"\"\"\n Context for use in testing to ensure that all warnings are raised.\n\n Examples\n --------\n >>> import warnings\n >>> def foo():\n ... warnings.warn(RuntimeWarning(\"bar\"), stacklevel=2)\n\n We raise the warning once, while the warning filter is set to \"once\".\n Hereafter, the warning is invisible, even with custom filters:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('once')\n ... foo() # doctest: +SKIP\n\n We can now run ``foo()`` without a warning being raised:\n\n >>> from numpy.testing import assert_warns\n >>> foo() # doctest: +SKIP\n\n To catch the warning, we call in the help of ``all_warnings``:\n\n >>> with all_warnings():\n ... assert_warns(RuntimeWarning, foo)\n \"\"\"\n # _warnings.py is on the critical import path.\n # Since this is a testing only function, we lazy import inspect.\n import inspect\n # Whenever a warning is triggered, Python adds a __warningregistry__\n # member to the *calling* module. The exercise here is to find\n # and eradicate all those breadcrumbs that were left lying around.\n #\n # We proceed by first searching all parent calling frames and explicitly\n # clearing their warning registries (necessary for the doctests above to\n # pass). 
Then, we search for all submodules of skimage and clear theirs\n # as well (necessary for the skimage test suite to pass).\n\n frame = inspect.currentframe()\n if frame:\n for f in inspect.getouterframes(frame):\n f[0].f_locals['__warningregistry__'] = {}\n del frame\n\n for mod_name, mod in list(sys.modules.items()):\n try:\n mod.__warningregistry__.clear()\n except AttributeError:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n yield w\n\n\n@contextmanager\ndef expected_warnings(matching):\n r\"\"\"Context for use in testing to catch known warnings matching regexes\n\n Parameters\n ----------\n matching : None or a list of strings or compiled regexes\n Regexes for the desired warning to catch\n If matching is None, this behaves as a no-op.\n\n Examples\n --------\n >>> import numpy as np\n >>> rng = np.random.default_rng()\n >>> image = rng.integers(0, 2**16, size=(100, 100), dtype=np.uint16)\n >>> # rank filters are slow when bit-depth exceeds 10 bits\n >>> from skimage import filters\n >>> with expected_warnings(['Bad rank filter performance']):\n ... median_filtered = filters.rank.median(image)\n\n Notes\n -----\n Uses `all_warnings` to ensure all warnings are raised.\n Upon exiting, it checks the recorded warnings for the desired matching\n pattern(s).\n Raises a ValueError if any match was not found or an unexpected\n warning was raised.\n Allows for three types of behaviors: `and`, `or`, and `optional` matches.\n This is done to accommodate different build environments or loop conditions\n that may produce different warnings. The behaviors can be combined.\n If you pass multiple patterns, you get an orderless `and`, where all of the\n warnings must be raised.\n If you use the `|` operator in a pattern, you can catch one of several\n warnings.\n Finally, you can use `|\\A\\Z` in a pattern to signify it as optional.\n\n \"\"\"\n if isinstance(matching, str):\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n\n # Special case for disabling the context manager\n if matching is None:\n yield None\n return\n\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n elif strict_warnings.lower() == 'false':\n strict_warnings = False\n else:\n strict_warnings = bool(int(strict_warnings))\n\n with all_warnings() as w:\n # enter context\n yield w\n # exited user context, check the recorded warnings\n # Allow users to provide None\n while None in matching:\n matching.remove(None)\n remaining = [m for m in matching if r'\\A\\Z' not in m.split('|')]\n for warn in w:\n found = False\n for match in matching:\n if re.search(match, str(warn.message)) is not None:\n found = True\n if match in remaining:\n remaining.remove(match)\n if strict_warnings and not found:\n raise ValueError(f'Unexpected warning: {str(warn.message)}')\n if strict_warnings and (len(remaining) > 0):\n msg = f\"No warning raised matching:\\n{{'\\n'.join(remaining)}}\"\n raise ValueError(msg)\n", "path": "skimage/_shared/_warnings.py"}], "after_files": [{"content": "from contextlib import contextmanager\nimport sys\nimport warnings\nimport re\nimport functools\nimport os\n\n__all__ = ['all_warnings', 'expected_warnings', 'warn']\n\n\n# A version of `warnings.warn` with a default stacklevel of 2.\n# functool is used so as not to increase the call stack accidentally\nwarn = functools.partial(warnings.warn, stacklevel=2)\n\n\n@contextmanager\ndef 
all_warnings():\n \"\"\"\n Context for use in testing to ensure that all warnings are raised.\n\n Examples\n --------\n >>> import warnings\n >>> def foo():\n ... warnings.warn(RuntimeWarning(\"bar\"), stacklevel=2)\n\n We raise the warning once, while the warning filter is set to \"once\".\n Hereafter, the warning is invisible, even with custom filters:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('once')\n ... foo() # doctest: +SKIP\n\n We can now run ``foo()`` without a warning being raised:\n\n >>> from numpy.testing import assert_warns\n >>> foo() # doctest: +SKIP\n\n To catch the warning, we call in the help of ``all_warnings``:\n\n >>> with all_warnings():\n ... assert_warns(RuntimeWarning, foo)\n \"\"\"\n # _warnings.py is on the critical import path.\n # Since this is a testing only function, we lazy import inspect.\n import inspect\n # Whenever a warning is triggered, Python adds a __warningregistry__\n # member to the *calling* module. The exercise here is to find\n # and eradicate all those breadcrumbs that were left lying around.\n #\n # We proceed by first searching all parent calling frames and explicitly\n # clearing their warning registries (necessary for the doctests above to\n # pass). Then, we search for all submodules of skimage and clear theirs\n # as well (necessary for the skimage test suite to pass).\n\n frame = inspect.currentframe()\n if frame:\n for f in inspect.getouterframes(frame):\n f[0].f_locals['__warningregistry__'] = {}\n del frame\n\n for mod_name, mod in list(sys.modules.items()):\n try:\n mod.__warningregistry__.clear()\n except AttributeError:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n yield w\n\n\n@contextmanager\ndef expected_warnings(matching):\n r\"\"\"Context for use in testing to catch known warnings matching regexes\n\n Parameters\n ----------\n matching : None or a list of strings or compiled regexes\n Regexes for the desired warning to catch\n If matching is None, this behaves as a no-op.\n\n Examples\n --------\n >>> import numpy as np\n >>> rng = np.random.default_rng()\n >>> image = rng.integers(0, 2**16, size=(100, 100), dtype=np.uint16)\n >>> # rank filters are slow when bit-depth exceeds 10 bits\n >>> from skimage import filters\n >>> with expected_warnings(['Bad rank filter performance']):\n ... median_filtered = filters.rank.median(image)\n\n Notes\n -----\n Uses `all_warnings` to ensure all warnings are raised.\n Upon exiting, it checks the recorded warnings for the desired matching\n pattern(s).\n Raises a ValueError if any match was not found or an unexpected\n warning was raised.\n Allows for three types of behaviors: `and`, `or`, and `optional` matches.\n This is done to accommodate different build environments or loop conditions\n that may produce different warnings. 
The behaviors can be combined.\n If you pass multiple patterns, you get an orderless `and`, where all of the\n warnings must be raised.\n If you use the `|` operator in a pattern, you can catch one of several\n warnings.\n Finally, you can use `|\\A\\Z` in a pattern to signify it as optional.\n\n \"\"\"\n if isinstance(matching, str):\n raise ValueError('``matching`` should be a list of strings and not '\n 'a string itself.')\n\n # Special case for disabling the context manager\n if matching is None:\n yield None\n return\n\n strict_warnings = os.environ.get('SKIMAGE_TEST_STRICT_WARNINGS', '1')\n if strict_warnings.lower() == 'true':\n strict_warnings = True\n elif strict_warnings.lower() == 'false':\n strict_warnings = False\n else:\n strict_warnings = bool(int(strict_warnings))\n\n with all_warnings() as w:\n # enter context\n yield w\n # exited user context, check the recorded warnings\n # Allow users to provide None\n while None in matching:\n matching.remove(None)\n remaining = [m for m in matching if r'\\A\\Z' not in m.split('|')]\n for warn in w:\n found = False\n for match in matching:\n if re.search(match, str(warn.message)) is not None:\n found = True\n if match in remaining:\n remaining.remove(match)\n if strict_warnings and not found:\n raise ValueError(f'Unexpected warning: {str(warn.message)}')\n if strict_warnings and (len(remaining) > 0):\n newline = \"\\n\"\n msg = f\"No warning raised matching:{newline}{newline.join(remaining)}\"\n raise ValueError(msg)\n", "path": "skimage/_shared/_warnings.py"}]} | 1,903 | 144 |
gh_patches_debug_21453 | rasdani/github-patches | git_diff | mozmeao__basket-1036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add content-type header for Acoustic Transact requests
Transact customers using Oauth for submissions should add header:
`Content-Type : text/plain` or
`Content-Type : text/xml`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `basket/news/backends/acoustic.py`
Content:
```
1 import logging
2
3 from django.conf import settings
4 from django.utils.encoding import force_bytes
5
6 from lxml import etree
7 from requests import ConnectionError
8 from silverpop.api import Silverpop, SilverpopResponseException
9
10 logger = logging.getLogger(__name__)
11 XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
12
13
14 def process_response(resp):
15 logger.debug("Response: %s" % resp.text)
16 response = etree.fromstring(resp.text.encode("utf-8"))
17 failure = response.find(".//FAILURES/FAILURE")
18 if failure:
19 raise SilverpopResponseException(failure.attrib["description"])
20
21 fault = response.find(".//Fault/FaultString")
22 if fault:
23 raise SilverpopResponseException(fault.text)
24
25 return response
26
27
28 def process_tx_response(resp):
29 logger.debug("Response: %s" % resp.text)
30 response = etree.fromstring(resp.text.encode("utf-8"))
31 errors = response.findall(".//ERROR_STRING")
32 if errors:
33 for e in errors:
34 if e.text:
35 raise SilverpopResponseException(e.text)
36
37 return response
38
39
40 def xml_tag(tag, value=None, cdata=False, **attrs):
41 xmlt = etree.Element(tag, attrs)
42 if value:
43 if cdata:
44 xmlt.text = etree.CDATA(value)
45 else:
46 xmlt.text = value
47
48 return xmlt
49
50
51 def transact_xml(to, campaign_id, fields=None, bcc=None, save_to_db=False):
52 fields = fields or {}
53 bcc = bcc or []
54 if isinstance(bcc, str):
55 bcc = [bcc]
56
57 root = xml_tag("XTMAILING")
58 root.append(xml_tag("CAMPAIGN_ID", campaign_id))
59 if "transaction_id" in fields:
60 root.append(xml_tag("TRANSACTION_ID", fields["transaction_id"]))
61
62 root.append(xml_tag("SEND_AS_BATCH", "false"))
63 root.append(xml_tag("NO_RETRY_ON_FAILURE", "false"))
64 if fields and save_to_db:
65 save_cols_tag = xml_tag("SAVE_COLUMNS")
66 root.append(save_cols_tag)
67 for name in fields:
68 save_cols_tag.append(xml_tag("COLUMN_NAME", name))
69
70 recipient_tag = xml_tag("RECIPIENT")
71 root.append(recipient_tag)
72 recipient_tag.append(xml_tag("EMAIL", to))
73 for addr in bcc:
74 recipient_tag.append(xml_tag("BCC", addr))
75 recipient_tag.append(xml_tag("BODY_TYPE", "HTML"))
76 for name, value in fields.items():
77 p_tag = xml_tag("PERSONALIZATION")
78 p_tag.append(xml_tag("TAG_NAME", name))
79 p_tag.append(xml_tag("VALUE", value))
80 recipient_tag.append(p_tag)
81
82 return XML_HEADER + etree.tostring(root, encoding="unicode")
83
84
85 class Acoustic(Silverpop):
86 def _call(self, xml):
87 logger.debug("Request: %s" % xml)
88 try:
89 response = self.session.post(
90 self.api_endpoint,
91 data=force_bytes(xml),
92 timeout=10,
93 )
94 except ConnectionError:
95 # try one more time
96 response = self.session.post(
97 self.api_endpoint,
98 data=force_bytes(xml),
99 timeout=10,
100 )
101
102 return process_response(response)
103
104
105 class AcousticTransact(Silverpop):
106 api_xt_endpoint = "https://transact-campaign-us-%s.goacoustic.com/XTMail"
107
108 def __init__(self, client_id, client_secret, refresh_token, server_number):
109 self.api_xt_endpoint = self.api_xt_endpoint % server_number
110 super().__init__(client_id, client_secret, refresh_token, server_number)
111
112 def _call_xt(self, xml):
113 logger.debug("Request: %s" % xml)
114 response = self.session.post(
115 self.api_xt_endpoint,
116 data=force_bytes(xml),
117 timeout=10,
118 )
119 return process_tx_response(response)
120
121 def send_mail(self, to, campaign_id, fields=None, bcc=None, save_to_db=False):
122 self._call_xt(transact_xml(to, campaign_id, fields, bcc, save_to_db))
123
124
125 acoustic = Acoustic(
126 client_id=settings.ACOUSTIC_CLIENT_ID,
127 client_secret=settings.ACOUSTIC_CLIENT_SECRET,
128 refresh_token=settings.ACOUSTIC_REFRESH_TOKEN,
129 server_number=settings.ACOUSTIC_SERVER_NUMBER,
130 )
131 acoustic_tx = AcousticTransact(
132 client_id=settings.ACOUSTIC_TX_CLIENT_ID,
133 client_secret=settings.ACOUSTIC_TX_CLIENT_SECRET,
134 refresh_token=settings.ACOUSTIC_TX_REFRESH_TOKEN,
135 server_number=settings.ACOUSTIC_TX_SERVER_NUMBER,
136 )
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/basket/news/backends/acoustic.py b/basket/news/backends/acoustic.py
--- a/basket/news/backends/acoustic.py
+++ b/basket/news/backends/acoustic.py
@@ -90,6 +90,7 @@
self.api_endpoint,
data=force_bytes(xml),
timeout=10,
+ headers={"Content-Type": "text/xml"},
)
except ConnectionError:
# try one more time
@@ -97,6 +98,7 @@
self.api_endpoint,
data=force_bytes(xml),
timeout=10,
+ headers={"Content-Type": "text/xml"},
)
return process_response(response)
@@ -115,6 +117,7 @@
self.api_xt_endpoint,
data=force_bytes(xml),
timeout=10,
+ headers={"Content-Type": "text/xml"},
)
return process_tx_response(response)
| {"golden_diff": "diff --git a/basket/news/backends/acoustic.py b/basket/news/backends/acoustic.py\n--- a/basket/news/backends/acoustic.py\n+++ b/basket/news/backends/acoustic.py\n@@ -90,6 +90,7 @@\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n+ headers={\"Content-Type\": \"text/xml\"},\n )\n except ConnectionError:\n # try one more time\n@@ -97,6 +98,7 @@\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n+ headers={\"Content-Type\": \"text/xml\"},\n )\n \n return process_response(response)\n@@ -115,6 +117,7 @@\n self.api_xt_endpoint,\n data=force_bytes(xml),\n timeout=10,\n+ headers={\"Content-Type\": \"text/xml\"},\n )\n return process_tx_response(response)\n", "issue": "Add content-type header for Acoustic Transact requests\nTransact customers using Oauth for submissions should add header: \r\n\r\n`Content-Type : text/plain` or \r\n`Content-Type : text/xml`\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.utils.encoding import force_bytes\n\nfrom lxml import etree\nfrom requests import ConnectionError\nfrom silverpop.api import Silverpop, SilverpopResponseException\n\nlogger = logging.getLogger(__name__)\nXML_HEADER = '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>'\n\n\ndef process_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n failure = response.find(\".//FAILURES/FAILURE\")\n if failure:\n raise SilverpopResponseException(failure.attrib[\"description\"])\n\n fault = response.find(\".//Fault/FaultString\")\n if fault:\n raise SilverpopResponseException(fault.text)\n\n return response\n\n\ndef process_tx_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n errors = response.findall(\".//ERROR_STRING\")\n if errors:\n for e in errors:\n if e.text:\n raise SilverpopResponseException(e.text)\n\n return response\n\n\ndef xml_tag(tag, value=None, cdata=False, **attrs):\n xmlt = etree.Element(tag, attrs)\n if value:\n if cdata:\n xmlt.text = etree.CDATA(value)\n else:\n xmlt.text = value\n\n return xmlt\n\n\ndef transact_xml(to, campaign_id, fields=None, bcc=None, save_to_db=False):\n fields = fields or {}\n bcc = bcc or []\n if isinstance(bcc, str):\n bcc = [bcc]\n\n root = xml_tag(\"XTMAILING\")\n root.append(xml_tag(\"CAMPAIGN_ID\", campaign_id))\n if \"transaction_id\" in fields:\n root.append(xml_tag(\"TRANSACTION_ID\", fields[\"transaction_id\"]))\n\n root.append(xml_tag(\"SEND_AS_BATCH\", \"false\"))\n root.append(xml_tag(\"NO_RETRY_ON_FAILURE\", \"false\"))\n if fields and save_to_db:\n save_cols_tag = xml_tag(\"SAVE_COLUMNS\")\n root.append(save_cols_tag)\n for name in fields:\n save_cols_tag.append(xml_tag(\"COLUMN_NAME\", name))\n\n recipient_tag = xml_tag(\"RECIPIENT\")\n root.append(recipient_tag)\n recipient_tag.append(xml_tag(\"EMAIL\", to))\n for addr in bcc:\n recipient_tag.append(xml_tag(\"BCC\", addr))\n recipient_tag.append(xml_tag(\"BODY_TYPE\", \"HTML\"))\n for name, value in fields.items():\n p_tag = xml_tag(\"PERSONALIZATION\")\n p_tag.append(xml_tag(\"TAG_NAME\", name))\n p_tag.append(xml_tag(\"VALUE\", value))\n recipient_tag.append(p_tag)\n\n return XML_HEADER + etree.tostring(root, encoding=\"unicode\")\n\n\nclass Acoustic(Silverpop):\n def _call(self, xml):\n logger.debug(\"Request: %s\" % xml)\n try:\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n )\n except ConnectionError:\n # try 
one more time\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n )\n\n return process_response(response)\n\n\nclass AcousticTransact(Silverpop):\n api_xt_endpoint = \"https://transact-campaign-us-%s.goacoustic.com/XTMail\"\n\n def __init__(self, client_id, client_secret, refresh_token, server_number):\n self.api_xt_endpoint = self.api_xt_endpoint % server_number\n super().__init__(client_id, client_secret, refresh_token, server_number)\n\n def _call_xt(self, xml):\n logger.debug(\"Request: %s\" % xml)\n response = self.session.post(\n self.api_xt_endpoint,\n data=force_bytes(xml),\n timeout=10,\n )\n return process_tx_response(response)\n\n def send_mail(self, to, campaign_id, fields=None, bcc=None, save_to_db=False):\n self._call_xt(transact_xml(to, campaign_id, fields, bcc, save_to_db))\n\n\nacoustic = Acoustic(\n client_id=settings.ACOUSTIC_CLIENT_ID,\n client_secret=settings.ACOUSTIC_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_SERVER_NUMBER,\n)\nacoustic_tx = AcousticTransact(\n client_id=settings.ACOUSTIC_TX_CLIENT_ID,\n client_secret=settings.ACOUSTIC_TX_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_TX_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_TX_SERVER_NUMBER,\n)\n", "path": "basket/news/backends/acoustic.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.utils.encoding import force_bytes\n\nfrom lxml import etree\nfrom requests import ConnectionError\nfrom silverpop.api import Silverpop, SilverpopResponseException\n\nlogger = logging.getLogger(__name__)\nXML_HEADER = '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>'\n\n\ndef process_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n failure = response.find(\".//FAILURES/FAILURE\")\n if failure:\n raise SilverpopResponseException(failure.attrib[\"description\"])\n\n fault = response.find(\".//Fault/FaultString\")\n if fault:\n raise SilverpopResponseException(fault.text)\n\n return response\n\n\ndef process_tx_response(resp):\n logger.debug(\"Response: %s\" % resp.text)\n response = etree.fromstring(resp.text.encode(\"utf-8\"))\n errors = response.findall(\".//ERROR_STRING\")\n if errors:\n for e in errors:\n if e.text:\n raise SilverpopResponseException(e.text)\n\n return response\n\n\ndef xml_tag(tag, value=None, cdata=False, **attrs):\n xmlt = etree.Element(tag, attrs)\n if value:\n if cdata:\n xmlt.text = etree.CDATA(value)\n else:\n xmlt.text = value\n\n return xmlt\n\n\ndef transact_xml(to, campaign_id, fields=None, bcc=None, save_to_db=False):\n fields = fields or {}\n bcc = bcc or []\n if isinstance(bcc, str):\n bcc = [bcc]\n\n root = xml_tag(\"XTMAILING\")\n root.append(xml_tag(\"CAMPAIGN_ID\", campaign_id))\n if \"transaction_id\" in fields:\n root.append(xml_tag(\"TRANSACTION_ID\", fields[\"transaction_id\"]))\n\n root.append(xml_tag(\"SEND_AS_BATCH\", \"false\"))\n root.append(xml_tag(\"NO_RETRY_ON_FAILURE\", \"false\"))\n if fields and save_to_db:\n save_cols_tag = xml_tag(\"SAVE_COLUMNS\")\n root.append(save_cols_tag)\n for name in fields:\n save_cols_tag.append(xml_tag(\"COLUMN_NAME\", name))\n\n recipient_tag = xml_tag(\"RECIPIENT\")\n root.append(recipient_tag)\n recipient_tag.append(xml_tag(\"EMAIL\", to))\n for addr in bcc:\n recipient_tag.append(xml_tag(\"BCC\", addr))\n recipient_tag.append(xml_tag(\"BODY_TYPE\", \"HTML\"))\n for name, value in fields.items():\n p_tag = 
xml_tag(\"PERSONALIZATION\")\n p_tag.append(xml_tag(\"TAG_NAME\", name))\n p_tag.append(xml_tag(\"VALUE\", value))\n recipient_tag.append(p_tag)\n\n return XML_HEADER + etree.tostring(root, encoding=\"unicode\")\n\n\nclass Acoustic(Silverpop):\n def _call(self, xml):\n logger.debug(\"Request: %s\" % xml)\n try:\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n headers={\"Content-Type\": \"text/xml\"},\n )\n except ConnectionError:\n # try one more time\n response = self.session.post(\n self.api_endpoint,\n data=force_bytes(xml),\n timeout=10,\n headers={\"Content-Type\": \"text/xml\"},\n )\n\n return process_response(response)\n\n\nclass AcousticTransact(Silverpop):\n api_xt_endpoint = \"https://transact-campaign-us-%s.goacoustic.com/XTMail\"\n\n def __init__(self, client_id, client_secret, refresh_token, server_number):\n self.api_xt_endpoint = self.api_xt_endpoint % server_number\n super().__init__(client_id, client_secret, refresh_token, server_number)\n\n def _call_xt(self, xml):\n logger.debug(\"Request: %s\" % xml)\n response = self.session.post(\n self.api_xt_endpoint,\n data=force_bytes(xml),\n timeout=10,\n headers={\"Content-Type\": \"text/xml\"},\n )\n return process_tx_response(response)\n\n def send_mail(self, to, campaign_id, fields=None, bcc=None, save_to_db=False):\n self._call_xt(transact_xml(to, campaign_id, fields, bcc, save_to_db))\n\n\nacoustic = Acoustic(\n client_id=settings.ACOUSTIC_CLIENT_ID,\n client_secret=settings.ACOUSTIC_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_SERVER_NUMBER,\n)\nacoustic_tx = AcousticTransact(\n client_id=settings.ACOUSTIC_TX_CLIENT_ID,\n client_secret=settings.ACOUSTIC_TX_CLIENT_SECRET,\n refresh_token=settings.ACOUSTIC_TX_REFRESH_TOKEN,\n server_number=settings.ACOUSTIC_TX_SERVER_NUMBER,\n)\n", "path": "basket/news/backends/acoustic.py"}]} | 1,614 | 202 |
gh_patches_debug_17418 | rasdani/github-patches | git_diff | zulip__zulip-3596 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wikipedia bot crashes when the query contains multiple word delimit by underscore.
The bot is under `contrib_bots/bots`. Some error handling is needed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib_bots/bots/wikipedia/wikipedia.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3 import requests
4 import logging
5
6 # See readme.md for instructions on running this code.
7
8 class WikipediaHandler(object):
9 '''
10 This plugin facilitates searching Wikipedia for a
11 specific key term and returns the top article from the
12 search. It looks for messages starting with '@wikipedia'
13 or '@wiki'.
14
15 In this example, we write all Wikipedia searches into
16 the same stream that it was called from, but this code
17 could be adapted to write Wikipedia searches to some
18 kind of external issue tracker as well.
19 '''
20
21 def usage(self):
22 return '''
23 This plugin will allow users to directly search
24 Wikipedia for a specific key term and get the top
25 article that is returned from the search. Users
26 should preface searches with "@wikipedia" or
27 "@wiki".
28 '''
29
30 def triage_message(self, message, client):
31 original_content = message['content']
32
33 # This next line of code is defensive, as we
34 # never want to get into an infinite loop of posting Wikipedia
35 # searches for own Wikipedia searches!
36 if message['sender_full_name'] == 'wikipedia-bot':
37 return False
38 is_wikipedia = (original_content.startswith('@wiki') or
39 original_content.startswith('@wikipedia'))
40
41 return is_wikipedia
42
43 def handle_message(self, message, client, state_handler):
44 query = message['content']
45
46 for prefix in ['@wikipedia', '@wiki']:
47 if query.startswith(prefix):
48 query = query[len(prefix)+1:]
49 break
50
51 query_wiki_link = ('https://en.wikipedia.org/w/api.php?action=query&'
52 'list=search&srsearch=%s&format=json' % (query,))
53 try:
54 data = requests.get(query_wiki_link)
55 except requests.exceptions.RequestException:
56 logging.error('broken link')
57 return
58
59 if data.status_code != 200:
60 logging.error('unsuccessful data')
61 return
62
63 search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')
64 url = 'https://wikipedia.org/wiki/' + search_string
65 new_content = 'For search term "' + query
66 if len(data.json()['query']['search']) == 0:
67 new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'
68 else:
69 new_content = new_content + '", ' + url
70
71 client.send_message(dict(
72 type=message['type'],
73 to=message['display_recipient'],
74 subject=message['subject'],
75 content=new_content,
76 ))
77
78 handler_class = WikipediaHandler
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/contrib_bots/bots/wikipedia/wikipedia.py b/contrib_bots/bots/wikipedia/wikipedia.py
--- a/contrib_bots/bots/wikipedia/wikipedia.py
+++ b/contrib_bots/bots/wikipedia/wikipedia.py
@@ -60,12 +60,12 @@
logging.error('unsuccessful data')
return
- search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')
- url = 'https://wikipedia.org/wiki/' + search_string
new_content = 'For search term "' + query
if len(data.json()['query']['search']) == 0:
new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'
else:
+ search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')
+ url = 'https://en.wikipedia.org/wiki/' + search_string
new_content = new_content + '", ' + url
client.send_message(dict(
| {"golden_diff": "diff --git a/contrib_bots/bots/wikipedia/wikipedia.py b/contrib_bots/bots/wikipedia/wikipedia.py\n--- a/contrib_bots/bots/wikipedia/wikipedia.py\n+++ b/contrib_bots/bots/wikipedia/wikipedia.py\n@@ -60,12 +60,12 @@\n logging.error('unsuccessful data')\n return\n \n- search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n- url = 'https://wikipedia.org/wiki/' + search_string\n new_content = 'For search term \"' + query\n if len(data.json()['query']['search']) == 0:\n new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'\n else:\n+ search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n+ url = 'https://en.wikipedia.org/wiki/' + search_string\n new_content = new_content + '\", ' + url\n \n client.send_message(dict(\n", "issue": "Wikipedia bot crashes when the query contains multiple word delimit by underscore.\nThe bot is under `contrib_bots/bots`. Some error handling is needed.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nimport requests\nimport logging\n\n# See readme.md for instructions on running this code.\n\nclass WikipediaHandler(object):\n '''\n This plugin facilitates searching Wikipedia for a\n specific key term and returns the top article from the\n search. It looks for messages starting with '@wikipedia'\n or '@wiki'.\n\n In this example, we write all Wikipedia searches into\n the same stream that it was called from, but this code\n could be adapted to write Wikipedia searches to some\n kind of external issue tracker as well.\n '''\n\n def usage(self):\n return '''\n This plugin will allow users to directly search\n Wikipedia for a specific key term and get the top\n article that is returned from the search. Users\n should preface searches with \"@wikipedia\" or\n \"@wiki\".\n '''\n\n def triage_message(self, message, client):\n original_content = message['content']\n\n # This next line of code is defensive, as we\n # never want to get into an infinite loop of posting Wikipedia\n # searches for own Wikipedia searches!\n if message['sender_full_name'] == 'wikipedia-bot':\n return False\n is_wikipedia = (original_content.startswith('@wiki') or\n original_content.startswith('@wikipedia'))\n\n return is_wikipedia\n\n def handle_message(self, message, client, state_handler):\n query = message['content']\n\n for prefix in ['@wikipedia', '@wiki']:\n if query.startswith(prefix):\n query = query[len(prefix)+1:]\n break\n\n query_wiki_link = ('https://en.wikipedia.org/w/api.php?action=query&'\n 'list=search&srsearch=%s&format=json' % (query,))\n try:\n data = requests.get(query_wiki_link)\n except requests.exceptions.RequestException:\n logging.error('broken link')\n return\n\n if data.status_code != 200:\n logging.error('unsuccessful data')\n return\n\n search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n url = 'https://wikipedia.org/wiki/' + search_string\n new_content = 'For search term \"' + query\n if len(data.json()['query']['search']) == 0:\n new_content = 'I am sorry. 
The search term you provided is not found :slightly_frowning_face:'\n else:\n new_content = new_content + '\", ' + url\n\n client.send_message(dict(\n type=message['type'],\n to=message['display_recipient'],\n subject=message['subject'],\n content=new_content,\n ))\n\nhandler_class = WikipediaHandler\n", "path": "contrib_bots/bots/wikipedia/wikipedia.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nimport requests\nimport logging\n\n# See readme.md for instructions on running this code.\n\nclass WikipediaHandler(object):\n '''\n This plugin facilitates searching Wikipedia for a\n specific key term and returns the top article from the\n search. It looks for messages starting with '@wikipedia'\n or '@wiki'.\n\n In this example, we write all Wikipedia searches into\n the same stream that it was called from, but this code\n could be adapted to write Wikipedia searches to some\n kind of external issue tracker as well.\n '''\n\n def usage(self):\n return '''\n This plugin will allow users to directly search\n Wikipedia for a specific key term and get the top\n article that is returned from the search. Users\n should preface searches with \"@wikipedia\" or\n \"@wiki\".\n '''\n\n def triage_message(self, message, client):\n original_content = message['content']\n\n # This next line of code is defensive, as we\n # never want to get into an infinite loop of posting Wikipedia\n # searches for own Wikipedia searches!\n if message['sender_full_name'] == 'wikipedia-bot':\n return False\n is_wikipedia = (original_content.startswith('@wiki') or\n original_content.startswith('@wikipedia'))\n\n return is_wikipedia\n\n def handle_message(self, message, client, state_handler):\n query = message['content']\n\n for prefix in ['@wikipedia', '@wiki']:\n if query.startswith(prefix):\n query = query[len(prefix)+1:]\n break\n\n query_wiki_link = ('https://en.wikipedia.org/w/api.php?action=query&'\n 'list=search&srsearch=%s&format=json' % (query,))\n try:\n data = requests.get(query_wiki_link)\n except requests.exceptions.RequestException:\n logging.error('broken link')\n return\n\n if data.status_code != 200:\n logging.error('unsuccessful data')\n return\n\n new_content = 'For search term \"' + query\n if len(data.json()['query']['search']) == 0:\n new_content = 'I am sorry. The search term you provided is not found :slightly_frowning_face:'\n else:\n search_string = data.json()['query']['search'][0]['title'].replace(' ', '_')\n url = 'https://en.wikipedia.org/wiki/' + search_string\n new_content = new_content + '\", ' + url\n\n client.send_message(dict(\n type=message['type'],\n to=message['display_recipient'],\n subject=message['subject'],\n content=new_content,\n ))\n\nhandler_class = WikipediaHandler\n", "path": "contrib_bots/bots/wikipedia/wikipedia.py"}]} | 1,023 | 226 |
gh_patches_debug_24325 | rasdani/github-patches | git_diff | xonsh__xonsh-3002 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cmd && othercmd throws an exception
```
➤ ls &&Exception in thread Thread-35:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.6/site-packages/prompt_toolkit/interface.py", line 860, in run
completions = list(buffer.completer.get_completions(document, complete_event))
File "/usr/lib/python3.6/site-packages/xonsh/ptk/completer.py", line 49, in get_completions
self.ctx)
File "/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 124, in complete
out = func(prefix, line, begidx, endidx, ctx)
File "/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py", line 831, in complete_skipper
ctx)
File "/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py", line 124, in complete
out = func(prefix, line, begidx, endidx, ctx)
File "/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py", line 1427, in complete_base
complete_command(prefix, line, start, end, ctx))
TypeError: unsupported operand type(s) for |: 'tuple' and 'set'
```
```
➤ y -Q | egrep 'toolkit|xon'
community/python-prompt_toolkit 1.0.14-1
local/xonsh 0.5.9-1
```
Let me know if you need anything else.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/completers/base.py`
Content:
```
1 """Base completer for xonsh."""
2 import collections.abc as cabc
3
4 from xonsh.completers.path import complete_path
5 from xonsh.completers.python import complete_python
6 from xonsh.completers.commands import complete_command
7
8
9 def complete_base(prefix, line, start, end, ctx):
10 """If the line is empty, complete based on valid commands, python names,
11 and paths. If we are completing the first argument, complete based on
12 valid commands and python names.
13 """
14 if line.strip() == "":
15 out = complete_python(prefix, line, start, end, ctx) | complete_command(
16 prefix, line, start, end, ctx
17 )
18 paths = complete_path(prefix, line, start, end, ctx, False)
19 return (out | paths[0]), paths[1]
20 elif prefix == line:
21 python_comps = complete_python(prefix, line, start, end, ctx)
22 if isinstance(python_comps, cabc.Sequence):
23 return (
24 python_comps[0] | complete_command(prefix, line, start, end, ctx),
25 python_comps[1],
26 )
27 else:
28 return python_comps | complete_command(prefix, line, start, end, ctx)
29 return set()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/completers/base.py b/xonsh/completers/base.py
--- a/xonsh/completers/base.py
+++ b/xonsh/completers/base.py
@@ -11,19 +11,21 @@
and paths. If we are completing the first argument, complete based on
valid commands and python names.
"""
+ # get and unpack python completions
+ python_comps = complete_python(prefix, line, start, end, ctx)
+ if isinstance(python_comps, cabc.Sequence):
+ python_comps, python_comps_len = python_comps
+ else:
+ python_comps_len = None
+ # add command completions
+ out = python_comps | complete_command(prefix, line, start, end, ctx)
+ # add paths, if needed
if line.strip() == "":
- out = complete_python(prefix, line, start, end, ctx) | complete_command(
- prefix, line, start, end, ctx
- )
paths = complete_path(prefix, line, start, end, ctx, False)
return (out | paths[0]), paths[1]
elif prefix == line:
- python_comps = complete_python(prefix, line, start, end, ctx)
- if isinstance(python_comps, cabc.Sequence):
- return (
- python_comps[0] | complete_command(prefix, line, start, end, ctx),
- python_comps[1],
- )
+ if python_comps_len is None:
+ return out
else:
- return python_comps | complete_command(prefix, line, start, end, ctx)
+ return out, python_comps_len
return set()
| {"golden_diff": "diff --git a/xonsh/completers/base.py b/xonsh/completers/base.py\n--- a/xonsh/completers/base.py\n+++ b/xonsh/completers/base.py\n@@ -11,19 +11,21 @@\n and paths. If we are completing the first argument, complete based on\n valid commands and python names.\n \"\"\"\n+ # get and unpack python completions\n+ python_comps = complete_python(prefix, line, start, end, ctx)\n+ if isinstance(python_comps, cabc.Sequence):\n+ python_comps, python_comps_len = python_comps\n+ else:\n+ python_comps_len = None\n+ # add command completions\n+ out = python_comps | complete_command(prefix, line, start, end, ctx)\n+ # add paths, if needed\n if line.strip() == \"\":\n- out = complete_python(prefix, line, start, end, ctx) | complete_command(\n- prefix, line, start, end, ctx\n- )\n paths = complete_path(prefix, line, start, end, ctx, False)\n return (out | paths[0]), paths[1]\n elif prefix == line:\n- python_comps = complete_python(prefix, line, start, end, ctx)\n- if isinstance(python_comps, cabc.Sequence):\n- return (\n- python_comps[0] | complete_command(prefix, line, start, end, ctx),\n- python_comps[1],\n- )\n+ if python_comps_len is None:\n+ return out\n else:\n- return python_comps | complete_command(prefix, line, start, end, ctx)\n+ return out, python_comps_len\n return set()\n", "issue": "cmd && othercmd throws an exception\n```\r\n\u27a4 ls &&Exception in thread Thread-35:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/lib/python3.6/site-packages/prompt_toolkit/interface.py\", line 860, in run\r\n completions = list(buffer.completer.get_completions(document, complete_event))\r\n File \"/usr/lib/python3.6/site-packages/xonsh/ptk/completer.py\", line 49, in get_completions\r\n self.ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 124, in complete\r\n out = func(prefix, line, begidx, endidx, ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py\", line 831, in complete_skipper\r\n ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/__amalgam__.py\", line 124, in complete\r\n out = func(prefix, line, begidx, endidx, ctx)\r\n File \"/usr/lib/python3.6/site-packages/xonsh/completers/__amalgam__.py\", line 1427, in complete_base\r\n complete_command(prefix, line, start, end, ctx))\r\nTypeError: unsupported operand type(s) for |: 'tuple' and 'set'\r\n```\r\n\r\n```\r\n\u27a4 y -Q | egrep 'toolkit|xon'\r\ncommunity/python-prompt_toolkit 1.0.14-1\r\nlocal/xonsh 0.5.9-1\r\n```\r\n\r\nLet me know if you need anything else.\n", "before_files": [{"content": "\"\"\"Base completer for xonsh.\"\"\"\nimport collections.abc as cabc\n\nfrom xonsh.completers.path import complete_path\nfrom xonsh.completers.python import complete_python\nfrom xonsh.completers.commands import complete_command\n\n\ndef complete_base(prefix, line, start, end, ctx):\n \"\"\"If the line is empty, complete based on valid commands, python names,\n and paths. 
If we are completing the first argument, complete based on\n valid commands and python names.\n \"\"\"\n if line.strip() == \"\":\n out = complete_python(prefix, line, start, end, ctx) | complete_command(\n prefix, line, start, end, ctx\n )\n paths = complete_path(prefix, line, start, end, ctx, False)\n return (out | paths[0]), paths[1]\n elif prefix == line:\n python_comps = complete_python(prefix, line, start, end, ctx)\n if isinstance(python_comps, cabc.Sequence):\n return (\n python_comps[0] | complete_command(prefix, line, start, end, ctx),\n python_comps[1],\n )\n else:\n return python_comps | complete_command(prefix, line, start, end, ctx)\n return set()\n", "path": "xonsh/completers/base.py"}], "after_files": [{"content": "\"\"\"Base completer for xonsh.\"\"\"\nimport collections.abc as cabc\n\nfrom xonsh.completers.path import complete_path\nfrom xonsh.completers.python import complete_python\nfrom xonsh.completers.commands import complete_command\n\n\ndef complete_base(prefix, line, start, end, ctx):\n \"\"\"If the line is empty, complete based on valid commands, python names,\n and paths. If we are completing the first argument, complete based on\n valid commands and python names.\n \"\"\"\n # get and unpack python completions\n python_comps = complete_python(prefix, line, start, end, ctx)\n if isinstance(python_comps, cabc.Sequence):\n python_comps, python_comps_len = python_comps\n else:\n python_comps_len = None\n # add command completions\n out = python_comps | complete_command(prefix, line, start, end, ctx)\n # add paths, if needed\n if line.strip() == \"\":\n paths = complete_path(prefix, line, start, end, ctx, False)\n return (out | paths[0]), paths[1]\n elif prefix == line:\n if python_comps_len is None:\n return out\n else:\n return out, python_comps_len\n return set()\n", "path": "xonsh/completers/base.py"}]} | 1,012 | 386 |
gh_patches_debug_22442 | rasdani/github-patches | git_diff | getsentry__sentry-24461 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I have an issue when I import the export json file
## Important Details
On-Premise w/ Docker, version 9.1.2
## Description
I have two servers with a sentry. There are several projects on the first server, and I would like to copy these projects to the clear second server.
I use the export/import commands. The export command works fine. However, when I run the import command, I get an error.
## Steps to Reproduce
1. Run command 'sentry export sentry_export.json' on the first server
2. Run command 'sentry import sentry_export.json' on the second server
3. Get an error
Good items to include here include:
`Traceback (most recent call last):
File "/usr/local/bin/sentry", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/site-packages/sentry/runner/__init__.py", line 162, in main
cli(prog_name=get_prog(), obj={}, max_content_width=100)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/runner/decorators.py", line 36, in inner
return ctx.invoke(f, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/runner/commands/backup.py", line 21, in import_
for obj in serializers.deserialize("json", src, stream=True, use_natural_keys=True):
File "/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py", line 76, in Deserializer
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
File "/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py", line 70, in Deserializer
for obj in PythonDeserializer(objects, **options):
File "/usr/local/lib/python2.7/site-packages/django/core/serializers/python.py", line 140, in Deserializer
data[field.name] = field.to_python(field_value)
File "/usr/local/lib/python2.7/site-packages/sentry/db/models/fields/array.py", line 56, in to_python
value = json.loads(value)
File "/usr/local/lib/python2.7/site-packages/sentry/utils/json.py", line 111, in loads
return _default_decoder.decode(value)
File "/usr/local/lib/python2.7/site-packages/simplejson/decoder.py", line 370, in decode
obj, end = self.raw_decode(s)
File "/usr/local/lib/python2.7/site-packages/simplejson/decoder.py", line 400, in raw_decode
return self.scan_once(s, idx=_w(s, idx).end())
django.core.serializers.base.DeserializationError: Expecting value: line 1 column 2 (char 1)`
### What you expected to happen
Import works fine too
### Possible Solution
[If you have an idea on how this could be solved include that detail here.]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/db/models/fields/array.py`
Content:
```
1 from django.db import models
2
3 from sentry.db.models.utils import Creator
4 from sentry.utils import json
5 from sentry.utils.compat import map
6
7
8 # Adapted from django-pgfields
9 # https://github.com/lukesneeringer/django-pgfields/blob/master/django_pg/models/fields/array.py
10 class ArrayField(models.Field):
11 def __init__(self, of=models.TextField, **kwargs):
12 # Arrays in PostgreSQL are arrays of a particular type.
13 # Save the subtype in our field class.
14 if isinstance(of, type):
15 of = of()
16 self.of = of
17
18 # Set "null" to True. Arrays don't have nulls, but null=True
19 # in the ORM amounts to nothing in SQL (whereas null=False
20 # corresponds to `NOT NULL`)
21 kwargs["null"] = True
22
23 super().__init__(**kwargs)
24
25 def contribute_to_class(self, cls, name):
26 """
27 Add a descriptor for backwards compatibility
28 with previous Django behavior.
29 """
30 super().contribute_to_class(cls, name)
31 setattr(cls, name, Creator(self))
32
33 def db_type(self, connection):
34 return f"{self.of.db_type(connection)}[]"
35
36 def get_internal_type(self):
37 return "TextField"
38
39 def to_python(self, value):
40 if not value:
41 value = []
42 if isinstance(value, str):
43 value = json.loads(value)
44 return map(self.of.to_python, value)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/db/models/fields/array.py b/src/sentry/db/models/fields/array.py
--- a/src/sentry/db/models/fields/array.py
+++ b/src/sentry/db/models/fields/array.py
@@ -1,3 +1,5 @@
+import ast
+
from django.db import models
from sentry.db.models.utils import Creator
@@ -36,9 +38,26 @@
def get_internal_type(self):
return "TextField"
+ def get_prep_value(self, value):
+ """Iterate over each item in the array, and run it
+ through the `get_prep_value` of this array's type.
+ """
+ # If no valid value was given, return an empty list.
+ if not value:
+ return []
+
+ # Appropriately coerce each individual value within
+ # our array.
+ return [self.of.get_prep_value(item) for item in value]
+
def to_python(self, value):
if not value:
value = []
if isinstance(value, str):
- value = json.loads(value)
+ try:
+ value = json.loads(value)
+ except json.JSONDecodeError:
+ # This is to accomodate the erronous exports pre 21.4.0
+ # See getsentry/sentry#23843 for more details
+ value = ast.literal_eval(value)
return map(self.of.to_python, value)
| {"golden_diff": "diff --git a/src/sentry/db/models/fields/array.py b/src/sentry/db/models/fields/array.py\n--- a/src/sentry/db/models/fields/array.py\n+++ b/src/sentry/db/models/fields/array.py\n@@ -1,3 +1,5 @@\n+import ast\n+\n from django.db import models\n \n from sentry.db.models.utils import Creator\n@@ -36,9 +38,26 @@\n def get_internal_type(self):\n return \"TextField\"\n \n+ def get_prep_value(self, value):\n+ \"\"\"Iterate over each item in the array, and run it\n+ through the `get_prep_value` of this array's type.\n+ \"\"\"\n+ # If no valid value was given, return an empty list.\n+ if not value:\n+ return []\n+\n+ # Appropriately coerce each individual value within\n+ # our array.\n+ return [self.of.get_prep_value(item) for item in value]\n+\n def to_python(self, value):\n if not value:\n value = []\n if isinstance(value, str):\n- value = json.loads(value)\n+ try:\n+ value = json.loads(value)\n+ except json.JSONDecodeError:\n+ # This is to accomodate the erronous exports pre 21.4.0\n+ # See getsentry/sentry#23843 for more details\n+ value = ast.literal_eval(value)\n return map(self.of.to_python, value)\n", "issue": "I have an issue when I import the export json file\n## Important Details\r\n\r\nOn-Premise w/ Docker, version 9.1.2\r\n\r\n## Description\r\n\r\nI have two servers with a sentry. There are several projects on the first server, and I would like to copy these projects to the clear second server.\r\nI use the export/import commands. The export command works fine. However, when I run the import command, I get an error.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Run command 'sentry export sentry_export.json' on the first server\r\n2. Run command 'sentry import sentry_export.json' on the second server\r\n3. Get an error\r\n\r\nGood items to include here include:\r\n\r\n`Traceback (most recent call last):\r\n File \"/usr/local/bin/sentry\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/runner/__init__.py\", line 162, in main\r\n cli(prog_name=get_prog(), obj={}, max_content_width=100)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 1066, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/click/decorators.py\", line 17, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/runner/decorators.py\", line 36, in inner\r\n return ctx.invoke(f, *args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/runner/commands/backup.py\", line 21, in import_\r\n for obj in serializers.deserialize(\"json\", src, stream=True, use_natural_keys=True):\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py\", line 76, in Deserializer\r\n six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])\r\n File 
\"/usr/local/lib/python2.7/site-packages/django/core/serializers/json.py\", line 70, in Deserializer\r\n for obj in PythonDeserializer(objects, **options):\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/serializers/python.py\", line 140, in Deserializer\r\n data[field.name] = field.to_python(field_value)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/db/models/fields/array.py\", line 56, in to_python\r\n value = json.loads(value)\r\n File \"/usr/local/lib/python2.7/site-packages/sentry/utils/json.py\", line 111, in loads\r\n return _default_decoder.decode(value)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\r\n obj, end = self.raw_decode(s)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\r\n return self.scan_once(s, idx=_w(s, idx).end())\r\ndjango.core.serializers.base.DeserializationError: Expecting value: line 1 column 2 (char 1)`\r\n\r\n### What you expected to happen\r\n\r\nImport works fine too\r\n\r\n### Possible Solution\r\n\r\n[If you have an idea on how this could be solved include that detail here.]\r\n\n", "before_files": [{"content": "from django.db import models\n\nfrom sentry.db.models.utils import Creator\nfrom sentry.utils import json\nfrom sentry.utils.compat import map\n\n\n# Adapted from django-pgfields\n# https://github.com/lukesneeringer/django-pgfields/blob/master/django_pg/models/fields/array.py\nclass ArrayField(models.Field):\n def __init__(self, of=models.TextField, **kwargs):\n # Arrays in PostgreSQL are arrays of a particular type.\n # Save the subtype in our field class.\n if isinstance(of, type):\n of = of()\n self.of = of\n\n # Set \"null\" to True. Arrays don't have nulls, but null=True\n # in the ORM amounts to nothing in SQL (whereas null=False\n # corresponds to `NOT NULL`)\n kwargs[\"null\"] = True\n\n super().__init__(**kwargs)\n\n def contribute_to_class(self, cls, name):\n \"\"\"\n Add a descriptor for backwards compatibility\n with previous Django behavior.\n \"\"\"\n super().contribute_to_class(cls, name)\n setattr(cls, name, Creator(self))\n\n def db_type(self, connection):\n return f\"{self.of.db_type(connection)}[]\"\n\n def get_internal_type(self):\n return \"TextField\"\n\n def to_python(self, value):\n if not value:\n value = []\n if isinstance(value, str):\n value = json.loads(value)\n return map(self.of.to_python, value)\n", "path": "src/sentry/db/models/fields/array.py"}], "after_files": [{"content": "import ast\n\nfrom django.db import models\n\nfrom sentry.db.models.utils import Creator\nfrom sentry.utils import json\nfrom sentry.utils.compat import map\n\n\n# Adapted from django-pgfields\n# https://github.com/lukesneeringer/django-pgfields/blob/master/django_pg/models/fields/array.py\nclass ArrayField(models.Field):\n def __init__(self, of=models.TextField, **kwargs):\n # Arrays in PostgreSQL are arrays of a particular type.\n # Save the subtype in our field class.\n if isinstance(of, type):\n of = of()\n self.of = of\n\n # Set \"null\" to True. 
Arrays don't have nulls, but null=True\n # in the ORM amounts to nothing in SQL (whereas null=False\n # corresponds to `NOT NULL`)\n kwargs[\"null\"] = True\n\n super().__init__(**kwargs)\n\n def contribute_to_class(self, cls, name):\n \"\"\"\n Add a descriptor for backwards compatibility\n with previous Django behavior.\n \"\"\"\n super().contribute_to_class(cls, name)\n setattr(cls, name, Creator(self))\n\n def db_type(self, connection):\n return f\"{self.of.db_type(connection)}[]\"\n\n def get_internal_type(self):\n return \"TextField\"\n\n def get_prep_value(self, value):\n \"\"\"Iterate over each item in the array, and run it\n through the `get_prep_value` of this array's type.\n \"\"\"\n # If no valid value was given, return an empty list.\n if not value:\n return []\n\n # Appropriately coerce each individual value within\n # our array.\n return [self.of.get_prep_value(item) for item in value]\n\n def to_python(self, value):\n if not value:\n value = []\n if isinstance(value, str):\n try:\n value = json.loads(value)\n except json.JSONDecodeError:\n # This is to accomodate the erronous exports pre 21.4.0\n # See getsentry/sentry#23843 for more details\n value = ast.literal_eval(value)\n return map(self.of.to_python, value)\n", "path": "src/sentry/db/models/fields/array.py"}]} | 1,583 | 319 |
gh_patches_debug_40098 | rasdani/github-patches | git_diff | pytorch__vision-6458 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Port `transforms.LinearTransformation` to `prototype.transforms`
cc @vfdev-5 @datumbox @bjuncek
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/prototype/transforms/_misc.py`
Content:
```
1 import functools
2 from typing import Any, Callable, Dict, List, Sequence, Type, Union
3
4 import torch
5 from torchvision.prototype.transforms import functional as F, Transform
6 from torchvision.transforms.transforms import _setup_size
7
8
9 class Identity(Transform):
10 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
11 return inpt
12
13
14 class Lambda(Transform):
15 def __init__(self, fn: Callable[[Any], Any], *types: Type):
16 super().__init__()
17 self.fn = fn
18 self.types = types
19
20 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
21 if type(inpt) in self.types:
22 return self.fn(inpt)
23 else:
24 return inpt
25
26 def extra_repr(self) -> str:
27 extras = []
28 name = getattr(self.fn, "__name__", None)
29 if name:
30 extras.append(name)
31 extras.append(f"types={[type.__name__ for type in self.types]}")
32 return ", ".join(extras)
33
34
35 class Normalize(Transform):
36 def __init__(self, mean: List[float], std: List[float]):
37 super().__init__()
38 self.mean = mean
39 self.std = std
40
41 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
42 return F.normalize(inpt, mean=self.mean, std=self.std)
43
44
45 class GaussianBlur(Transform):
46 def __init__(
47 self, kernel_size: Union[int, Sequence[int]], sigma: Union[float, Sequence[float]] = (0.1, 2.0)
48 ) -> None:
49 super().__init__()
50 self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
51 for ks in self.kernel_size:
52 if ks <= 0 or ks % 2 == 0:
53 raise ValueError("Kernel size value should be an odd and positive number.")
54
55 if isinstance(sigma, float):
56 if sigma <= 0:
57 raise ValueError("If sigma is a single number, it must be positive.")
58 sigma = (sigma, sigma)
59 elif isinstance(sigma, Sequence) and len(sigma) == 2:
60 if not 0.0 < sigma[0] <= sigma[1]:
61 raise ValueError("sigma values should be positive and of the form (min, max).")
62 else:
63 raise TypeError("sigma should be a single float or a list/tuple with length 2 floats.")
64
65 self.sigma = sigma
66
67 def _get_params(self, sample: Any) -> Dict[str, Any]:
68 sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()
69 return dict(sigma=[sigma, sigma])
70
71 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
72 return F.gaussian_blur(inpt, **params)
73
74
75 class ToDtype(Lambda):
76 def __init__(self, dtype: torch.dtype, *types: Type) -> None:
77 self.dtype = dtype
78 super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types)
79
80 def extra_repr(self) -> str:
81 return ", ".join([f"dtype={self.dtype}", f"types={[type.__name__ for type in self.types]}"])
82
```
Path: `torchvision/prototype/transforms/__init__.py`
Content:
```
1 from . import functional # usort: skip
2
3 from ._transform import Transform # usort: skip
4
5 from ._augment import RandomCutmix, RandomErasing, RandomMixup
6 from ._auto_augment import AugMix, AutoAugment, AutoAugmentPolicy, RandAugment, TrivialAugmentWide
7 from ._color import (
8 ColorJitter,
9 RandomAdjustSharpness,
10 RandomAutocontrast,
11 RandomEqualize,
12 RandomInvert,
13 RandomPhotometricDistort,
14 RandomPosterize,
15 RandomSolarize,
16 )
17 from ._container import Compose, RandomApply, RandomChoice, RandomOrder
18 from ._geometry import (
19 BatchMultiCrop,
20 CenterCrop,
21 ElasticTransform,
22 FiveCrop,
23 FixedSizeCrop,
24 Pad,
25 RandomAffine,
26 RandomCrop,
27 RandomHorizontalFlip,
28 RandomIoUCrop,
29 RandomPerspective,
30 RandomResizedCrop,
31 RandomRotation,
32 RandomShortestSize,
33 RandomVerticalFlip,
34 RandomZoomOut,
35 Resize,
36 ScaleJitter,
37 TenCrop,
38 )
39 from ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype
40 from ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype
41 from ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor
42
43 from ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/prototype/transforms/__init__.py b/torchvision/prototype/transforms/__init__.py
--- a/torchvision/prototype/transforms/__init__.py
+++ b/torchvision/prototype/transforms/__init__.py
@@ -37,7 +37,7 @@
TenCrop,
)
from ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype
-from ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype
+from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, ToDtype
from ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor
from ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip
diff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py
--- a/torchvision/prototype/transforms/_misc.py
+++ b/torchvision/prototype/transforms/_misc.py
@@ -1,7 +1,10 @@
import functools
from typing import Any, Callable, Dict, List, Sequence, Type, Union
+import PIL.Image
+
import torch
+from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.transforms.transforms import _setup_size
@@ -32,6 +35,59 @@
return ", ".join(extras)
+class LinearTransformation(Transform):
+ def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):
+ super().__init__()
+ if transformation_matrix.size(0) != transformation_matrix.size(1):
+ raise ValueError(
+ "transformation_matrix should be square. Got "
+ f"{tuple(transformation_matrix.size())} rectangular matrix."
+ )
+
+ if mean_vector.size(0) != transformation_matrix.size(0):
+ raise ValueError(
+ f"mean_vector should have the same length {mean_vector.size(0)}"
+ f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]"
+ )
+
+ if transformation_matrix.device != mean_vector.device:
+ raise ValueError(
+ f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
+ )
+
+ self.transformation_matrix = transformation_matrix
+ self.mean_vector = mean_vector
+
+ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+
+ if isinstance(inpt, features._Feature) and not isinstance(inpt, features.Image):
+ return inpt
+ elif isinstance(inpt, PIL.Image.Image):
+ raise TypeError("Unsupported input type")
+
+ # Image instance after linear transformation is not Image anymore due to unknown data range
+ # Thus we will return Tensor for input Image
+
+ shape = inpt.shape
+ n = shape[-3] * shape[-2] * shape[-1]
+ if n != self.transformation_matrix.shape[0]:
+ raise ValueError(
+ "Input tensor and transformation matrix have incompatible shape."
+ + f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != "
+ + f"{self.transformation_matrix.shape[0]}"
+ )
+
+ if inpt.device.type != self.mean_vector.device.type:
+ raise ValueError(
+ "Input tensor should be on the same device as transformation matrix and mean vector. "
+ f"Got {inpt.device} vs {self.mean_vector.device}"
+ )
+
+ flat_tensor = inpt.view(-1, n) - self.mean_vector
+ transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
+ return transformed_tensor.view(shape)
+
+
class Normalize(Transform):
def __init__(self, mean: List[float], std: List[float]):
super().__init__()
| {"golden_diff": "diff --git a/torchvision/prototype/transforms/__init__.py b/torchvision/prototype/transforms/__init__.py\n--- a/torchvision/prototype/transforms/__init__.py\n+++ b/torchvision/prototype/transforms/__init__.py\n@@ -37,7 +37,7 @@\n TenCrop,\n )\n from ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype\n-from ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype\n+from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, ToDtype\n from ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor\n \n from ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip\ndiff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py\n--- a/torchvision/prototype/transforms/_misc.py\n+++ b/torchvision/prototype/transforms/_misc.py\n@@ -1,7 +1,10 @@\n import functools\n from typing import Any, Callable, Dict, List, Sequence, Type, Union\n \n+import PIL.Image\n+\n import torch\n+from torchvision.prototype import features\n from torchvision.prototype.transforms import functional as F, Transform\n from torchvision.transforms.transforms import _setup_size\n \n@@ -32,6 +35,59 @@\n return \", \".join(extras)\n \n \n+class LinearTransformation(Transform):\n+ def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):\n+ super().__init__()\n+ if transformation_matrix.size(0) != transformation_matrix.size(1):\n+ raise ValueError(\n+ \"transformation_matrix should be square. Got \"\n+ f\"{tuple(transformation_matrix.size())} rectangular matrix.\"\n+ )\n+\n+ if mean_vector.size(0) != transformation_matrix.size(0):\n+ raise ValueError(\n+ f\"mean_vector should have the same length {mean_vector.size(0)}\"\n+ f\" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]\"\n+ )\n+\n+ if transformation_matrix.device != mean_vector.device:\n+ raise ValueError(\n+ f\"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}\"\n+ )\n+\n+ self.transformation_matrix = transformation_matrix\n+ self.mean_vector = mean_vector\n+\n+ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n+\n+ if isinstance(inpt, features._Feature) and not isinstance(inpt, features.Image):\n+ return inpt\n+ elif isinstance(inpt, PIL.Image.Image):\n+ raise TypeError(\"Unsupported input type\")\n+\n+ # Image instance after linear transformation is not Image anymore due to unknown data range\n+ # Thus we will return Tensor for input Image\n+\n+ shape = inpt.shape\n+ n = shape[-3] * shape[-2] * shape[-1]\n+ if n != self.transformation_matrix.shape[0]:\n+ raise ValueError(\n+ \"Input tensor and transformation matrix have incompatible shape.\"\n+ + f\"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != \"\n+ + f\"{self.transformation_matrix.shape[0]}\"\n+ )\n+\n+ if inpt.device.type != self.mean_vector.device.type:\n+ raise ValueError(\n+ \"Input tensor should be on the same device as transformation matrix and mean vector. 
\"\n+ f\"Got {inpt.device} vs {self.mean_vector.device}\"\n+ )\n+\n+ flat_tensor = inpt.view(-1, n) - self.mean_vector\n+ transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n+ return transformed_tensor.view(shape)\n+\n+\n class Normalize(Transform):\n def __init__(self, mean: List[float], std: List[float]):\n super().__init__()\n", "issue": "Port `transforms.LinearTransformation` to `prototype.transforms`\ncc @vfdev-5 @datumbox @bjuncek\n", "before_files": [{"content": "import functools\nfrom typing import Any, Callable, Dict, List, Sequence, Type, Union\n\nimport torch\nfrom torchvision.prototype.transforms import functional as F, Transform\nfrom torchvision.transforms.transforms import _setup_size\n\n\nclass Identity(Transform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt\n\n\nclass Lambda(Transform):\n def __init__(self, fn: Callable[[Any], Any], *types: Type):\n super().__init__()\n self.fn = fn\n self.types = types\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if type(inpt) in self.types:\n return self.fn(inpt)\n else:\n return inpt\n\n def extra_repr(self) -> str:\n extras = []\n name = getattr(self.fn, \"__name__\", None)\n if name:\n extras.append(name)\n extras.append(f\"types={[type.__name__ for type in self.types]}\")\n return \", \".join(extras)\n\n\nclass Normalize(Transform):\n def __init__(self, mean: List[float], std: List[float]):\n super().__init__()\n self.mean = mean\n self.std = std\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.normalize(inpt, mean=self.mean, std=self.std)\n\n\nclass GaussianBlur(Transform):\n def __init__(\n self, kernel_size: Union[int, Sequence[int]], sigma: Union[float, Sequence[float]] = (0.1, 2.0)\n ) -> None:\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, float):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = (sigma, sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0.0 < sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise TypeError(\"sigma should be a single float or a list/tuple with length 2 floats.\")\n\n self.sigma = sigma\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()\n return dict(sigma=[sigma, sigma])\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.gaussian_blur(inpt, **params)\n\n\nclass ToDtype(Lambda):\n def __init__(self, dtype: torch.dtype, *types: Type) -> None:\n self.dtype = dtype\n super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types)\n\n def extra_repr(self) -> str:\n return \", \".join([f\"dtype={self.dtype}\", f\"types={[type.__name__ for type in self.types]}\"])\n", "path": "torchvision/prototype/transforms/_misc.py"}, {"content": "from . 
import functional # usort: skip\n\nfrom ._transform import Transform # usort: skip\n\nfrom ._augment import RandomCutmix, RandomErasing, RandomMixup\nfrom ._auto_augment import AugMix, AutoAugment, AutoAugmentPolicy, RandAugment, TrivialAugmentWide\nfrom ._color import (\n ColorJitter,\n RandomAdjustSharpness,\n RandomAutocontrast,\n RandomEqualize,\n RandomInvert,\n RandomPhotometricDistort,\n RandomPosterize,\n RandomSolarize,\n)\nfrom ._container import Compose, RandomApply, RandomChoice, RandomOrder\nfrom ._geometry import (\n BatchMultiCrop,\n CenterCrop,\n ElasticTransform,\n FiveCrop,\n FixedSizeCrop,\n Pad,\n RandomAffine,\n RandomCrop,\n RandomHorizontalFlip,\n RandomIoUCrop,\n RandomPerspective,\n RandomResizedCrop,\n RandomRotation,\n RandomShortestSize,\n RandomVerticalFlip,\n RandomZoomOut,\n Resize,\n ScaleJitter,\n TenCrop,\n)\nfrom ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype\nfrom ._misc import GaussianBlur, Identity, Lambda, Normalize, ToDtype\nfrom ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor\n\nfrom ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip\n", "path": "torchvision/prototype/transforms/__init__.py"}], "after_files": [{"content": "import functools\nfrom typing import Any, Callable, Dict, List, Sequence, Type, Union\n\nimport PIL.Image\n\nimport torch\nfrom torchvision.prototype import features\nfrom torchvision.prototype.transforms import functional as F, Transform\nfrom torchvision.transforms.transforms import _setup_size\n\n\nclass Identity(Transform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt\n\n\nclass Lambda(Transform):\n def __init__(self, fn: Callable[[Any], Any], *types: Type):\n super().__init__()\n self.fn = fn\n self.types = types\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if type(inpt) in self.types:\n return self.fn(inpt)\n else:\n return inpt\n\n def extra_repr(self) -> str:\n extras = []\n name = getattr(self.fn, \"__name__\", None)\n if name:\n extras.append(name)\n extras.append(f\"types={[type.__name__ for type in self.types]}\")\n return \", \".join(extras)\n\n\nclass LinearTransformation(Transform):\n def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):\n super().__init__()\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\n \"transformation_matrix should be square. Got \"\n f\"{tuple(transformation_matrix.size())} rectangular matrix.\"\n )\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\n f\"mean_vector should have the same length {mean_vector.size(0)}\"\n f\" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]\"\n )\n\n if transformation_matrix.device != mean_vector.device:\n raise ValueError(\n f\"Input tensors should be on the same device. 
Got {transformation_matrix.device} and {mean_vector.device}\"\n )\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n\n if isinstance(inpt, features._Feature) and not isinstance(inpt, features.Image):\n return inpt\n elif isinstance(inpt, PIL.Image.Image):\n raise TypeError(\"Unsupported input type\")\n\n # Image instance after linear transformation is not Image anymore due to unknown data range\n # Thus we will return Tensor for input Image\n\n shape = inpt.shape\n n = shape[-3] * shape[-2] * shape[-1]\n if n != self.transformation_matrix.shape[0]:\n raise ValueError(\n \"Input tensor and transformation matrix have incompatible shape.\"\n + f\"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != \"\n + f\"{self.transformation_matrix.shape[0]}\"\n )\n\n if inpt.device.type != self.mean_vector.device.type:\n raise ValueError(\n \"Input tensor should be on the same device as transformation matrix and mean vector. \"\n f\"Got {inpt.device} vs {self.mean_vector.device}\"\n )\n\n flat_tensor = inpt.view(-1, n) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n return transformed_tensor.view(shape)\n\n\nclass Normalize(Transform):\n def __init__(self, mean: List[float], std: List[float]):\n super().__init__()\n self.mean = mean\n self.std = std\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.normalize(inpt, mean=self.mean, std=self.std)\n\n\nclass GaussianBlur(Transform):\n def __init__(\n self, kernel_size: Union[int, Sequence[int]], sigma: Union[float, Sequence[float]] = (0.1, 2.0)\n ) -> None:\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, float):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = (sigma, sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0.0 < sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise TypeError(\"sigma should be a single float or a list/tuple with length 2 floats.\")\n\n self.sigma = sigma\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()\n return dict(sigma=[sigma, sigma])\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.gaussian_blur(inpt, **params)\n\n\nclass ToDtype(Lambda):\n def __init__(self, dtype: torch.dtype, *types: Type) -> None:\n self.dtype = dtype\n super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types)\n\n def extra_repr(self) -> str:\n return \", \".join([f\"dtype={self.dtype}\", f\"types={[type.__name__ for type in self.types]}\"])\n", "path": "torchvision/prototype/transforms/_misc.py"}, {"content": "from . 
import functional # usort: skip\n\nfrom ._transform import Transform # usort: skip\n\nfrom ._augment import RandomCutmix, RandomErasing, RandomMixup\nfrom ._auto_augment import AugMix, AutoAugment, AutoAugmentPolicy, RandAugment, TrivialAugmentWide\nfrom ._color import (\n ColorJitter,\n RandomAdjustSharpness,\n RandomAutocontrast,\n RandomEqualize,\n RandomInvert,\n RandomPhotometricDistort,\n RandomPosterize,\n RandomSolarize,\n)\nfrom ._container import Compose, RandomApply, RandomChoice, RandomOrder\nfrom ._geometry import (\n BatchMultiCrop,\n CenterCrop,\n ElasticTransform,\n FiveCrop,\n FixedSizeCrop,\n Pad,\n RandomAffine,\n RandomCrop,\n RandomHorizontalFlip,\n RandomIoUCrop,\n RandomPerspective,\n RandomResizedCrop,\n RandomRotation,\n RandomShortestSize,\n RandomVerticalFlip,\n RandomZoomOut,\n Resize,\n ScaleJitter,\n TenCrop,\n)\nfrom ._meta import ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype\nfrom ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, ToDtype\nfrom ._type_conversion import DecodeImage, LabelToOneHot, ToImagePIL, ToImageTensor\n\nfrom ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip\n", "path": "torchvision/prototype/transforms/__init__.py"}]} | 1,574 | 862 |