Dataset columns (field, dtype, observed range across rows):

    problem_id          string   length 18-22
    source              string   1 distinct value
    task_type           string   1 distinct value
    in_source_id        string   length 13-58
    prompt              string   length 1.1k-25.4k
    golden_diff         string   length 145-5.13k
    verification_info   string   length 582-39.1k
    num_tokens          int64    271-4.1k
    num_tokens_diff     int64    47-1.02k

problem_id: gh_patches_debug_9261
source: rasdani/github-patches
task_type: git_diff
in_source_id: getnikola__nikola-3668
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Babel 12.2.x test failures due to no-breaking spaces and slightly different output <!-- Before creating an issue: * make sure you are using an up-to-date version of Nikola * search for existing issues that might be related Make sure to: * provide information about your environment (below) * include all the output you get, and any other information related to your problem Nikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED. If you are using this version, you should upgrade: https://getnikola.com/getting-started.html --> ### Environment **Python Version:** 3.11.2 **Nikola Version:** 8.2.3 **Operating System:** Fedora Linux 39 ### Description: Hello, in Fedora we are updating our babel to 12.2.0 and I noticed some test failures of nikola. Unfortunately, I won't be able to provide a full producer as I run the tests in the Fedora package and I do not maintain that package. This is just a friendly heads-up for you, I don't expect you to do anything. The test failures are ``` + pytest ============================= test session starts ============================== platform linux -- Python 3.11.2, pytest-7.2.1, pluggy-1.0.0 rootdir: /builddir/build/BUILD/nikola-8.2.3, configfile: setup.cfg plugins: cov-4.0.0, anyio-3.5.0 collected 500 items ... =================================== FAILURES =================================== ____________________________ test_format_date_long _____________________________ base_config = None def test_format_date_long(base_config): > assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT) == DT_EN_US E AssertionError: assert 'July 10, 185...6\u202fPM UTC' == 'July 10, 185...:34:56 PM UTC' E - July 10, 1856 at 12:34:56 PM UTC E ? ^^^ ^ E + July 10, 1856, 12:34:56 PM UTC E ? ^ ^ tests/test_locale.py:77: AssertionError __________________________ test_format_date_timezone ___________________________ base_config = None def test_format_date_timezone(base_config): tesla_150_birthday_dtz = datetime.datetime( 2006, 7, 10, 12, 34, 56, tzinfo=dateutil.tz.gettz("America/New_York") ) formatted_date = LocaleBorg().formatted_date("long", tesla_150_birthday_dtz) > assert formatted_date == "July 10, 2006 at 12:34:56 PM -0400" E AssertionError: assert 'July 10, 200...u202fPM -0400' == 'July 10, 200...4:56 PM -0400' E - July 10, 2006 at 12:34:56 PM -0400 E ? ^^^ ^ E + July 10, 2006, 12:34:56 PM -0400 E ? ^ ^ tests/test_locale.py:90: AssertionError _____________________ test_format_date_locale_variants[US] _____________________ english_variant = 'en_US', expected_date = 'July 10, 1856 at 12:34:56 PM UTC' @pytest.mark.parametrize( "english_variant, expected_date", [ pytest.param("en_US", DT_EN_US, id="US"), pytest.param("en_GB", "10 July 1856 at 12:34:56 UTC", id="GB"), ], ) def test_format_date_locale_variants(english_variant, expected_date): LocaleBorg.initialize({"en": english_variant}, "en") > assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT, "en") == expected_date E AssertionError: assert 'July 10, 185...6\u202fPM UTC' == 'July 10, 185...:34:56 PM UTC' E - July 10, 1856 at 12:34:56 PM UTC E ? ^^^ ^ E + July 10, 1856, 12:34:56 PM UTC E ? 
^ ^ tests/test_locale.py:108: AssertionError _____________________ test_format_date_locale_variants[GB] _____________________ english_variant = 'en_GB', expected_date = '10 July 1856 at 12:34:56 UTC' @pytest.mark.parametrize( "english_variant, expected_date", [ pytest.param("en_US", DT_EN_US, id="US"), pytest.param("en_GB", "10 July 1856 at 12:34:56 UTC", id="GB"), ], ) def test_format_date_locale_variants(english_variant, expected_date): LocaleBorg.initialize({"en": english_variant}, "en") > assert LocaleBorg().formatted_date("long", TESLA_BIRTHDAY_DT, "en") == expected_date E AssertionError: assert '10 July 1856, 12:34:56 UTC' == '10 July 1856 at 12:34:56 UTC' E - 10 July 1856 at 12:34:56 UTC E ? ^^^ E + 10 July 1856, 12:34:56 UTC E ? ^ tests/test_locale.py:108: AssertionError ``` Apart from the ` at ` vs `, ` change, there is also a difference in SPACE vs. NARROW NO-BREAK SPACE before `PM`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 import os 4 import sys 5 import shutil 6 from setuptools import setup, find_packages 7 from setuptools.command.install import install 8 from setuptools.command.build_py import build_py 9 10 11 with open('requirements.txt', 'r') as fh: 12 dependencies = [l.strip().split("#")[0] for l in fh] 13 14 extras = {} 15 16 with open('requirements-extras.txt', 'r') as fh: 17 extras['extras'] = [l.strip() for l in fh][1:] 18 # Alternative name. 19 extras['full'] = extras['extras'] 20 21 with open('requirements-tests.txt', 'r') as fh: 22 extras['tests'] = [l.strip() for l in fh][1:] 23 24 # ########## platform specific stuff ############# 25 if sys.version_info[0] == 2: 26 raise Exception('Python 2 is not supported') 27 elif sys.version_info[0] == 3 and sys.version_info[1] < 7: 28 raise Exception('Python 3 version < 3.7 is not supported') 29 30 ################################################## 31 32 # Provided as an attribute, so you can append to these instead 33 # of replicating them: 34 standard_exclude = ('*.pyc', '*$py.class', '*~', '.*', '*.bak') 35 standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', 36 './dist', 'EGG-INFO', '*.egg-info') 37 38 with open('README.rst') as f: 39 long_description = f.read() 40 41 42 def copy_messages(): 43 themes_directory = os.path.join( 44 os.path.dirname(__file__), 'nikola', 'data', 'themes') 45 original_messages_directory = os.path.join( 46 themes_directory, 'default', 'messages') 47 48 for theme in ('orphan', 'monospace'): 49 theme_messages_directory = os.path.join( 50 themes_directory, theme, 'messages') 51 52 if os.path.exists(theme_messages_directory): 53 shutil.rmtree(theme_messages_directory) 54 55 shutil.copytree(original_messages_directory, theme_messages_directory) 56 57 58 def expands_symlinks_for_windows(): 59 """replaces the symlinked files with a copy of the original content. 60 61 In windows (msysgit), a symlink is converted to a text file with a 62 path to the file it points to. 
If not corrected, installing from a git 63 clone will end with some files with bad content 64 65 After install the working copy will be dirty (symlink markers overwritten 66 with real content) 67 """ 68 if sys.platform != 'win32': 69 return 70 71 # apply the fix 72 localdir = os.path.dirname(os.path.abspath(__file__)) 73 oldpath = sys.path[:] 74 sys.path.insert(0, os.path.join(localdir, 'nikola')) 75 winutils = __import__('winutils') 76 failures = winutils.fix_all_git_symlinked(localdir) 77 sys.path = oldpath 78 del sys.modules['winutils'] 79 if failures != -1: 80 print('WARNING: your working copy is now dirty by changes in ' 81 'samplesite, sphinx and themes') 82 if failures > 0: 83 raise Exception("Error: \n\tnot all symlinked files could be fixed." + 84 "\n\tYour best bet is to start again from clean.") 85 86 87 def remove_old_files(self): 88 tree = os.path.join(self.install_lib, 'nikola') 89 try: 90 shutil.rmtree(tree, ignore_errors=True) 91 except Exception: 92 pass 93 94 95 class nikola_install(install): 96 def run(self): 97 expands_symlinks_for_windows() 98 remove_old_files(self) 99 install.run(self) 100 101 102 class nikola_build_py(build_py): 103 def run(self): 104 expands_symlinks_for_windows() 105 build_py.run(self) 106 107 108 setup(name='Nikola', 109 version='8.2.3', 110 description='A modular, fast, simple, static website and blog generator', 111 long_description=long_description, 112 author='Roberto Alsina and others', 113 author_email='[email protected]', 114 url='https://getnikola.com/', 115 packages=find_packages(exclude=('tests', 'tests.*')), 116 license='MIT', 117 keywords='website, blog, static', 118 classifiers=['Development Status :: 5 - Production/Stable', 119 'Environment :: Console', 120 'Environment :: Plugins', 121 'Environment :: Web Environment', 122 'Intended Audience :: End Users/Desktop', 123 'License :: OSI Approved :: MIT License', 124 'Operating System :: MacOS', 125 'Operating System :: MacOS :: MacOS X', 126 'Operating System :: Microsoft :: Windows', 127 'Operating System :: OS Independent', 128 'Operating System :: POSIX', 129 'Operating System :: Unix', 130 'Programming Language :: Python', 131 'Programming Language :: Python :: 3.7', 132 'Programming Language :: Python :: 3.8', 133 'Programming Language :: Python :: 3.9', 134 'Programming Language :: Python :: 3.10', 135 'Topic :: Internet', 136 'Topic :: Internet :: WWW/HTTP', 137 'Topic :: Text Processing :: Markup'], 138 install_requires=dependencies, 139 extras_require=extras, 140 include_package_data=True, 141 python_requires='>=3.7', 142 cmdclass={'install': nikola_install, 'build_py': nikola_build_py}, 143 data_files=[ 144 ('share/doc/nikola', [ 145 'docs/manual.rst', 146 'docs/theming.rst', 147 'docs/extending.rst']), 148 ('share/man/man1', ['docs/man/nikola.1.gz']), 149 ], 150 entry_points={ 151 'console_scripts': [ 152 'nikola = nikola.__main__:main' 153 ] 154 }, 155 ) 156 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -132,6 +132,7 @@ 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', 'Topic :: Internet', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Text Processing :: Markup'],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -132,6 +132,7 @@\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n+ 'Programming Language :: Python :: 3.11',\n 'Topic :: Internet',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Text Processing :: Markup'],\n", "issue": "Babel 12.2.x test failures due to no-breaking spaces and slightly different output\n<!--\r\nBefore creating an issue:\r\n* make sure you are using an up-to-date version of Nikola\r\n* search for existing issues that might be related\r\n\r\nMake sure to:\r\n* provide information about your environment (below)\r\n* include all the output you get, and any other information related to your problem\r\n\r\nNikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED.\r\nIf you are using this version, you should upgrade: https://getnikola.com/getting-started.html\r\n-->\r\n\r\n### Environment\r\n\r\n**Python Version:** 3.11.2\r\n\r\n**Nikola Version:** 8.2.3\r\n\r\n**Operating System:** Fedora Linux 39\r\n\r\n### Description:\r\n\r\nHello, in Fedora we are updating our babel to 12.2.0 and I noticed some test failures of nikola. Unfortunately, I won't be able to provide a full producer as I run the tests in the Fedora package and I do not maintain that package. This is just a friendly heads-up for you, I don't expect you to do anything.\r\n\r\nThe test failures are \r\n\r\n```\r\n+ pytest\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.11.2, pytest-7.2.1, pluggy-1.0.0\r\nrootdir: /builddir/build/BUILD/nikola-8.2.3, configfile: setup.cfg\r\nplugins: cov-4.0.0, anyio-3.5.0\r\ncollected 500 items\r\n...\r\n=================================== FAILURES ===================================\r\n____________________________ test_format_date_long _____________________________\r\n\r\nbase_config = None\r\n\r\n def test_format_date_long(base_config):\r\n> assert LocaleBorg().formatted_date(\"long\", TESLA_BIRTHDAY_DT) == DT_EN_US\r\nE AssertionError: assert 'July 10, 185...6\\u202fPM UTC' == 'July 10, 185...:34:56 PM UTC'\r\nE - July 10, 1856 at 12:34:56 PM UTC\r\nE ? ^^^ ^\r\nE + July 10, 1856, 12:34:56\u202fPM UTC\r\nE ? ^ ^\r\n\r\ntests/test_locale.py:77: AssertionError\r\n__________________________ test_format_date_timezone ___________________________\r\n\r\nbase_config = None\r\n\r\n def test_format_date_timezone(base_config):\r\n tesla_150_birthday_dtz = datetime.datetime(\r\n 2006, 7, 10, 12, 34, 56, tzinfo=dateutil.tz.gettz(\"America/New_York\")\r\n )\r\n formatted_date = LocaleBorg().formatted_date(\"long\", tesla_150_birthday_dtz)\r\n> assert formatted_date == \"July 10, 2006 at 12:34:56 PM -0400\"\r\nE AssertionError: assert 'July 10, 200...u202fPM -0400' == 'July 10, 200...4:56 PM -0400'\r\nE - July 10, 2006 at 12:34:56 PM -0400\r\nE ? ^^^ ^\r\nE + July 10, 2006, 12:34:56\u202fPM -0400\r\nE ? 
^ ^\r\n\r\ntests/test_locale.py:90: AssertionError\r\n_____________________ test_format_date_locale_variants[US] _____________________\r\n\r\nenglish_variant = 'en_US', expected_date = 'July 10, 1856 at 12:34:56 PM UTC'\r\n\r\n @pytest.mark.parametrize(\r\n \"english_variant, expected_date\",\r\n [\r\n pytest.param(\"en_US\", DT_EN_US, id=\"US\"),\r\n pytest.param(\"en_GB\", \"10 July 1856 at 12:34:56 UTC\", id=\"GB\"),\r\n ],\r\n )\r\n def test_format_date_locale_variants(english_variant, expected_date):\r\n LocaleBorg.initialize({\"en\": english_variant}, \"en\")\r\n> assert LocaleBorg().formatted_date(\"long\", TESLA_BIRTHDAY_DT, \"en\") == expected_date\r\nE AssertionError: assert 'July 10, 185...6\\u202fPM UTC' == 'July 10, 185...:34:56 PM UTC'\r\nE - July 10, 1856 at 12:34:56 PM UTC\r\nE ? ^^^ ^\r\nE + July 10, 1856, 12:34:56\u202fPM UTC\r\nE ? ^ ^\r\n\r\ntests/test_locale.py:108: AssertionError\r\n_____________________ test_format_date_locale_variants[GB] _____________________\r\n\r\nenglish_variant = 'en_GB', expected_date = '10 July 1856 at 12:34:56 UTC'\r\n\r\n @pytest.mark.parametrize(\r\n \"english_variant, expected_date\",\r\n [\r\n pytest.param(\"en_US\", DT_EN_US, id=\"US\"),\r\n pytest.param(\"en_GB\", \"10 July 1856 at 12:34:56 UTC\", id=\"GB\"),\r\n ],\r\n )\r\n def test_format_date_locale_variants(english_variant, expected_date):\r\n LocaleBorg.initialize({\"en\": english_variant}, \"en\")\r\n> assert LocaleBorg().formatted_date(\"long\", TESLA_BIRTHDAY_DT, \"en\") == expected_date\r\nE AssertionError: assert '10 July 1856, 12:34:56 UTC' == '10 July 1856 at 12:34:56 UTC'\r\nE - 10 July 1856 at 12:34:56 UTC\r\nE ? ^^^\r\nE + 10 July 1856, 12:34:56 UTC\r\nE ? ^\r\n\r\ntests/test_locale.py:108: AssertionError\r\n```\r\n\r\nApart from the ` at ` vs `, ` change, there is also a difference in SPACE vs. 
NARROW NO-BREAK SPACE before `PM`.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport shutil\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.build_py import build_py\n\n\nwith open('requirements.txt', 'r') as fh:\n dependencies = [l.strip().split(\"#\")[0] for l in fh]\n\nextras = {}\n\nwith open('requirements-extras.txt', 'r') as fh:\n extras['extras'] = [l.strip() for l in fh][1:]\n # Alternative name.\n extras['full'] = extras['extras']\n\nwith open('requirements-tests.txt', 'r') as fh:\n extras['tests'] = [l.strip() for l in fh][1:]\n\n# ########## platform specific stuff #############\nif sys.version_info[0] == 2:\n raise Exception('Python 2 is not supported')\nelif sys.version_info[0] == 3 and sys.version_info[1] < 7:\n raise Exception('Python 3 version < 3.7 is not supported')\n\n##################################################\n\n# Provided as an attribute, so you can append to these instead\n# of replicating them:\nstandard_exclude = ('*.pyc', '*$py.class', '*~', '.*', '*.bak')\nstandard_exclude_directories = ('.*', 'CVS', '_darcs', './build',\n './dist', 'EGG-INFO', '*.egg-info')\n\nwith open('README.rst') as f:\n long_description = f.read()\n\n\ndef copy_messages():\n themes_directory = os.path.join(\n os.path.dirname(__file__), 'nikola', 'data', 'themes')\n original_messages_directory = os.path.join(\n themes_directory, 'default', 'messages')\n\n for theme in ('orphan', 'monospace'):\n theme_messages_directory = os.path.join(\n themes_directory, theme, 'messages')\n\n if os.path.exists(theme_messages_directory):\n shutil.rmtree(theme_messages_directory)\n\n shutil.copytree(original_messages_directory, theme_messages_directory)\n\n\ndef expands_symlinks_for_windows():\n \"\"\"replaces the symlinked files with a copy of the original content.\n\n In windows (msysgit), a symlink is converted to a text file with a\n path to the file it points to. 
If not corrected, installing from a git\n clone will end with some files with bad content\n\n After install the working copy will be dirty (symlink markers overwritten\n with real content)\n \"\"\"\n if sys.platform != 'win32':\n return\n\n # apply the fix\n localdir = os.path.dirname(os.path.abspath(__file__))\n oldpath = sys.path[:]\n sys.path.insert(0, os.path.join(localdir, 'nikola'))\n winutils = __import__('winutils')\n failures = winutils.fix_all_git_symlinked(localdir)\n sys.path = oldpath\n del sys.modules['winutils']\n if failures != -1:\n print('WARNING: your working copy is now dirty by changes in '\n 'samplesite, sphinx and themes')\n if failures > 0:\n raise Exception(\"Error: \\n\\tnot all symlinked files could be fixed.\" +\n \"\\n\\tYour best bet is to start again from clean.\")\n\n\ndef remove_old_files(self):\n tree = os.path.join(self.install_lib, 'nikola')\n try:\n shutil.rmtree(tree, ignore_errors=True)\n except Exception:\n pass\n\n\nclass nikola_install(install):\n def run(self):\n expands_symlinks_for_windows()\n remove_old_files(self)\n install.run(self)\n\n\nclass nikola_build_py(build_py):\n def run(self):\n expands_symlinks_for_windows()\n build_py.run(self)\n\n\nsetup(name='Nikola',\n version='8.2.3',\n description='A modular, fast, simple, static website and blog generator',\n long_description=long_description,\n author='Roberto Alsina and others',\n author_email='[email protected]',\n url='https://getnikola.com/',\n packages=find_packages(exclude=('tests', 'tests.*')),\n license='MIT',\n keywords='website, blog, static',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Plugins',\n 'Environment :: Web Environment',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Internet',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Text Processing :: Markup'],\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n python_requires='>=3.7',\n cmdclass={'install': nikola_install, 'build_py': nikola_build_py},\n data_files=[\n ('share/doc/nikola', [\n 'docs/manual.rst',\n 'docs/theming.rst',\n 'docs/extending.rst']),\n ('share/man/man1', ['docs/man/nikola.1.gz']),\n ],\n entry_points={\n 'console_scripts': [\n 'nikola = nikola.__main__:main'\n ]\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport shutil\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.build_py import build_py\n\n\nwith open('requirements.txt', 'r') as fh:\n dependencies = [l.strip().split(\"#\")[0] for l in fh]\n\nextras = {}\n\nwith open('requirements-extras.txt', 'r') as fh:\n extras['extras'] = [l.strip() for l in fh][1:]\n # Alternative name.\n extras['full'] = extras['extras']\n\nwith open('requirements-tests.txt', 'r') as fh:\n extras['tests'] = [l.strip() for l in fh][1:]\n\n# ########## platform specific stuff #############\nif sys.version_info[0] == 2:\n raise Exception('Python 2 is not supported')\nelif 
sys.version_info[0] == 3 and sys.version_info[1] < 7:\n raise Exception('Python 3 version < 3.7 is not supported')\n\n##################################################\n\n# Provided as an attribute, so you can append to these instead\n# of replicating them:\nstandard_exclude = ('*.pyc', '*$py.class', '*~', '.*', '*.bak')\nstandard_exclude_directories = ('.*', 'CVS', '_darcs', './build',\n './dist', 'EGG-INFO', '*.egg-info')\n\nwith open('README.rst') as f:\n long_description = f.read()\n\n\ndef copy_messages():\n themes_directory = os.path.join(\n os.path.dirname(__file__), 'nikola', 'data', 'themes')\n original_messages_directory = os.path.join(\n themes_directory, 'default', 'messages')\n\n for theme in ('orphan', 'monospace'):\n theme_messages_directory = os.path.join(\n themes_directory, theme, 'messages')\n\n if os.path.exists(theme_messages_directory):\n shutil.rmtree(theme_messages_directory)\n\n shutil.copytree(original_messages_directory, theme_messages_directory)\n\n\ndef expands_symlinks_for_windows():\n \"\"\"replaces the symlinked files with a copy of the original content.\n\n In windows (msysgit), a symlink is converted to a text file with a\n path to the file it points to. If not corrected, installing from a git\n clone will end with some files with bad content\n\n After install the working copy will be dirty (symlink markers overwritten\n with real content)\n \"\"\"\n if sys.platform != 'win32':\n return\n\n # apply the fix\n localdir = os.path.dirname(os.path.abspath(__file__))\n oldpath = sys.path[:]\n sys.path.insert(0, os.path.join(localdir, 'nikola'))\n winutils = __import__('winutils')\n failures = winutils.fix_all_git_symlinked(localdir)\n sys.path = oldpath\n del sys.modules['winutils']\n if failures != -1:\n print('WARNING: your working copy is now dirty by changes in '\n 'samplesite, sphinx and themes')\n if failures > 0:\n raise Exception(\"Error: \\n\\tnot all symlinked files could be fixed.\" +\n \"\\n\\tYour best bet is to start again from clean.\")\n\n\ndef remove_old_files(self):\n tree = os.path.join(self.install_lib, 'nikola')\n try:\n shutil.rmtree(tree, ignore_errors=True)\n except Exception:\n pass\n\n\nclass nikola_install(install):\n def run(self):\n expands_symlinks_for_windows()\n remove_old_files(self)\n install.run(self)\n\n\nclass nikola_build_py(build_py):\n def run(self):\n expands_symlinks_for_windows()\n build_py.run(self)\n\n\nsetup(name='Nikola',\n version='8.2.3',\n description='A modular, fast, simple, static website and blog generator',\n long_description=long_description,\n author='Roberto Alsina and others',\n author_email='[email protected]',\n url='https://getnikola.com/',\n packages=find_packages(exclude=('tests', 'tests.*')),\n license='MIT',\n keywords='website, blog, static',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Plugins',\n 'Environment :: Web Environment',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Topic :: Internet',\n 'Topic :: Internet :: 
WWW/HTTP',\n 'Topic :: Text Processing :: Markup'],\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n python_requires='>=3.7',\n cmdclass={'install': nikola_install, 'build_py': nikola_build_py},\n data_files=[\n ('share/doc/nikola', [\n 'docs/manual.rst',\n 'docs/theming.rst',\n 'docs/extending.rst']),\n ('share/man/man1', ['docs/man/nikola.1.gz']),\n ],\n entry_points={\n 'console_scripts': [\n 'nikola = nikola.__main__:main'\n ]\n },\n )\n", "path": "setup.py"}]}
num_tokens: 3,256
num_tokens_diff: 110
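
The failing assertions in this record trace back to two cosmetic changes in the CLDR data shipped with Babel 12.x: the literal " at " between date and time became ", ", and the plain space before AM/PM became U+202F (NARROW NO-BREAK SPACE). The sketch below only illustrates how a test could normalize Babel output across both behaviours; the helper name and the normalization rules are assumptions for this example, not code from Nikola and not the golden diff above (which only adds a Python 3.11 trove classifier).

```python
# Illustrative sketch (not Nikola code): normalize Babel's "long" datetime
# strings so the same assertion passes on pre- and post-12.x CLDR data.
import datetime

from babel.dates import format_datetime


def normalized_long_datetime(dt, locale="en_US"):
    s = format_datetime(dt, format="long", locale=locale)
    s = s.replace("\u202f", " ")   # NARROW NO-BREAK SPACE (Babel >= 12) -> plain space
    s = s.replace(" at ", ", ")    # older "July 10, 1856 at ..." wording -> comma form
    return s


if __name__ == "__main__":
    tesla_birthday = datetime.datetime(1856, 7, 10, 12, 34, 56)
    # Prints something like "July 10, 1856, 12:34:56 PM UTC" for en_US,
    # regardless of which of the two Babel behaviours is installed.
    print(normalized_long_datetime(tesla_birthday))
```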

problem_id: gh_patches_debug_6189
source: rasdani/github-patches
task_type: git_diff
in_source_id: facebookresearch__hydra-594
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fish completion is not working Fish completion does not seem to work: ``` $ python my_app.py -sc install=fish | source $ python my_app.py <TAB> string split: Unknown option “-n” - (line 2): string split -n ' ' $COMP_LINE ^ in command substitution called on line 0 of file - in function “hydra_fish_completion” called on standard input in command substitution called on standard input stringstring - manipulate strings - Synopsis string escape [(-n | --no-quoted)] [--style=xxx] [STRING...] string join [(-q | --quiet)] SEP [STRING...] string length [(-q | --quiet)] [STRING...] string lower [(-q | --quiet)] [STRING...] string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)] [(-n | --index)] [(-q | ``` Fish completion is not working Fish completion does not seem to work: ``` $ python my_app.py -sc install=fish | source $ python my_app.py <TAB> string split: Unknown option “-n” - (line 2): string split -n ' ' $COMP_LINE ^ in command substitution called on line 0 of file - in function “hydra_fish_completion” called on standard input in command substitution called on standard input stringstring - manipulate strings - Synopsis string escape [(-n | --no-quoted)] [--style=xxx] [STRING...] string join [(-q | --quiet)] SEP [STRING...] string length [(-q | --quiet)] [STRING...] string lower [(-q | --quiet)] [STRING...] string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)] [(-n | --index)] [(-q | ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hydra/_internal/core_plugins/fish_completion.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 import logging 3 import os 4 import sys 5 from typing import List, Optional, Tuple 6 7 from hydra.plugins.completion_plugin import CompletionPlugin 8 9 log = logging.getLogger(__name__) 10 11 12 class FishCompletion(CompletionPlugin): 13 def install(self) -> None: 14 script = """function hydra_fish_completion 15 # Hydra will access COMP_LINE to generate completion candidates 16 set -lx COMP_LINE (commandline -cp) 17 18 # Find out how to call the underlying script 19 set -l parts (string split -n ' ' $COMP_LINE) 20 if test "$parts[1]" = "python" -o "$parts[1]" = "python3" 21 set cmd "$parts[1] $parts[2]" 22 if not grep -q "@hydra.main" $parts[2] 23 return 24 end 25 else 26 set cmd "$parts[1]" 27 end 28 29 # Generate candidates 30 eval "$cmd -sc query=fish" 31 end 32 """ 33 output = self._get_exec() 34 reg_cmd = [] 35 for name, cond in output: 36 reg_cmd.append( 37 f"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\n" 38 ) 39 print(script) 40 print("".join(reg_cmd)) 41 42 def uninstall(self) -> None: 43 name = self._get_uninstall_exec() 44 print(f"complete -e -c {name}") 45 print("function hydra_fish_completion\nend") 46 47 @staticmethod 48 def provides() -> str: 49 return "fish" 50 51 def query(self, config_name: Optional[str]) -> None: 52 line = os.environ["COMP_LINE"] 53 line = self.strip_python_or_app_name(line) 54 print("\n".join(self._query(config_name=config_name, line=line))) 55 56 @staticmethod 57 def help(command: str) -> str: 58 assert command in ["install", "uninstall"] 59 return f"{{}} -sc {command}=fish | source" 60 61 @staticmethod 62 def _get_exec() -> List[Tuple[str, str]]: 63 # Running as an installed app (setuptools entry point) 64 output = [] 65 # User scenario 1: python script.py 66 name = os.path.basename(sys.executable) 67 cond = f"-n '__fish_seen_subcommand_from {sys.argv[0]}' " 68 output.append((name, cond)) 69 70 # User scenario 2: ./script.py or src/script.py or script.py 71 name = os.path.basename(sys.argv[0]) 72 cond = "" 73 output.append((name, cond)) 74 75 return output 76 77 @staticmethod 78 def _get_uninstall_exec() -> str: 79 name = os.path.basename(sys.argv[0]) 80 81 return name 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hydra/_internal/core_plugins/fish_completion.py b/hydra/_internal/core_plugins/fish_completion.py --- a/hydra/_internal/core_plugins/fish_completion.py +++ b/hydra/_internal/core_plugins/fish_completion.py @@ -16,7 +16,7 @@ set -lx COMP_LINE (commandline -cp) # Find out how to call the underlying script - set -l parts (string split -n ' ' $COMP_LINE) + set -l parts (commandline -cpo) if test "$parts[1]" = "python" -o "$parts[1]" = "python3" set cmd "$parts[1] $parts[2]" if not grep -q "@hydra.main" $parts[2]
{"golden_diff": "diff --git a/hydra/_internal/core_plugins/fish_completion.py b/hydra/_internal/core_plugins/fish_completion.py\n--- a/hydra/_internal/core_plugins/fish_completion.py\n+++ b/hydra/_internal/core_plugins/fish_completion.py\n@@ -16,7 +16,7 @@\n set -lx COMP_LINE (commandline -cp)\n \n # Find out how to call the underlying script\n- set -l parts (string split -n ' ' $COMP_LINE)\n+ set -l parts (commandline -cpo)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n", "issue": "Fish completion is not working\nFish completion does not seem to work:\r\n\r\n```\r\n$ python my_app.py -sc install=fish | source\r\n$ python my_app.py <TAB> string split: Unknown option \u201c-n\u201d\r\n- (line 2): \r\nstring split -n ' ' $COMP_LINE\r\n^ \r\nin command substitution \r\n called on line 0 of file -\r\n \r\nin function \u201chydra_fish_completion\u201d\r\n called on standard input\r\n \r\nin command substitution\r\n called on standard input\r\n \r\n\r\n stringstring - manipulate strings\r\n - \r\n\r\n Synopsis \r\n string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]\r\n string join [(-q | --quiet)] SEP [STRING...]\r\n string length [(-q | --quiet)] [STRING...]\r\n string lower [(-q | --quiet)] [STRING...]\r\n string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]\r\n [(-n | --index)] [(-q |\r\n```\nFish completion is not working\nFish completion does not seem to work:\r\n\r\n```\r\n$ python my_app.py -sc install=fish | source\r\n$ python my_app.py <TAB> string split: Unknown option \u201c-n\u201d\r\n- (line 2): \r\nstring split -n ' ' $COMP_LINE\r\n^ \r\nin command substitution \r\n called on line 0 of file -\r\n \r\nin function \u201chydra_fish_completion\u201d\r\n called on standard input\r\n \r\nin command substitution\r\n called on standard input\r\n \r\n\r\n stringstring - manipulate strings\r\n - \r\n\r\n Synopsis \r\n string escape [(-n | --no-quoted)] [--style=xxx] [STRING...]\r\n string join [(-q | --quiet)] SEP [STRING...]\r\n string length [(-q | --quiet)] [STRING...]\r\n string lower [(-q | --quiet)] [STRING...]\r\n string match [(-a | --all)] [((-e | --entire)] [(-i | --ignore-case)] [(-r | --regex)]\r\n [(-n | --index)] [(-q |\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport os\nimport sys\nfrom typing import List, Optional, Tuple\n\nfrom hydra.plugins.completion_plugin import CompletionPlugin\n\nlog = logging.getLogger(__name__)\n\n\nclass FishCompletion(CompletionPlugin):\n def install(self) -> None:\n script = \"\"\"function hydra_fish_completion\n # Hydra will access COMP_LINE to generate completion candidates\n set -lx COMP_LINE (commandline -cp)\n\n # Find out how to call the underlying script\n set -l parts (string split -n ' ' $COMP_LINE)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n return\n end\n else\n set cmd \"$parts[1]\"\n end\n\n # Generate candidates\n eval \"$cmd -sc query=fish\"\nend\n \"\"\"\n output = self._get_exec()\n reg_cmd = []\n for name, cond in output:\n reg_cmd.append(\n f\"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\\n\"\n )\n print(script)\n print(\"\".join(reg_cmd))\n\n def uninstall(self) -> None:\n name = self._get_uninstall_exec()\n print(f\"complete -e -c {name}\")\n print(\"function hydra_fish_completion\\nend\")\n\n @staticmethod\n def provides() -> str:\n return \"fish\"\n\n def query(self, config_name: Optional[str]) -> None:\n line = os.environ[\"COMP_LINE\"]\n line = self.strip_python_or_app_name(line)\n print(\"\\n\".join(self._query(config_name=config_name, line=line)))\n\n @staticmethod\n def help(command: str) -> str:\n assert command in [\"install\", \"uninstall\"]\n return f\"{{}} -sc {command}=fish | source\"\n\n @staticmethod\n def _get_exec() -> List[Tuple[str, str]]:\n # Running as an installed app (setuptools entry point)\n output = []\n # User scenario 1: python script.py\n name = os.path.basename(sys.executable)\n cond = f\"-n '__fish_seen_subcommand_from {sys.argv[0]}' \"\n output.append((name, cond))\n\n # User scenario 2: ./script.py or src/script.py or script.py\n name = os.path.basename(sys.argv[0])\n cond = \"\"\n output.append((name, cond))\n\n return output\n\n @staticmethod\n def _get_uninstall_exec() -> str:\n name = os.path.basename(sys.argv[0])\n\n return name\n", "path": "hydra/_internal/core_plugins/fish_completion.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport os\nimport sys\nfrom typing import List, Optional, Tuple\n\nfrom hydra.plugins.completion_plugin import CompletionPlugin\n\nlog = logging.getLogger(__name__)\n\n\nclass FishCompletion(CompletionPlugin):\n def install(self) -> None:\n script = \"\"\"function hydra_fish_completion\n # Hydra will access COMP_LINE to generate completion candidates\n set -lx COMP_LINE (commandline -cp)\n\n # Find out how to call the underlying script\n set -l parts (commandline -cpo)\n if test \"$parts[1]\" = \"python\" -o \"$parts[1]\" = \"python3\"\n set cmd \"$parts[1] $parts[2]\"\n if not grep -q \"@hydra.main\" $parts[2]\n return\n end\n else\n set cmd \"$parts[1]\"\n end\n\n # Generate candidates\n eval \"$cmd -sc query=fish\"\nend\n \"\"\"\n output = self._get_exec()\n reg_cmd = []\n for name, cond in output:\n reg_cmd.append(\n f\"complete -c {name} {cond}-x -a '(hydra_fish_completion)'\\n\"\n )\n print(script)\n print(\"\".join(reg_cmd))\n\n def uninstall(self) -> None:\n name = self._get_uninstall_exec()\n print(f\"complete -e -c {name}\")\n print(\"function hydra_fish_completion\\nend\")\n\n @staticmethod\n def provides() -> str:\n return \"fish\"\n\n def query(self, config_name: Optional[str]) -> None:\n line = os.environ[\"COMP_LINE\"]\n line = self.strip_python_or_app_name(line)\n print(\"\\n\".join(self._query(config_name=config_name, line=line)))\n\n @staticmethod\n def help(command: str) -> str:\n assert command in [\"install\", \"uninstall\"]\n return f\"{{}} -sc {command}=fish | source\"\n\n @staticmethod\n def _get_exec() -> List[Tuple[str, str]]:\n # Running as an installed app (setuptools entry point)\n output = []\n # User scenario 1: python script.py\n name = os.path.basename(sys.executable)\n cond = f\"-n '__fish_seen_subcommand_from {sys.argv[0]}' \"\n output.append((name, cond))\n\n # User scenario 2: ./script.py or src/script.py or script.py\n name = os.path.basename(sys.argv[0])\n cond = \"\"\n output.append((name, cond))\n\n return output\n\n @staticmethod\n def _get_uninstall_exec() -> str:\n name = os.path.basename(sys.argv[0])\n\n return name\n", "path": "hydra/_internal/core_plugins/fish_completion.py"}]}
num_tokens: 1,507
num_tokens_diff: 175
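
The fish error in this record ("string split: Unknown option -n") goes away in the golden diff because the generated completion function stops re-splitting the raw $COMP_LINE string and instead asks fish for the already tokenized command line via `commandline -cpo`. The snippet below is only a loose Python analogy (fish itself is not involved): it contrasts naive whitespace splitting with shell-aware tokenization, which is roughly the kind of result `commandline -cpo` hands back natively.

```python
# Loose analogy, not Hydra code: raw string splitting vs. shell-aware tokens.
import shlex

comp_line = "python my_app.py db.driver=postgresql 'db.user=site admin'"  # invented example line

naive_parts = comp_line.split(" ")   # quoted argument falls apart into two pieces
tokens = shlex.split(comp_line)      # proper tokens, closer to what `commandline -cpo` yields

print(naive_parts)  # ['python', 'my_app.py', 'db.driver=postgresql', "'db.user=site", "admin'"]
print(tokens)       # ['python', 'my_app.py', 'db.driver=postgresql', 'db.user=site admin']
```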

problem_id: gh_patches_debug_29530
source: rasdani/github-patches
task_type: git_diff
in_source_id: Flexget__Flexget-1667
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Adding new column to `plugins` output I'm trying to understand the concepts of 'phase' and 'interface' of a plugin. The output of the CLI command `flexget plugins` has been helpful. But I think I noticed that the output is missing a column. It has 'Keyword', 'Phases' and 'Flags', but no 'Interfaces'. I found out that all plugins _do_ defined a list of interfaces for themselves. Shall I create a PR adding the column 'Interfaces' to the output? I wanted to ask before I put the effort in. And if someone can explain what a 'phase' and 'interface' is I'll write up a wiki page. ;) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flexget/plugins/cli/plugins.py` Content: ``` 1 from __future__ import unicode_literals, division, absolute_import 2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin 3 4 import logging 5 6 from colorclass.toggles import disable_all_colors 7 from flexget import options 8 from flexget.event import event 9 from flexget.plugin import get_plugins 10 from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize 11 12 log = logging.getLogger('plugins') 13 14 15 def plugins_summary(manager, options): 16 if options.table_type == 'porcelain': 17 disable_all_colors() 18 header = ['Keyword', 'Phases', 'Flags'] 19 table_data = [header] 20 for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)): 21 if options.builtins and not plugin.builtin: 22 continue 23 flags = [] 24 if plugin.instance.__doc__: 25 flags.append('doc') 26 if plugin.builtin: 27 flags.append('builtin') 28 if plugin.debug: 29 if not options.debug: 30 continue 31 flags.append('developers') 32 handlers = plugin.phase_handlers 33 roles = [] 34 for phase in handlers: 35 priority = handlers[phase].priority 36 roles.append('{0}({1})'.format(phase, priority)) 37 38 name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name 39 table_data.append([name, ', '.join(roles), ', '.join(flags)]) 40 41 try: 42 table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2]) 43 console(table.output) 44 except TerminalTableError as e: 45 console('ERROR: %s' % str(e)) 46 return 47 console(colorize('green', ' Built-in plugins')) 48 49 50 @event('options.register') 51 def register_parser_arguments(): 52 parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries', 53 parents=[table_parser]) 54 parser.add_argument('--interface', help='Show plugins belonging to this interface') 55 parser.add_argument('--phase', help='Show plugins that act on this phase') 56 parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins') 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flexget/plugins/cli/plugins.py b/flexget/plugins/cli/plugins.py --- a/flexget/plugins/cli/plugins.py +++ b/flexget/plugins/cli/plugins.py @@ -15,11 +15,12 @@ def plugins_summary(manager, options): if options.table_type == 'porcelain': disable_all_colors() - header = ['Keyword', 'Phases', 'Flags'] + header = ['Keyword', 'Interfaces', 'Phases', 'Flags'] table_data = [header] for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)): if options.builtins and not plugin.builtin: continue + flags = [] if plugin.instance.__doc__: flags.append('doc') @@ -29,6 +30,7 @@ if not options.debug: continue flags.append('developers') + handlers = plugin.phase_handlers roles = [] for phase in handlers: @@ -36,7 +38,7 @@ roles.append('{0}({1})'.format(phase, priority)) name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name - table_data.append([name, ', '.join(roles), ', '.join(flags)]) + table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)]) try: table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])
{"golden_diff": "diff --git a/flexget/plugins/cli/plugins.py b/flexget/plugins/cli/plugins.py\n--- a/flexget/plugins/cli/plugins.py\n+++ b/flexget/plugins/cli/plugins.py\n@@ -15,11 +15,12 @@\n def plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n- header = ['Keyword', 'Phases', 'Flags']\n+ header = ['Keyword', 'Interfaces', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n+\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n@@ -29,6 +30,7 @@\n if not options.debug:\n continue\n flags.append('developers')\n+\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n@@ -36,7 +38,7 @@\n roles.append('{0}({1})'.format(phase, priority))\n \n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n- table_data.append([name, ', '.join(roles), ', '.join(flags)])\n+ table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])\n \n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n", "issue": "Adding new column to `plugins` output\nI'm trying to understand the concepts of 'phase' and 'interface' of a plugin. The output of the CLI command `flexget plugins` has been helpful. But I think I noticed that the output is missing a column. It has 'Keyword', 'Phases' and 'Flags', but no 'Interfaces'. I found out that all plugins _do_ defined a list of interfaces for themselves. \r\n\r\nShall I create a PR adding the column 'Interfaces' to the output?\r\nI wanted to ask before I put the effort in.\r\n\r\nAnd if someone can explain what a 'phase' and 'interface' is I'll write up a wiki page. 
;)\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom colorclass.toggles import disable_all_colors\nfrom flexget import options\nfrom flexget.event import event\nfrom flexget.plugin import get_plugins\nfrom flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize\n\nlog = logging.getLogger('plugins')\n\n\ndef plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n header = ['Keyword', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n if plugin.builtin:\n flags.append('builtin')\n if plugin.debug:\n if not options.debug:\n continue\n flags.append('developers')\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n priority = handlers[phase].priority\n roles.append('{0}({1})'.format(phase, priority))\n\n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n table_data.append([name, ', '.join(roles), ', '.join(flags)])\n\n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n console(table.output)\n except TerminalTableError as e:\n console('ERROR: %s' % str(e))\n return\n console(colorize('green', ' Built-in plugins'))\n\n\n@event('options.register')\ndef register_parser_arguments():\n parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',\n parents=[table_parser])\n parser.add_argument('--interface', help='Show plugins belonging to this interface')\n parser.add_argument('--phase', help='Show plugins that act on this phase')\n parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')\n", "path": "flexget/plugins/cli/plugins.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom colorclass.toggles import disable_all_colors\nfrom flexget import options\nfrom flexget.event import event\nfrom flexget.plugin import get_plugins\nfrom flexget.terminal import TerminalTable, TerminalTableError, table_parser, console, colorize\n\nlog = logging.getLogger('plugins')\n\n\ndef plugins_summary(manager, options):\n if options.table_type == 'porcelain':\n disable_all_colors()\n header = ['Keyword', 'Interfaces', 'Phases', 'Flags']\n table_data = [header]\n for plugin in sorted(get_plugins(phase=options.phase, interface=options.interface)):\n if options.builtins and not plugin.builtin:\n continue\n\n flags = []\n if plugin.instance.__doc__:\n flags.append('doc')\n if plugin.builtin:\n flags.append('builtin')\n if plugin.debug:\n if not options.debug:\n continue\n flags.append('developers')\n\n handlers = plugin.phase_handlers\n roles = []\n for phase in handlers:\n priority = handlers[phase].priority\n roles.append('{0}({1})'.format(phase, priority))\n\n name = colorize('green', plugin.name) if 'builtin' in flags else plugin.name\n table_data.append([name, ', '.join(plugin.interfaces), ', '.join(roles), ', '.join(flags)])\n\n try:\n table = TerminalTable(options.table_type, table_data, wrap_columns=[1, 2])\n console(table.output)\n except TerminalTableError as e:\n 
console('ERROR: %s' % str(e))\n return\n console(colorize('green', ' Built-in plugins'))\n\n\n@event('options.register')\ndef register_parser_arguments():\n parser = options.register_command('plugins', plugins_summary, help='Print registered plugin summaries',\n parents=[table_parser])\n parser.add_argument('--interface', help='Show plugins belonging to this interface')\n parser.add_argument('--phase', help='Show plugins that act on this phase')\n parser.add_argument('--builtins', action='store_true', help='Show just builtin plugins')\n", "path": "flexget/plugins/cli/plugins.py"}]}
num_tokens: 966
num_tokens_diff: 325
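
The golden diff in this record only widens the CLI table: each plugin row gains a `', '.join(plugin.interfaces)` cell between the keyword and the phases. Below is a small self-contained illustration of the before/after row shape, using a made-up stand-in object rather than a real FlexGet plugin.

```python
# Made-up stand-in for a FlexGet plugin record; only the row construction
# mirrors the patch, the attribute values are invented for this example.
class FakeHandler:
    priority = 128


class FakePlugin:
    name = "example_plugin"
    interfaces = ["task", "search"]
    phase_handlers = {"filter": FakeHandler()}
    builtin = True


plugin = FakePlugin()
roles = ["{0}({1})".format(phase, handler.priority)
         for phase, handler in plugin.phase_handlers.items()]
flags = ["builtin"] if plugin.builtin else []

old_row = [plugin.name, ", ".join(roles), ", ".join(flags)]
new_row = [plugin.name, ", ".join(plugin.interfaces), ", ".join(roles), ", ".join(flags)]

print(old_row)  # ['example_plugin', 'filter(128)', 'builtin']
print(new_row)  # ['example_plugin', 'task, search', 'filter(128)', 'builtin']
```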

problem_id: gh_patches_debug_1839
source: rasdani/github-patches
task_type: git_diff
in_source_id: ipython__ipython-2280
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SSH passwordless check with OpenSSH checks for the wrong thing The pattern passed to pexpect is '[Ppassword]:', which looks for any of those letters followed by ':', and not, as the intention seems to be, for 'Password:'. The correct pattern is '[Pp]assword:'. This is at IPython/external/ssh/tunnel.py:100. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/external/ssh/tunnel.py` Content: ``` 1 """Basic ssh tunnel utilities, and convenience functions for tunneling 2 zeromq connections. 3 4 Authors 5 ------- 6 * Min RK 7 """ 8 9 #----------------------------------------------------------------------------- 10 # Copyright (C) 2010-2011 The IPython Development Team 11 # 12 # Distributed under the terms of the BSD License. The full license is in 13 # the file COPYING, distributed as part of this software. 14 #----------------------------------------------------------------------------- 15 16 17 18 #----------------------------------------------------------------------------- 19 # Imports 20 #----------------------------------------------------------------------------- 21 22 from __future__ import print_function 23 24 import os,sys, atexit 25 import socket 26 from multiprocessing import Process 27 from getpass import getpass, getuser 28 import warnings 29 30 try: 31 with warnings.catch_warnings(): 32 warnings.simplefilter('ignore', DeprecationWarning) 33 import paramiko 34 except ImportError: 35 paramiko = None 36 else: 37 from forward import forward_tunnel 38 39 try: 40 from IPython.external import pexpect 41 except ImportError: 42 pexpect = None 43 44 #----------------------------------------------------------------------------- 45 # Code 46 #----------------------------------------------------------------------------- 47 48 # select_random_ports copied from IPython.parallel.util 49 _random_ports = set() 50 51 def select_random_ports(n): 52 """Selects and return n random ports that are available.""" 53 ports = [] 54 for i in xrange(n): 55 sock = socket.socket() 56 sock.bind(('', 0)) 57 while sock.getsockname()[1] in _random_ports: 58 sock.close() 59 sock = socket.socket() 60 sock.bind(('', 0)) 61 ports.append(sock) 62 for i, sock in enumerate(ports): 63 port = sock.getsockname()[1] 64 sock.close() 65 ports[i] = port 66 _random_ports.add(port) 67 return ports 68 69 70 #----------------------------------------------------------------------------- 71 # Check for passwordless login 72 #----------------------------------------------------------------------------- 73 74 def try_passwordless_ssh(server, keyfile, paramiko=None): 75 """Attempt to make an ssh connection without a password. 76 This is mainly used for requiring password input only once 77 when many tunnels may be connected to the same server. 78 79 If paramiko is None, the default for the platform is chosen. 
80 """ 81 if paramiko is None: 82 paramiko = sys.platform == 'win32' 83 if not paramiko: 84 f = _try_passwordless_openssh 85 else: 86 f = _try_passwordless_paramiko 87 return f(server, keyfile) 88 89 def _try_passwordless_openssh(server, keyfile): 90 """Try passwordless login with shell ssh command.""" 91 if pexpect is None: 92 raise ImportError("pexpect unavailable, use paramiko") 93 cmd = 'ssh -f '+ server 94 if keyfile: 95 cmd += ' -i ' + keyfile 96 cmd += ' exit' 97 p = pexpect.spawn(cmd) 98 while True: 99 try: 100 p.expect('[Ppassword]:', timeout=.1) 101 except pexpect.TIMEOUT: 102 continue 103 except pexpect.EOF: 104 return True 105 else: 106 return False 107 108 def _try_passwordless_paramiko(server, keyfile): 109 """Try passwordless login with paramiko.""" 110 if paramiko is None: 111 msg = "Paramiko unavaliable, " 112 if sys.platform == 'win32': 113 msg += "Paramiko is required for ssh tunneled connections on Windows." 114 else: 115 msg += "use OpenSSH." 116 raise ImportError(msg) 117 username, server, port = _split_server(server) 118 client = paramiko.SSHClient() 119 client.load_system_host_keys() 120 client.set_missing_host_key_policy(paramiko.WarningPolicy()) 121 try: 122 client.connect(server, port, username=username, key_filename=keyfile, 123 look_for_keys=True) 124 except paramiko.AuthenticationException: 125 return False 126 else: 127 client.close() 128 return True 129 130 131 def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60): 132 """Connect a socket to an address via an ssh tunnel. 133 134 This is a wrapper for socket.connect(addr), when addr is not accessible 135 from the local machine. It simply creates an ssh tunnel using the remaining args, 136 and calls socket.connect('tcp://localhost:lport') where lport is the randomly 137 selected local port of the tunnel. 138 139 """ 140 new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout) 141 socket.connect(new_url) 142 return tunnel 143 144 145 def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60): 146 """Open a tunneled connection from a 0MQ url. 147 148 For use inside tunnel_connection. 149 150 Returns 151 ------- 152 153 (url, tunnel): The 0MQ url that has been forwarded, and the tunnel object 154 """ 155 156 lport = select_random_ports(1)[0] 157 transport, addr = addr.split('://') 158 ip,rport = addr.split(':') 159 rport = int(rport) 160 if paramiko is None: 161 paramiko = sys.platform == 'win32' 162 if paramiko: 163 tunnelf = paramiko_tunnel 164 else: 165 tunnelf = openssh_tunnel 166 167 tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout) 168 return 'tcp://127.0.0.1:%i'%lport, tunnel 169 170 def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): 171 """Create an ssh tunnel using command-line ssh that connects port lport 172 on this machine to localhost:rport on server. The tunnel 173 will automatically close when not in use, remaining open 174 for a minimum of timeout seconds for an initial connection. 175 176 This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, 177 as seen from `server`. 178 179 keyfile and password may be specified, but ssh config is checked for defaults. 180 181 Parameters 182 ---------- 183 184 lport : int 185 local port for connecting to the tunnel from this machine. 186 rport : int 187 port on the remote machine to connect to. 
188 server : str 189 The ssh server to connect to. The full ssh server string will be parsed. 190 user@server:port 191 remoteip : str [Default: 127.0.0.1] 192 The remote ip, specifying the destination of the tunnel. 193 Default is localhost, which means that the tunnel would redirect 194 localhost:lport on this machine to localhost:rport on the *server*. 195 196 keyfile : str; path to public key file 197 This specifies a key to be used in ssh login, default None. 198 Regular default ssh keys will be used without specifying this argument. 199 password : str; 200 Your ssh password to the ssh server. Note that if this is left None, 201 you will be prompted for it if passwordless key based login is unavailable. 202 timeout : int [default: 60] 203 The time (in seconds) after which no activity will result in the tunnel 204 closing. This prevents orphaned tunnels from running forever. 205 """ 206 if pexpect is None: 207 raise ImportError("pexpect unavailable, use paramiko_tunnel") 208 ssh="ssh " 209 if keyfile: 210 ssh += "-i " + keyfile 211 212 if ':' in server: 213 server, port = server.split(':') 214 ssh += " -p %s" % port 215 216 cmd = "%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i" % ( 217 ssh, lport, remoteip, rport, server, timeout) 218 tunnel = pexpect.spawn(cmd) 219 failed = False 220 while True: 221 try: 222 tunnel.expect('[Pp]assword:', timeout=.1) 223 except pexpect.TIMEOUT: 224 continue 225 except pexpect.EOF: 226 if tunnel.exitstatus: 227 print (tunnel.exitstatus) 228 print (tunnel.before) 229 print (tunnel.after) 230 raise RuntimeError("tunnel '%s' failed to start"%(cmd)) 231 else: 232 return tunnel.pid 233 else: 234 if failed: 235 print("Password rejected, try again") 236 password=None 237 if password is None: 238 password = getpass("%s's password: "%(server)) 239 tunnel.sendline(password) 240 failed = True 241 242 def _split_server(server): 243 if '@' in server: 244 username,server = server.split('@', 1) 245 else: 246 username = getuser() 247 if ':' in server: 248 server, port = server.split(':') 249 port = int(port) 250 else: 251 port = 22 252 return username, server, port 253 254 def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): 255 """launch a tunner with paramiko in a subprocess. This should only be used 256 when shell ssh is unavailable (e.g. Windows). 257 258 This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, 259 as seen from `server`. 260 261 If you are familiar with ssh tunnels, this creates the tunnel: 262 263 ssh server -L localhost:lport:remoteip:rport 264 265 keyfile and password may be specified, but ssh config is checked for defaults. 266 267 268 Parameters 269 ---------- 270 271 lport : int 272 local port for connecting to the tunnel from this machine. 273 rport : int 274 port on the remote machine to connect to. 275 server : str 276 The ssh server to connect to. The full ssh server string will be parsed. 277 user@server:port 278 remoteip : str [Default: 127.0.0.1] 279 The remote ip, specifying the destination of the tunnel. 280 Default is localhost, which means that the tunnel would redirect 281 localhost:lport on this machine to localhost:rport on the *server*. 282 283 keyfile : str; path to public key file 284 This specifies a key to be used in ssh login, default None. 285 Regular default ssh keys will be used without specifying this argument. 286 password : str; 287 Your ssh password to the ssh server. 
Note that if this is left None, 288 you will be prompted for it if passwordless key based login is unavailable. 289 timeout : int [default: 60] 290 The time (in seconds) after which no activity will result in the tunnel 291 closing. This prevents orphaned tunnels from running forever. 292 293 """ 294 if paramiko is None: 295 raise ImportError("Paramiko not available") 296 297 if password is None: 298 if not _try_passwordless_paramiko(server, keyfile): 299 password = getpass("%s's password: "%(server)) 300 301 p = Process(target=_paramiko_tunnel, 302 args=(lport, rport, server, remoteip), 303 kwargs=dict(keyfile=keyfile, password=password)) 304 p.daemon=False 305 p.start() 306 atexit.register(_shutdown_process, p) 307 return p 308 309 def _shutdown_process(p): 310 if p.is_alive(): 311 p.terminate() 312 313 def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None): 314 """Function for actually starting a paramiko tunnel, to be passed 315 to multiprocessing.Process(target=this), and not called directly. 316 """ 317 username, server, port = _split_server(server) 318 client = paramiko.SSHClient() 319 client.load_system_host_keys() 320 client.set_missing_host_key_policy(paramiko.WarningPolicy()) 321 322 try: 323 client.connect(server, port, username=username, key_filename=keyfile, 324 look_for_keys=True, password=password) 325 # except paramiko.AuthenticationException: 326 # if password is None: 327 # password = getpass("%s@%s's password: "%(username, server)) 328 # client.connect(server, port, username=username, password=password) 329 # else: 330 # raise 331 except Exception as e: 332 print ('*** Failed to connect to %s:%d: %r' % (server, port, e)) 333 sys.exit(1) 334 335 # print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport)) 336 337 try: 338 forward_tunnel(lport, remoteip, rport, client.get_transport()) 339 except KeyboardInterrupt: 340 print ('SIGINT: Port forwarding stopped cleanly') 341 sys.exit(0) 342 except Exception as e: 343 print ("Port forwarding stopped uncleanly: %s"%e) 344 sys.exit(255) 345 346 if sys.platform == 'win32': 347 ssh_tunnel = paramiko_tunnel 348 else: 349 ssh_tunnel = openssh_tunnel 350 351 352 __all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh'] 353 354 355 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/external/ssh/tunnel.py b/IPython/external/ssh/tunnel.py --- a/IPython/external/ssh/tunnel.py +++ b/IPython/external/ssh/tunnel.py @@ -97,7 +97,7 @@ p = pexpect.spawn(cmd) while True: try: - p.expect('[Ppassword]:', timeout=.1) + p.expect('[Pp]assword:', timeout=.1) except pexpect.TIMEOUT: continue except pexpect.EOF:
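A note on the pattern fix above: square brackets form a regular-expression character class, so the original '[Ppassword]:' matches any single one of the letters P, p, a, s, w, o, r, d followed by a colon, while the corrected '[Pp]assword:' matches the literal prompt. A minimal sketch with Python's re module (pexpect treats string patterns as regular expressions) illustrates the difference:

```python
import re

buggy = re.compile(r'[Ppassword]:')  # character class: one of P,p,a,s,w,o,r,d then ':'
fixed = re.compile(r'[Pp]assword:')  # literal "Password:" or "password:"

text = 'tcp://127.0.0.1:5555'         # no password prompt here
print(bool(buggy.search(text)))       # True  -- false positive on "p:"
print(bool(fixed.search(text)))       # False

prompt = "user@host's password:"      # a real ssh prompt
print(bool(buggy.search(prompt)))     # True  ("d:" happens to match)
print(bool(fixed.search(prompt)))     # True  -- matches for the right reason
```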
{"golden_diff": "diff --git a/IPython/external/ssh/tunnel.py b/IPython/external/ssh/tunnel.py\n--- a/IPython/external/ssh/tunnel.py\n+++ b/IPython/external/ssh/tunnel.py\n@@ -97,7 +97,7 @@\n p = pexpect.spawn(cmd)\n while True:\n try:\n- p.expect('[Ppassword]:', timeout=.1)\n+ p.expect('[Pp]assword:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n", "issue": "SSH passwordless check with OpenSSH checks for the wrong thing\nThe pattern passed to pexpect is '[Ppassword]:', which looks for any of those letters followed by ':', and not, as the intention seems to be, for 'Password:'. The correct pattern is '[Pp]assword:'.\n\nThis is at IPython/external/ssh/tunnel.py:100.\n\n", "before_files": [{"content": "\"\"\"Basic ssh tunnel utilities, and convenience functions for tunneling\nzeromq connections.\n\nAuthors\n-------\n* Min RK\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os,sys, atexit\nimport socket\nfrom multiprocessing import Process\nfrom getpass import getpass, getuser\nimport warnings\n\ntry:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DeprecationWarning)\n import paramiko\nexcept ImportError:\n paramiko = None\nelse:\n from forward import forward_tunnel\n\ntry:\n from IPython.external import pexpect\nexcept ImportError:\n pexpect = None\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n# select_random_ports copied from IPython.parallel.util\n_random_ports = set()\n\ndef select_random_ports(n):\n \"\"\"Selects and return n random ports that are available.\"\"\"\n ports = []\n for i in xrange(n):\n sock = socket.socket()\n sock.bind(('', 0))\n while sock.getsockname()[1] in _random_ports:\n sock.close()\n sock = socket.socket()\n sock.bind(('', 0))\n ports.append(sock)\n for i, sock in enumerate(ports):\n port = sock.getsockname()[1]\n sock.close()\n ports[i] = port\n _random_ports.add(port)\n return ports\n\n\n#-----------------------------------------------------------------------------\n# Check for passwordless login\n#-----------------------------------------------------------------------------\n\ndef try_passwordless_ssh(server, keyfile, paramiko=None):\n \"\"\"Attempt to make an ssh connection without a password.\n This is mainly used for requiring password input only once\n when many tunnels may be connected to the same server.\n\n If paramiko is None, the default for the platform is chosen.\n \"\"\"\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if not paramiko:\n f = _try_passwordless_openssh\n else:\n f = _try_passwordless_paramiko\n return f(server, keyfile)\n\ndef _try_passwordless_openssh(server, keyfile):\n \"\"\"Try passwordless login with shell ssh command.\"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko\")\n cmd = 'ssh -f '+ server\n if keyfile:\n cmd += ' -i ' + keyfile\n cmd += ' exit'\n p = pexpect.spawn(cmd)\n 
while True:\n try:\n p.expect('[Ppassword]:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n return True\n else:\n return False\n\ndef _try_passwordless_paramiko(server, keyfile):\n \"\"\"Try passwordless login with paramiko.\"\"\"\n if paramiko is None:\n msg = \"Paramiko unavaliable, \"\n if sys.platform == 'win32':\n msg += \"Paramiko is required for ssh tunneled connections on Windows.\"\n else:\n msg += \"use OpenSSH.\"\n raise ImportError(msg)\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n try:\n client.connect(server, port, username=username, key_filename=keyfile,\n look_for_keys=True)\n except paramiko.AuthenticationException:\n return False\n else:\n client.close()\n return True\n\n\ndef tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Connect a socket to an address via an ssh tunnel.\n\n This is a wrapper for socket.connect(addr), when addr is not accessible\n from the local machine. It simply creates an ssh tunnel using the remaining args,\n and calls socket.connect('tcp://localhost:lport') where lport is the randomly\n selected local port of the tunnel.\n\n \"\"\"\n new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)\n socket.connect(new_url)\n return tunnel\n\n\ndef open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Open a tunneled connection from a 0MQ url.\n\n For use inside tunnel_connection.\n\n Returns\n -------\n\n (url, tunnel): The 0MQ url that has been forwarded, and the tunnel object\n \"\"\"\n\n lport = select_random_ports(1)[0]\n transport, addr = addr.split('://')\n ip,rport = addr.split(':')\n rport = int(rport)\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if paramiko:\n tunnelf = paramiko_tunnel\n else:\n tunnelf = openssh_tunnel\n\n tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)\n return 'tcp://127.0.0.1:%i'%lport, tunnel\n\ndef openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"Create an ssh tunnel using command-line ssh that connects port lport\n on this machine to localhost:rport on server. The tunnel\n will automatically close when not in use, remaining open\n for a minimum of timeout seconds for an initial connection.\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. 
Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. This prevents orphaned tunnels from running forever.\n \"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko_tunnel\")\n ssh=\"ssh \"\n if keyfile:\n ssh += \"-i \" + keyfile\n \n if ':' in server:\n server, port = server.split(':')\n ssh += \" -p %s\" % port\n \n cmd = \"%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i\" % (\n ssh, lport, remoteip, rport, server, timeout)\n tunnel = pexpect.spawn(cmd)\n failed = False\n while True:\n try:\n tunnel.expect('[Pp]assword:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n if tunnel.exitstatus:\n print (tunnel.exitstatus)\n print (tunnel.before)\n print (tunnel.after)\n raise RuntimeError(\"tunnel '%s' failed to start\"%(cmd))\n else:\n return tunnel.pid\n else:\n if failed:\n print(\"Password rejected, try again\")\n password=None\n if password is None:\n password = getpass(\"%s's password: \"%(server))\n tunnel.sendline(password)\n failed = True\n\ndef _split_server(server):\n if '@' in server:\n username,server = server.split('@', 1)\n else:\n username = getuser()\n if ':' in server:\n server, port = server.split(':')\n port = int(port)\n else:\n port = 22\n return username, server, port\n\ndef paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"launch a tunner with paramiko in a subprocess. This should only be used\n when shell ssh is unavailable (e.g. Windows).\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n If you are familiar with ssh tunnels, this creates the tunnel:\n\n ssh server -L localhost:lport:remoteip:rport\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. 
This prevents orphaned tunnels from running forever.\n\n \"\"\"\n if paramiko is None:\n raise ImportError(\"Paramiko not available\")\n\n if password is None:\n if not _try_passwordless_paramiko(server, keyfile):\n password = getpass(\"%s's password: \"%(server))\n\n p = Process(target=_paramiko_tunnel,\n args=(lport, rport, server, remoteip),\n kwargs=dict(keyfile=keyfile, password=password))\n p.daemon=False\n p.start()\n atexit.register(_shutdown_process, p)\n return p\n\ndef _shutdown_process(p):\n if p.is_alive():\n p.terminate()\n\ndef _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):\n \"\"\"Function for actually starting a paramiko tunnel, to be passed\n to multiprocessing.Process(target=this), and not called directly.\n \"\"\"\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n try:\n client.connect(server, port, username=username, key_filename=keyfile,\n look_for_keys=True, password=password)\n# except paramiko.AuthenticationException:\n# if password is None:\n# password = getpass(\"%s@%s's password: \"%(username, server))\n# client.connect(server, port, username=username, password=password)\n# else:\n# raise\n except Exception as e:\n print ('*** Failed to connect to %s:%d: %r' % (server, port, e))\n sys.exit(1)\n\n # print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport))\n\n try:\n forward_tunnel(lport, remoteip, rport, client.get_transport())\n except KeyboardInterrupt:\n print ('SIGINT: Port forwarding stopped cleanly')\n sys.exit(0)\n except Exception as e:\n print (\"Port forwarding stopped uncleanly: %s\"%e)\n sys.exit(255)\n\nif sys.platform == 'win32':\n ssh_tunnel = paramiko_tunnel\nelse:\n ssh_tunnel = openssh_tunnel\n\n\n__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']\n\n\n", "path": "IPython/external/ssh/tunnel.py"}], "after_files": [{"content": "\"\"\"Basic ssh tunnel utilities, and convenience functions for tunneling\nzeromq connections.\n\nAuthors\n-------\n* Min RK\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os,sys, atexit\nimport socket\nfrom multiprocessing import Process\nfrom getpass import getpass, getuser\nimport warnings\n\ntry:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DeprecationWarning)\n import paramiko\nexcept ImportError:\n paramiko = None\nelse:\n from forward import forward_tunnel\n\ntry:\n from IPython.external import pexpect\nexcept ImportError:\n pexpect = None\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n# select_random_ports copied from IPython.parallel.util\n_random_ports = set()\n\ndef select_random_ports(n):\n \"\"\"Selects and return n random ports that are available.\"\"\"\n ports = []\n for i in xrange(n):\n sock = socket.socket()\n sock.bind(('', 0))\n while sock.getsockname()[1] in _random_ports:\n sock.close()\n sock = socket.socket()\n sock.bind(('', 0))\n ports.append(sock)\n for i, sock in enumerate(ports):\n port = sock.getsockname()[1]\n sock.close()\n ports[i] = port\n _random_ports.add(port)\n return ports\n\n\n#-----------------------------------------------------------------------------\n# Check for passwordless login\n#-----------------------------------------------------------------------------\n\ndef try_passwordless_ssh(server, keyfile, paramiko=None):\n \"\"\"Attempt to make an ssh connection without a password.\n This is mainly used for requiring password input only once\n when many tunnels may be connected to the same server.\n\n If paramiko is None, the default for the platform is chosen.\n \"\"\"\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if not paramiko:\n f = _try_passwordless_openssh\n else:\n f = _try_passwordless_paramiko\n return f(server, keyfile)\n\ndef _try_passwordless_openssh(server, keyfile):\n \"\"\"Try passwordless login with shell ssh command.\"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko\")\n cmd = 'ssh -f '+ server\n if keyfile:\n cmd += ' -i ' + keyfile\n cmd += ' exit'\n p = pexpect.spawn(cmd)\n while True:\n try:\n p.expect('[Pp]assword:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n return True\n else:\n return False\n\ndef _try_passwordless_paramiko(server, keyfile):\n \"\"\"Try passwordless login with paramiko.\"\"\"\n if paramiko is None:\n msg = \"Paramiko unavaliable, \"\n if sys.platform == 'win32':\n msg += \"Paramiko is required for ssh tunneled connections on Windows.\"\n else:\n msg += \"use OpenSSH.\"\n raise ImportError(msg)\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n try:\n client.connect(server, port, username=username, key_filename=keyfile,\n look_for_keys=True)\n except paramiko.AuthenticationException:\n return False\n else:\n client.close()\n return True\n\n\ndef tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Connect a socket to an address via an ssh tunnel.\n\n This is a wrapper for 
socket.connect(addr), when addr is not accessible\n from the local machine. It simply creates an ssh tunnel using the remaining args,\n and calls socket.connect('tcp://localhost:lport') where lport is the randomly\n selected local port of the tunnel.\n\n \"\"\"\n new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)\n socket.connect(new_url)\n return tunnel\n\n\ndef open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):\n \"\"\"Open a tunneled connection from a 0MQ url.\n\n For use inside tunnel_connection.\n\n Returns\n -------\n\n (url, tunnel): The 0MQ url that has been forwarded, and the tunnel object\n \"\"\"\n\n lport = select_random_ports(1)[0]\n transport, addr = addr.split('://')\n ip,rport = addr.split(':')\n rport = int(rport)\n if paramiko is None:\n paramiko = sys.platform == 'win32'\n if paramiko:\n tunnelf = paramiko_tunnel\n else:\n tunnelf = openssh_tunnel\n\n tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)\n return 'tcp://127.0.0.1:%i'%lport, tunnel\n\ndef openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"Create an ssh tunnel using command-line ssh that connects port lport\n on this machine to localhost:rport on server. The tunnel\n will automatically close when not in use, remaining open\n for a minimum of timeout seconds for an initial connection.\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. 
This prevents orphaned tunnels from running forever.\n \"\"\"\n if pexpect is None:\n raise ImportError(\"pexpect unavailable, use paramiko_tunnel\")\n ssh=\"ssh \"\n if keyfile:\n ssh += \"-i \" + keyfile\n \n if ':' in server:\n server, port = server.split(':')\n ssh += \" -p %s\" % port\n \n cmd = \"%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i\" % (\n ssh, lport, remoteip, rport, server, timeout)\n tunnel = pexpect.spawn(cmd)\n failed = False\n while True:\n try:\n tunnel.expect('[Pp]assword:', timeout=.1)\n except pexpect.TIMEOUT:\n continue\n except pexpect.EOF:\n if tunnel.exitstatus:\n print (tunnel.exitstatus)\n print (tunnel.before)\n print (tunnel.after)\n raise RuntimeError(\"tunnel '%s' failed to start\"%(cmd))\n else:\n return tunnel.pid\n else:\n if failed:\n print(\"Password rejected, try again\")\n password=None\n if password is None:\n password = getpass(\"%s's password: \"%(server))\n tunnel.sendline(password)\n failed = True\n\ndef _split_server(server):\n if '@' in server:\n username,server = server.split('@', 1)\n else:\n username = getuser()\n if ':' in server:\n server, port = server.split(':')\n port = int(port)\n else:\n port = 22\n return username, server, port\n\ndef paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):\n \"\"\"launch a tunner with paramiko in a subprocess. This should only be used\n when shell ssh is unavailable (e.g. Windows).\n\n This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,\n as seen from `server`.\n\n If you are familiar with ssh tunnels, this creates the tunnel:\n\n ssh server -L localhost:lport:remoteip:rport\n\n keyfile and password may be specified, but ssh config is checked for defaults.\n\n\n Parameters\n ----------\n\n lport : int\n local port for connecting to the tunnel from this machine.\n rport : int\n port on the remote machine to connect to.\n server : str\n The ssh server to connect to. The full ssh server string will be parsed.\n user@server:port\n remoteip : str [Default: 127.0.0.1]\n The remote ip, specifying the destination of the tunnel.\n Default is localhost, which means that the tunnel would redirect\n localhost:lport on this machine to localhost:rport on the *server*.\n\n keyfile : str; path to public key file\n This specifies a key to be used in ssh login, default None.\n Regular default ssh keys will be used without specifying this argument.\n password : str;\n Your ssh password to the ssh server. Note that if this is left None,\n you will be prompted for it if passwordless key based login is unavailable.\n timeout : int [default: 60]\n The time (in seconds) after which no activity will result in the tunnel\n closing. 
This prevents orphaned tunnels from running forever.\n\n \"\"\"\n if paramiko is None:\n raise ImportError(\"Paramiko not available\")\n\n if password is None:\n if not _try_passwordless_paramiko(server, keyfile):\n password = getpass(\"%s's password: \"%(server))\n\n p = Process(target=_paramiko_tunnel,\n args=(lport, rport, server, remoteip),\n kwargs=dict(keyfile=keyfile, password=password))\n p.daemon=False\n p.start()\n atexit.register(_shutdown_process, p)\n return p\n\ndef _shutdown_process(p):\n if p.is_alive():\n p.terminate()\n\ndef _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):\n \"\"\"Function for actually starting a paramiko tunnel, to be passed\n to multiprocessing.Process(target=this), and not called directly.\n \"\"\"\n username, server, port = _split_server(server)\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy())\n\n try:\n client.connect(server, port, username=username, key_filename=keyfile,\n look_for_keys=True, password=password)\n# except paramiko.AuthenticationException:\n# if password is None:\n# password = getpass(\"%s@%s's password: \"%(username, server))\n# client.connect(server, port, username=username, password=password)\n# else:\n# raise\n except Exception as e:\n print ('*** Failed to connect to %s:%d: %r' % (server, port, e))\n sys.exit(1)\n\n # print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport))\n\n try:\n forward_tunnel(lport, remoteip, rport, client.get_transport())\n except KeyboardInterrupt:\n print ('SIGINT: Port forwarding stopped cleanly')\n sys.exit(0)\n except Exception as e:\n print (\"Port forwarding stopped uncleanly: %s\"%e)\n sys.exit(255)\n\nif sys.platform == 'win32':\n ssh_tunnel = paramiko_tunnel\nelse:\n ssh_tunnel = openssh_tunnel\n\n\n__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']\n\n\n", "path": "IPython/external/ssh/tunnel.py"}]}
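For reference, the verification_info blob above bundles the golden diff, the issue text, and full before/after file contents keyed by path. A rough sketch of how such a record could be sanity-checked, assuming it is parsed from its JSON form (the helper below is illustrative, not part of the dataset tooling):

```python
import json

def paths_touched_by_diff(golden_diff: str) -> list[str]:
    # Each file header in a git diff looks like: diff --git a/<path> b/<path>
    return [line.split(' b/', 1)[1]
            for line in golden_diff.splitlines()
            if line.startswith('diff --git ')]

def check_record(record_json: str) -> bool:
    rec = json.loads(record_json)
    before = {f['path']: f['content'] for f in rec['before_files']}
    after = {f['path']: f['content'] for f in rec['after_files']}
    # Every file the golden diff modifies should be present in both snapshots,
    # and its content should actually have changed.
    return all(p in before and p in after and before[p] != after[p]
               for p in paths_touched_by_diff(rec['golden_diff']))
```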
4093

118
gh_patches_debug_28979
rasdani/github-patches
git_diff
Parsl__parsl-465
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tweak logging to show info per-executor If a user has configured multiple executors, they will see multiple `Tasks: X Slots: X Parallelism: X` logging lines for each executor. We need to label the executor here so it is clear what each line refers to. Idea and report from @djf604. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parsl/dataflow/strategy.py` Content: ``` 1 import logging 2 import time 3 import math 4 5 logger = logging.getLogger(__name__) 6 7 8 class Strategy(object): 9 """FlowControl strategy. 10 11 As a workflow dag is processed by Parsl, new tasks are added and completed 12 asynchronously. Parsl interfaces executors with execution providers to construct 13 scalable executors to handle the variable work-load generated by the 14 workflow. This component is responsible for periodically checking outstanding 15 tasks and available compute capacity and trigger scaling events to match 16 workflow needs. 17 18 Here's a diagram of an executor. An executor consists of blocks, which are usually 19 created by single requests to a Local Resource Manager (LRM) such as slurm, 20 condor, torque, or even AWS API. The blocks could contain several task blocks 21 which are separate instances on workers. 22 23 24 .. code:: python 25 26 |<--min_blocks |<-init_blocks max_blocks-->| 27 +----------------------------------------------------------+ 28 | +--------block----------+ +--------block--------+ | 29 executor = | | task task | ... | task task | | 30 | +-----------------------+ +---------------------+ | 31 +----------------------------------------------------------+ 32 33 The relevant specification options are: 34 1. min_blocks: Minimum number of blocks to maintain 35 2. init_blocks: number of blocks to provision at initialization of workflow 36 3. max_blocks: Maximum number of blocks that can be active due to one workflow 37 38 39 .. code:: python 40 41 slots = current_capacity * tasks_per_node * nodes_per_block 42 43 active_tasks = pending_tasks + running_tasks 44 45 Parallelism = slots / tasks 46 = [0, 1] (i.e, 0 <= p <= 1) 47 48 For example: 49 50 When p = 0, 51 => compute with the least resources possible. 52 infinite tasks are stacked per slot. 53 54 .. code:: python 55 56 blocks = min_blocks { if active_tasks = 0 57 max(min_blocks, 1) { else 58 59 When p = 1, 60 => compute with the most resources. 61 one task is stacked per slot. 62 63 .. code:: python 64 65 blocks = min ( max_blocks, 66 ceil( active_tasks / slots ) ) 67 68 69 When p = 1/2, 70 => We stack upto 2 tasks per slot before we overflow 71 and request a new block 72 73 74 let's say min:init:max = 0:0:4 and task_blocks=2 75 Consider the following example: 76 min_blocks = 0 77 init_blocks = 0 78 max_blocks = 4 79 tasks_per_node = 2 80 nodes_per_block = 1 81 82 In the diagram, X <- task 83 84 at 2 tasks: 85 86 .. code:: python 87 88 +---Block---| 89 | | 90 | X X | 91 |slot slot| 92 +-----------+ 93 94 at 5 tasks, we overflow as the capacity of a single block is fully used. 95 96 .. 
code:: python 97 98 +---Block---| +---Block---| 99 | X X | ----> | | 100 | X X | | X | 101 |slot slot| |slot slot| 102 +-----------+ +-----------+ 103 104 """ 105 106 def __init__(self, dfk): 107 """Initialize strategy.""" 108 self.dfk = dfk 109 self.config = dfk.config 110 self.executors = {} 111 self.max_idletime = 60 * 2 # 2 minutes 112 113 for e in self.dfk.config.executors: 114 self.executors[e.label] = {'idle_since': None, 'config': e.label} 115 116 self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple} 117 118 self.strategize = self.strategies[self.config.strategy] 119 self.logger_flag = False 120 self.prior_loghandlers = set(logging.getLogger().handlers) 121 122 logger.debug("Scaling strategy: {0}".format(self.config.strategy)) 123 124 def _strategy_noop(self, tasks, *args, kind=None, **kwargs): 125 """Do nothing. 126 127 Args: 128 - tasks (task_ids): Not used here. 129 130 KWargs: 131 - kind (Not used) 132 """ 133 134 def unset_logging(self): 135 """ Mute newly added handlers to the root level, right after calling executor.status 136 """ 137 if self.logger_flag is True: 138 return 139 140 root_logger = logging.getLogger() 141 142 for hndlr in root_logger.handlers: 143 if hndlr not in self.prior_loghandlers: 144 hndlr.setLevel(logging.ERROR) 145 146 self.logger_flag = True 147 148 def _strategy_simple(self, tasks, *args, kind=None, **kwargs): 149 """Peek at the DFK and the executors specified. 150 151 We assume here that tasks are not held in a runnable 152 state, and that all tasks from an app would be sent to 153 a single specific executor, i.e tasks cannot be specified 154 to go to one of more executors. 155 156 Args: 157 - tasks (task_ids): Not used here. 158 159 KWargs: 160 - kind (Not used) 161 """ 162 163 for label, executor in self.dfk.executors.items(): 164 if not executor.scaling_enabled: 165 continue 166 167 # Tasks that are either pending completion 168 active_tasks = executor.executor.outstanding 169 170 status = executor.status() 171 self.unset_logging() 172 173 # FIXME we need to handle case where provider does not define these 174 # FIXME probably more of this logic should be moved to the provider 175 min_blocks = executor.provider.min_blocks 176 max_blocks = executor.provider.max_blocks 177 tasks_per_node = executor.provider.tasks_per_node 178 nodes_per_block = executor.provider.nodes_per_block 179 parallelism = executor.provider.parallelism 180 181 active_blocks = sum([1 for x in status if x in ('RUNNING', 182 'SUBMITTING', 183 'PENDING')]) 184 active_slots = active_blocks * tasks_per_node * nodes_per_block 185 186 # import pdb; pdb.set_trace() 187 logger.debug("Tasks:{} Slots:{} Parallelism:{}".format(len(active_tasks), 188 active_slots, 189 parallelism)) 190 191 # Case 1 192 # No tasks. 193 if len(active_tasks) == 0: 194 # Case 1a 195 # Fewer blocks that min_blocks 196 if active_blocks <= min_blocks: 197 # Ignore 198 # logger.debug("Strategy: Case.1a") 199 pass 200 201 # Case 1b 202 # More blocks than min_blocks. Scale down 203 else: 204 # We want to make sure that max_idletime is reached 205 # before killing off resources 206 if not self.executors[executor.label]['idle_since']: 207 logger.debug("Strategy: Scale_in, tasks=0 starting kill timer") 208 self.executors[executor.label]['idle_since'] = time.time() 209 210 idle_since = self.executors[executor.label]['idle_since'] 211 if (time.time() - idle_since) > self.max_idletime: 212 # We have resources idle for the max duration, 213 # we have to scale_in now. 
214 logger.debug("Strategy: Scale_in, tasks=0") 215 executor.scale_in(active_blocks - min_blocks) 216 217 else: 218 pass 219 # logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since)) 220 221 # Case 2 222 # More tasks than the available slots. 223 elif (float(active_slots) / len(active_tasks)) < parallelism: 224 # Case 2a 225 # We have the max blocks possible 226 if active_blocks >= max_blocks: 227 # Ignore since we already have the max nodes 228 # logger.debug("Strategy: Case.2a") 229 pass 230 231 # Case 2b 232 else: 233 # logger.debug("Strategy: Case.2b") 234 excess = math.ceil((len(active_tasks) * parallelism) - active_slots) 235 excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block)) 236 logger.debug("Requesting {} more blocks".format(excess_blocks)) 237 executor.scale_out(excess_blocks) 238 239 elif active_slots == 0 and len(active_tasks) > 0: 240 # Case 4 241 # Check if slots are being lost quickly ? 242 logger.debug("Requesting single slot") 243 executor.scale_out(1) 244 # Case 3 245 # tasks ~ slots 246 else: 247 # logger.debug("Strategy: Case 3") 248 pass 249 250 251 if __name__ == '__main__': 252 253 pass 254 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsl/dataflow/strategy.py b/parsl/dataflow/strategy.py --- a/parsl/dataflow/strategy.py +++ b/parsl/dataflow/strategy.py @@ -2,6 +2,8 @@ import time import math +from parsl.executors.ipp import IPyParallelExecutor + logger = logging.getLogger(__name__) @@ -178,15 +180,18 @@ nodes_per_block = executor.provider.nodes_per_block parallelism = executor.provider.parallelism - active_blocks = sum([1 for x in status if x in ('RUNNING', - 'SUBMITTING', - 'PENDING')]) + running = sum([1 for x in status if x == 'RUNNING']) + submitting = sum([1 for x in status if x == 'SUBMITTING']) + pending = sum([1 for x in status if x == 'PENDING']) + active_blocks = running + submitting + pending active_slots = active_blocks * tasks_per_node * nodes_per_block - # import pdb; pdb.set_trace() - logger.debug("Tasks:{} Slots:{} Parallelism:{}".format(len(active_tasks), - active_slots, - parallelism)) + if isinstance(executor, IPyParallelExecutor): + logger.debug('Executor {} has {} active tasks, {}/{}/{} running/submitted/pending blocks, and {} connected engines'.format( + label, len(active_tasks), running, submitting, pending, len(executor.executor))) + else: + logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format( + label, len(active_tasks), running, submitting, pending)) # Case 1 # No tasks.
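The patch above replaces the ambiguous per-executor "Tasks/Slots/Parallelism" line with a message that names the executor and breaks the block count down by state (and, for IPyParallelExecutor, also reports connected engines). A condensed sketch of that logging pattern, simplified from the diff:

```python
import logging

logger = logging.getLogger(__name__)

def log_executor_state(label, active_tasks, status):
    # Count blocks per state instead of lumping RUNNING/SUBMITTING/PENDING together,
    # and prefix the line with the executor's label so multiple executors stay distinguishable.
    running = sum(1 for x in status if x == 'RUNNING')
    submitting = sum(1 for x in status if x == 'SUBMITTING')
    pending = sum(1 for x in status if x == 'PENDING')
    logger.debug('Executor %s has %d active tasks and %d/%d/%d '
                 'running/submitted/pending blocks',
                 label, len(active_tasks), running, submitting, pending)
```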
{"golden_diff": "diff --git a/parsl/dataflow/strategy.py b/parsl/dataflow/strategy.py\n--- a/parsl/dataflow/strategy.py\n+++ b/parsl/dataflow/strategy.py\n@@ -2,6 +2,8 @@\n import time\n import math\n \n+from parsl.executors.ipp import IPyParallelExecutor\n+\n logger = logging.getLogger(__name__)\n \n \n@@ -178,15 +180,18 @@\n nodes_per_block = executor.provider.nodes_per_block\n parallelism = executor.provider.parallelism\n \n- active_blocks = sum([1 for x in status if x in ('RUNNING',\n- 'SUBMITTING',\n- 'PENDING')])\n+ running = sum([1 for x in status if x == 'RUNNING'])\n+ submitting = sum([1 for x in status if x == 'SUBMITTING'])\n+ pending = sum([1 for x in status if x == 'PENDING'])\n+ active_blocks = running + submitting + pending\n active_slots = active_blocks * tasks_per_node * nodes_per_block\n \n- # import pdb; pdb.set_trace()\n- logger.debug(\"Tasks:{} Slots:{} Parallelism:{}\".format(len(active_tasks),\n- active_slots,\n- parallelism))\n+ if isinstance(executor, IPyParallelExecutor):\n+ logger.debug('Executor {} has {} active tasks, {}/{}/{} running/submitted/pending blocks, and {} connected engines'.format(\n+ label, len(active_tasks), running, submitting, pending, len(executor.executor)))\n+ else:\n+ logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(\n+ label, len(active_tasks), running, submitting, pending))\n \n # Case 1\n # No tasks.\n", "issue": "Tweak logging to show info per-executor\nIf a user has configured multiple executors, they will see multiple `Tasks: X Slots: X Parallelism: X` logging lines for each executor. We need to label the executor here so it is clear what each line refers to.\r\n\r\nIdea and report from @djf604.\n", "before_files": [{"content": "import logging\nimport time\nimport math\n\nlogger = logging.getLogger(__name__)\n\n\nclass Strategy(object):\n \"\"\"FlowControl strategy.\n\n As a workflow dag is processed by Parsl, new tasks are added and completed\n asynchronously. Parsl interfaces executors with execution providers to construct\n scalable executors to handle the variable work-load generated by the\n workflow. This component is responsible for periodically checking outstanding\n tasks and available compute capacity and trigger scaling events to match\n workflow needs.\n\n Here's a diagram of an executor. An executor consists of blocks, which are usually\n created by single requests to a Local Resource Manager (LRM) such as slurm,\n condor, torque, or even AWS API. The blocks could contain several task blocks\n which are separate instances on workers.\n\n\n .. code:: python\n\n |<--min_blocks |<-init_blocks max_blocks-->|\n +----------------------------------------------------------+\n | +--------block----------+ +--------block--------+ |\n executor = | | task task | ... | task task | |\n | +-----------------------+ +---------------------+ |\n +----------------------------------------------------------+\n\n The relevant specification options are:\n 1. min_blocks: Minimum number of blocks to maintain\n 2. init_blocks: number of blocks to provision at initialization of workflow\n 3. max_blocks: Maximum number of blocks that can be active due to one workflow\n\n\n .. code:: python\n\n slots = current_capacity * tasks_per_node * nodes_per_block\n\n active_tasks = pending_tasks + running_tasks\n\n Parallelism = slots / tasks\n = [0, 1] (i.e, 0 <= p <= 1)\n\n For example:\n\n When p = 0,\n => compute with the least resources possible.\n infinite tasks are stacked per slot.\n\n .. 
code:: python\n\n blocks = min_blocks { if active_tasks = 0\n max(min_blocks, 1) { else\n\n When p = 1,\n => compute with the most resources.\n one task is stacked per slot.\n\n .. code:: python\n\n blocks = min ( max_blocks,\n ceil( active_tasks / slots ) )\n\n\n When p = 1/2,\n => We stack upto 2 tasks per slot before we overflow\n and request a new block\n\n\n let's say min:init:max = 0:0:4 and task_blocks=2\n Consider the following example:\n min_blocks = 0\n init_blocks = 0\n max_blocks = 4\n tasks_per_node = 2\n nodes_per_block = 1\n\n In the diagram, X <- task\n\n at 2 tasks:\n\n .. code:: python\n\n +---Block---|\n | |\n | X X |\n |slot slot|\n +-----------+\n\n at 5 tasks, we overflow as the capacity of a single block is fully used.\n\n .. code:: python\n\n +---Block---| +---Block---|\n | X X | ----> | |\n | X X | | X |\n |slot slot| |slot slot|\n +-----------+ +-----------+\n\n \"\"\"\n\n def __init__(self, dfk):\n \"\"\"Initialize strategy.\"\"\"\n self.dfk = dfk\n self.config = dfk.config\n self.executors = {}\n self.max_idletime = 60 * 2 # 2 minutes\n\n for e in self.dfk.config.executors:\n self.executors[e.label] = {'idle_since': None, 'config': e.label}\n\n self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}\n\n self.strategize = self.strategies[self.config.strategy]\n self.logger_flag = False\n self.prior_loghandlers = set(logging.getLogger().handlers)\n\n logger.debug(\"Scaling strategy: {0}\".format(self.config.strategy))\n\n def _strategy_noop(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Do nothing.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n def unset_logging(self):\n \"\"\" Mute newly added handlers to the root level, right after calling executor.status\n \"\"\"\n if self.logger_flag is True:\n return\n\n root_logger = logging.getLogger()\n\n for hndlr in root_logger.handlers:\n if hndlr not in self.prior_loghandlers:\n hndlr.setLevel(logging.ERROR)\n\n self.logger_flag = True\n\n def _strategy_simple(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Peek at the DFK and the executors specified.\n\n We assume here that tasks are not held in a runnable\n state, and that all tasks from an app would be sent to\n a single specific executor, i.e tasks cannot be specified\n to go to one of more executors.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n for label, executor in self.dfk.executors.items():\n if not executor.scaling_enabled:\n continue\n\n # Tasks that are either pending completion\n active_tasks = executor.executor.outstanding\n\n status = executor.status()\n self.unset_logging()\n\n # FIXME we need to handle case where provider does not define these\n # FIXME probably more of this logic should be moved to the provider\n min_blocks = executor.provider.min_blocks\n max_blocks = executor.provider.max_blocks\n tasks_per_node = executor.provider.tasks_per_node\n nodes_per_block = executor.provider.nodes_per_block\n parallelism = executor.provider.parallelism\n\n active_blocks = sum([1 for x in status if x in ('RUNNING',\n 'SUBMITTING',\n 'PENDING')])\n active_slots = active_blocks * tasks_per_node * nodes_per_block\n\n # import pdb; pdb.set_trace()\n logger.debug(\"Tasks:{} Slots:{} Parallelism:{}\".format(len(active_tasks),\n active_slots,\n parallelism))\n\n # Case 1\n # No tasks.\n if len(active_tasks) == 0:\n # Case 1a\n # Fewer blocks that min_blocks\n if active_blocks <= min_blocks:\n # Ignore\n # logger.debug(\"Strategy: Case.1a\")\n 
pass\n\n # Case 1b\n # More blocks than min_blocks. Scale down\n else:\n # We want to make sure that max_idletime is reached\n # before killing off resources\n if not self.executors[executor.label]['idle_since']:\n logger.debug(\"Strategy: Scale_in, tasks=0 starting kill timer\")\n self.executors[executor.label]['idle_since'] = time.time()\n\n idle_since = self.executors[executor.label]['idle_since']\n if (time.time() - idle_since) > self.max_idletime:\n # We have resources idle for the max duration,\n # we have to scale_in now.\n logger.debug(\"Strategy: Scale_in, tasks=0\")\n executor.scale_in(active_blocks - min_blocks)\n\n else:\n pass\n # logger.debug(\"Strategy: Case.1b. Waiting for timer : {0}\".format(idle_since))\n\n # Case 2\n # More tasks than the available slots.\n elif (float(active_slots) / len(active_tasks)) < parallelism:\n # Case 2a\n # We have the max blocks possible\n if active_blocks >= max_blocks:\n # Ignore since we already have the max nodes\n # logger.debug(\"Strategy: Case.2a\")\n pass\n\n # Case 2b\n else:\n # logger.debug(\"Strategy: Case.2b\")\n excess = math.ceil((len(active_tasks) * parallelism) - active_slots)\n excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))\n logger.debug(\"Requesting {} more blocks\".format(excess_blocks))\n executor.scale_out(excess_blocks)\n\n elif active_slots == 0 and len(active_tasks) > 0:\n # Case 4\n # Check if slots are being lost quickly ?\n logger.debug(\"Requesting single slot\")\n executor.scale_out(1)\n # Case 3\n # tasks ~ slots\n else:\n # logger.debug(\"Strategy: Case 3\")\n pass\n\n\nif __name__ == '__main__':\n\n pass\n", "path": "parsl/dataflow/strategy.py"}], "after_files": [{"content": "import logging\nimport time\nimport math\n\nfrom parsl.executors.ipp import IPyParallelExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nclass Strategy(object):\n \"\"\"FlowControl strategy.\n\n As a workflow dag is processed by Parsl, new tasks are added and completed\n asynchronously. Parsl interfaces executors with execution providers to construct\n scalable executors to handle the variable work-load generated by the\n workflow. This component is responsible for periodically checking outstanding\n tasks and available compute capacity and trigger scaling events to match\n workflow needs.\n\n Here's a diagram of an executor. An executor consists of blocks, which are usually\n created by single requests to a Local Resource Manager (LRM) such as slurm,\n condor, torque, or even AWS API. The blocks could contain several task blocks\n which are separate instances on workers.\n\n\n .. code:: python\n\n |<--min_blocks |<-init_blocks max_blocks-->|\n +----------------------------------------------------------+\n | +--------block----------+ +--------block--------+ |\n executor = | | task task | ... | task task | |\n | +-----------------------+ +---------------------+ |\n +----------------------------------------------------------+\n\n The relevant specification options are:\n 1. min_blocks: Minimum number of blocks to maintain\n 2. init_blocks: number of blocks to provision at initialization of workflow\n 3. max_blocks: Maximum number of blocks that can be active due to one workflow\n\n\n .. code:: python\n\n slots = current_capacity * tasks_per_node * nodes_per_block\n\n active_tasks = pending_tasks + running_tasks\n\n Parallelism = slots / tasks\n = [0, 1] (i.e, 0 <= p <= 1)\n\n For example:\n\n When p = 0,\n => compute with the least resources possible.\n infinite tasks are stacked per slot.\n\n .. 
code:: python\n\n blocks = min_blocks { if active_tasks = 0\n max(min_blocks, 1) { else\n\n When p = 1,\n => compute with the most resources.\n one task is stacked per slot.\n\n .. code:: python\n\n blocks = min ( max_blocks,\n ceil( active_tasks / slots ) )\n\n\n When p = 1/2,\n => We stack upto 2 tasks per slot before we overflow\n and request a new block\n\n\n let's say min:init:max = 0:0:4 and task_blocks=2\n Consider the following example:\n min_blocks = 0\n init_blocks = 0\n max_blocks = 4\n tasks_per_node = 2\n nodes_per_block = 1\n\n In the diagram, X <- task\n\n at 2 tasks:\n\n .. code:: python\n\n +---Block---|\n | |\n | X X |\n |slot slot|\n +-----------+\n\n at 5 tasks, we overflow as the capacity of a single block is fully used.\n\n .. code:: python\n\n +---Block---| +---Block---|\n | X X | ----> | |\n | X X | | X |\n |slot slot| |slot slot|\n +-----------+ +-----------+\n\n \"\"\"\n\n def __init__(self, dfk):\n \"\"\"Initialize strategy.\"\"\"\n self.dfk = dfk\n self.config = dfk.config\n self.executors = {}\n self.max_idletime = 60 * 2 # 2 minutes\n\n for e in self.dfk.config.executors:\n self.executors[e.label] = {'idle_since': None, 'config': e.label}\n\n self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}\n\n self.strategize = self.strategies[self.config.strategy]\n self.logger_flag = False\n self.prior_loghandlers = set(logging.getLogger().handlers)\n\n logger.debug(\"Scaling strategy: {0}\".format(self.config.strategy))\n\n def _strategy_noop(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Do nothing.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n def unset_logging(self):\n \"\"\" Mute newly added handlers to the root level, right after calling executor.status\n \"\"\"\n if self.logger_flag is True:\n return\n\n root_logger = logging.getLogger()\n\n for hndlr in root_logger.handlers:\n if hndlr not in self.prior_loghandlers:\n hndlr.setLevel(logging.ERROR)\n\n self.logger_flag = True\n\n def _strategy_simple(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Peek at the DFK and the executors specified.\n\n We assume here that tasks are not held in a runnable\n state, and that all tasks from an app would be sent to\n a single specific executor, i.e tasks cannot be specified\n to go to one of more executors.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n for label, executor in self.dfk.executors.items():\n if not executor.scaling_enabled:\n continue\n\n # Tasks that are either pending completion\n active_tasks = executor.executor.outstanding\n\n status = executor.status()\n self.unset_logging()\n\n # FIXME we need to handle case where provider does not define these\n # FIXME probably more of this logic should be moved to the provider\n min_blocks = executor.provider.min_blocks\n max_blocks = executor.provider.max_blocks\n tasks_per_node = executor.provider.tasks_per_node\n nodes_per_block = executor.provider.nodes_per_block\n parallelism = executor.provider.parallelism\n\n running = sum([1 for x in status if x == 'RUNNING'])\n submitting = sum([1 for x in status if x == 'SUBMITTING'])\n pending = sum([1 for x in status if x == 'PENDING'])\n active_blocks = running + submitting + pending\n active_slots = active_blocks * tasks_per_node * nodes_per_block\n\n if isinstance(executor, IPyParallelExecutor):\n logger.debug('Executor {} has {} active tasks, {}/{}/{} running/submitted/pending blocks, and {} connected engines'.format(\n label, len(active_tasks), 
running, submitting, pending, len(executor.executor)))\n else:\n logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(\n label, len(active_tasks), running, submitting, pending))\n\n # Case 1\n # No tasks.\n if len(active_tasks) == 0:\n # Case 1a\n # Fewer blocks that min_blocks\n if active_blocks <= min_blocks:\n # Ignore\n # logger.debug(\"Strategy: Case.1a\")\n pass\n\n # Case 1b\n # More blocks than min_blocks. Scale down\n else:\n # We want to make sure that max_idletime is reached\n # before killing off resources\n if not self.executors[executor.label]['idle_since']:\n logger.debug(\"Strategy: Scale_in, tasks=0 starting kill timer\")\n self.executors[executor.label]['idle_since'] = time.time()\n\n idle_since = self.executors[executor.label]['idle_since']\n if (time.time() - idle_since) > self.max_idletime:\n # We have resources idle for the max duration,\n # we have to scale_in now.\n logger.debug(\"Strategy: Scale_in, tasks=0\")\n executor.scale_in(active_blocks - min_blocks)\n\n else:\n pass\n # logger.debug(\"Strategy: Case.1b. Waiting for timer : {0}\".format(idle_since))\n\n # Case 2\n # More tasks than the available slots.\n elif (float(active_slots) / len(active_tasks)) < parallelism:\n # Case 2a\n # We have the max blocks possible\n if active_blocks >= max_blocks:\n # Ignore since we already have the max nodes\n # logger.debug(\"Strategy: Case.2a\")\n pass\n\n # Case 2b\n else:\n # logger.debug(\"Strategy: Case.2b\")\n excess = math.ceil((len(active_tasks) * parallelism) - active_slots)\n excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))\n logger.debug(\"Requesting {} more blocks\".format(excess_blocks))\n executor.scale_out(excess_blocks)\n\n elif active_slots == 0 and len(active_tasks) > 0:\n # Case 4\n # Check if slots are being lost quickly ?\n logger.debug(\"Requesting single slot\")\n executor.scale_out(1)\n # Case 3\n # tasks ~ slots\n else:\n # logger.debug(\"Strategy: Case 3\")\n pass\n\n\nif __name__ == '__main__':\n\n pass\n", "path": "parsl/dataflow/strategy.py"}]}
2864
386
gh_patches_debug_5568
rasdani/github-patches
git_diff
pyodide__pyodide-689
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Typo? Wondering what "wv" means or if this should read "more complex things." https://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyodide_build/mkpkg.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import argparse 4 import json 5 import os 6 import shutil 7 import urllib.request 8 from pathlib import Path 9 10 PACKAGES_ROOT = Path(__file__).parent.parent / 'packages' 11 12 SDIST_EXTENSIONS = [] 13 14 15 def get_sdist_extensions(): 16 if SDIST_EXTENSIONS: 17 return SDIST_EXTENSIONS 18 19 for format in shutil.get_unpack_formats(): 20 for ext in format[1]: 21 SDIST_EXTENSIONS.append(ext) 22 23 return SDIST_EXTENSIONS 24 25 26 def get_sdist_url_entry(json_content): 27 sdist_extensions_tuple = tuple(get_sdist_extensions()) 28 29 for entry in json_content['urls']: 30 if entry['filename'].endswith(sdist_extensions_tuple): 31 return entry 32 33 raise Exception('No sdist URL found for package %s (%s)' % ( 34 json_content['info'].get('name'), 35 json_content['info'].get('package_url'), 36 )) 37 38 39 def make_package(package, version=None): 40 import yaml 41 42 version = ('/' + version) if version is not None else '' 43 url = f"https://pypi.org/pypi/{package}{version}/json" 44 45 with urllib.request.urlopen(url) as fd: 46 json_content = json.load(fd) 47 48 entry = get_sdist_url_entry(json_content) 49 download_url = entry['url'] 50 sha256 = entry['digests']['sha256'] 51 version = json_content['info']['version'] 52 53 yaml_content = { 54 'package': { 55 'name': package, 56 'version': version 57 }, 58 'source': { 59 'url': download_url, 60 'sha256': sha256 61 }, 62 'test': { 63 'imports': [ 64 package 65 ] 66 } 67 } 68 69 if not (PACKAGES_ROOT / package).is_dir(): 70 os.makedirs(PACKAGES_ROOT / package) 71 with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd: 72 yaml.dump(yaml_content, fd, default_flow_style=False) 73 74 75 def make_parser(parser): 76 parser.description = ''' 77 Make a new pyodide package. Creates a simple template that will work 78 for most pure Python packages, but will have to be edited for more wv 79 complex things.'''.strip() 80 parser.add_argument( 81 'package', type=str, nargs=1, 82 help="The package name on PyPI") 83 parser.add_argument( 84 '--version', type=str, default=None, 85 help="Package version string, " 86 "e.g. v1.2.1 (defaults to latest stable release)") 87 return parser 88 89 90 def main(args): 91 package = args.package[0] 92 make_package(package, args.version) 93 94 95 if __name__ == '__main__': 96 parser = make_parser(argparse.ArgumentParser()) 97 args = parser.parse_args() 98 main(args) 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py --- a/pyodide_build/mkpkg.py +++ b/pyodide_build/mkpkg.py @@ -75,7 +75,7 @@ def make_parser(parser): parser.description = ''' Make a new pyodide package. Creates a simple template that will work -for most pure Python packages, but will have to be edited for more wv +for most pure Python packages, but will have to be edited for more complex things.'''.strip() parser.add_argument( 'package', type=str, nargs=1,
{"golden_diff": "diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py\n--- a/pyodide_build/mkpkg.py\n+++ b/pyodide_build/mkpkg.py\n@@ -75,7 +75,7 @@\n def make_parser(parser):\n parser.description = '''\n Make a new pyodide package. Creates a simple template that will work\n-for most pure Python packages, but will have to be edited for more wv\n+for most pure Python packages, but will have to be edited for more\n complex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n", "issue": "Typo?\nWondering what \"wv\" means or if this should read \"more complex things.\"\r\n\r\nhttps://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport shutil\nimport urllib.request\nfrom pathlib import Path\n\nPACKAGES_ROOT = Path(__file__).parent.parent / 'packages'\n\nSDIST_EXTENSIONS = []\n\n\ndef get_sdist_extensions():\n if SDIST_EXTENSIONS:\n return SDIST_EXTENSIONS\n\n for format in shutil.get_unpack_formats():\n for ext in format[1]:\n SDIST_EXTENSIONS.append(ext)\n\n return SDIST_EXTENSIONS\n\n\ndef get_sdist_url_entry(json_content):\n sdist_extensions_tuple = tuple(get_sdist_extensions())\n\n for entry in json_content['urls']:\n if entry['filename'].endswith(sdist_extensions_tuple):\n return entry\n\n raise Exception('No sdist URL found for package %s (%s)' % (\n json_content['info'].get('name'),\n json_content['info'].get('package_url'),\n ))\n\n\ndef make_package(package, version=None):\n import yaml\n\n version = ('/' + version) if version is not None else ''\n url = f\"https://pypi.org/pypi/{package}{version}/json\"\n\n with urllib.request.urlopen(url) as fd:\n json_content = json.load(fd)\n\n entry = get_sdist_url_entry(json_content)\n download_url = entry['url']\n sha256 = entry['digests']['sha256']\n version = json_content['info']['version']\n\n yaml_content = {\n 'package': {\n 'name': package,\n 'version': version\n },\n 'source': {\n 'url': download_url,\n 'sha256': sha256\n },\n 'test': {\n 'imports': [\n package\n ]\n }\n }\n\n if not (PACKAGES_ROOT / package).is_dir():\n os.makedirs(PACKAGES_ROOT / package)\n with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:\n yaml.dump(yaml_content, fd, default_flow_style=False)\n\n\ndef make_parser(parser):\n parser.description = '''\nMake a new pyodide package. Creates a simple template that will work\nfor most pure Python packages, but will have to be edited for more wv\ncomplex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"The package name on PyPI\")\n parser.add_argument(\n '--version', type=str, default=None,\n help=\"Package version string, \"\n \"e.g. 
v1.2.1 (defaults to latest stable release)\")\n return parser\n\n\ndef main(args):\n package = args.package[0]\n make_package(package, args.version)\n\n\nif __name__ == '__main__':\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/mkpkg.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport shutil\nimport urllib.request\nfrom pathlib import Path\n\nPACKAGES_ROOT = Path(__file__).parent.parent / 'packages'\n\nSDIST_EXTENSIONS = []\n\n\ndef get_sdist_extensions():\n if SDIST_EXTENSIONS:\n return SDIST_EXTENSIONS\n\n for format in shutil.get_unpack_formats():\n for ext in format[1]:\n SDIST_EXTENSIONS.append(ext)\n\n return SDIST_EXTENSIONS\n\n\ndef get_sdist_url_entry(json_content):\n sdist_extensions_tuple = tuple(get_sdist_extensions())\n\n for entry in json_content['urls']:\n if entry['filename'].endswith(sdist_extensions_tuple):\n return entry\n\n raise Exception('No sdist URL found for package %s (%s)' % (\n json_content['info'].get('name'),\n json_content['info'].get('package_url'),\n ))\n\n\ndef make_package(package, version=None):\n import yaml\n\n version = ('/' + version) if version is not None else ''\n url = f\"https://pypi.org/pypi/{package}{version}/json\"\n\n with urllib.request.urlopen(url) as fd:\n json_content = json.load(fd)\n\n entry = get_sdist_url_entry(json_content)\n download_url = entry['url']\n sha256 = entry['digests']['sha256']\n version = json_content['info']['version']\n\n yaml_content = {\n 'package': {\n 'name': package,\n 'version': version\n },\n 'source': {\n 'url': download_url,\n 'sha256': sha256\n },\n 'test': {\n 'imports': [\n package\n ]\n }\n }\n\n if not (PACKAGES_ROOT / package).is_dir():\n os.makedirs(PACKAGES_ROOT / package)\n with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:\n yaml.dump(yaml_content, fd, default_flow_style=False)\n\n\ndef make_parser(parser):\n parser.description = '''\nMake a new pyodide package. Creates a simple template that will work\nfor most pure Python packages, but will have to be edited for more\ncomplex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"The package name on PyPI\")\n parser.add_argument(\n '--version', type=str, default=None,\n help=\"Package version string, \"\n \"e.g. v1.2.1 (defaults to latest stable release)\")\n return parser\n\n\ndef main(args):\n package = args.package[0]\n make_package(package, args.version)\n\n\nif __name__ == '__main__':\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/mkpkg.py"}]}
num_tokens: 1,145
num_tokens_diff: 137
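A quick way to sanity-check the mkpkg fix above is to render the parser description, since that string is only ever surfaced through argparse. A minimal sketch, assuming `pyodide_build` is importable from a repo checkout (the check itself is illustrative, not part of the patch):

```python
# Hypothetical check of the corrected help text from pyodide_build/mkpkg.py.
import argparse

from pyodide_build.mkpkg import make_parser  # module path as given in the record

parser = make_parser(argparse.ArgumentParser())
# After the patch this ends with "...edited for more complex things." (no stray "wv").
print(parser.description)
```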
problem_id: gh_patches_debug_30846
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-1877
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider tmobile_us is broken During the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/tmobile_us.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import json 3 from urllib.parse import urlencode 4 5 import scrapy 6 7 from locations.items import GeojsonPointItem 8 from locations.hours import OpeningHours 9 10 DAY_MAPPING = {'Monday': 'Mo', 11 'Tuesday': 'Tu', 12 'Wednesday': 'We', 13 'Thursday': 'Th', 14 'Friday': 'Fr', 15 'Saturday': 'Sa', 16 'Sunday': 'Su'} 17 18 BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?' 19 20 21 class TMobileUSSpider(scrapy.Spider): 22 name = "tmobile_us" 23 item_attributes = { 'brand': "T-Mobile" } 24 allowed_domains = ["www.t-mobile.com"] 25 download_delay = 0.2 26 27 def parse_hours(self, store_hours): 28 opening_hours = OpeningHours() 29 if store_hours is None: 30 return 31 32 for store_day in store_hours: 33 day = DAY_MAPPING[store_day.get("day")] 34 open_time = store_day.get("opens") 35 close_time = store_day.get("closes") 36 if open_time is None and close_time is None: 37 continue 38 opening_hours.add_range(day=day, 39 open_time=open_time, 40 close_time=close_time, 41 time_format='%H:%M' 42 ) 43 44 return opening_hours.as_opening_hours() 45 46 def start_requests(self): 47 url = BASE_URL 48 49 with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points: 50 51 next(points) # Ignore the header 52 for point in points: 53 _, lat, lon = point.strip().split(',') 54 55 params = { 56 'latitude': '{}'.format(lat), 57 'longitude': '{}'.format(lon), 58 'count': '1000', 59 'radius': '25', 60 'ignoreLoadingBar': 'false' 61 } 62 63 yield scrapy.http.Request(url + urlencode(params), callback=self.parse) 64 65 def parse(self, response): 66 data = json.loads(response.body_as_unicode()) 67 68 for store in data: 69 properties = { 70 'name': store["name"], 71 'ref': store["id"], 72 'addr_full': store["location"]["address"]["streetAddress"], 73 'city': store["location"]["address"]["addressLocality"], 74 'state': store["location"]["address"]["addressRegion"], 75 'postcode': store["location"]["address"]["postalCode"], 76 'phone': store.get("telephone"), 77 'website': store.get("url") or response.url, 78 'lat': float(store["location"]["latitude"]), 79 'lon': float(store["location"]["longitude"]), 80 } 81 82 hours = self.parse_hours(store.get("hours", [])) 83 if hours: 84 properties["opening_hours"] = hours 85 86 yield GeojsonPointItem(**properties) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py --- a/locations/spiders/tmobile_us.py +++ b/locations/spiders/tmobile_us.py @@ -15,12 +15,12 @@ 'Saturday': 'Sa', 'Sunday': 'Su'} -BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?' +BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?' class TMobileUSSpider(scrapy.Spider): name = "tmobile_us" - item_attributes = { 'brand': "T-Mobile" } + item_attributes = {'brand': "T-Mobile"} allowed_domains = ["www.t-mobile.com"] download_delay = 0.2 @@ -67,7 +67,7 @@ for store in data: properties = { - 'name': store["name"], + 'name': store.get("name"), 'ref': store["id"], 'addr_full': store["location"]["address"]["streetAddress"], 'city': store["location"]["address"]["addressLocality"], @@ -75,8 +75,8 @@ 'postcode': store["location"]["address"]["postalCode"], 'phone': store.get("telephone"), 'website': store.get("url") or response.url, - 'lat': float(store["location"]["latitude"]), - 'lon': float(store["location"]["longitude"]), + 'lat': store["location"]["latitude"], + 'lon': store["location"]["longitude"], } hours = self.parse_hours(store.get("hours", []))
{"golden_diff": "diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py\n--- a/locations/spiders/tmobile_us.py\n+++ b/locations/spiders/tmobile_us.py\n@@ -15,12 +15,12 @@\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n \n-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n \n \n class TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n- item_attributes = { 'brand': \"T-Mobile\" }\n+ item_attributes = {'brand': \"T-Mobile\"}\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n \n@@ -67,7 +67,7 @@\n \n for store in data:\n properties = {\n- 'name': store[\"name\"],\n+ 'name': store.get(\"name\"),\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n@@ -75,8 +75,8 @@\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n- 'lat': float(store[\"location\"][\"latitude\"]),\n- 'lon': float(store[\"location\"][\"longitude\"]),\n+ 'lat': store[\"location\"][\"latitude\"],\n+ 'lon': store[\"location\"][\"longitude\"],\n }\n \n hours = self.parse_hours(store.get(\"hours\", []))\n", "issue": "Spider tmobile_us is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n 
properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = {'brand': \"T-Mobile\"}\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n properties = {\n 'name': store.get(\"name\"),\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': store[\"location\"][\"latitude\"],\n 'lon': store[\"location\"][\"longitude\"],\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}]}
num_tokens: 1,227
num_tokens_diff: 392
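The tmobile_us patch above combines an endpoint bump (the `v2.1` getStoresByCoordinates URL) with more forgiving field access: `store.get("name")` instead of `store["name"]`, and no `float()` casts on latitude/longitude. The two errors in the run log are consistent with store payloads that omit a field; a toy illustration with a made-up payload (not real API data):

```python
# Made-up store payload missing the optional "name" field.
store = {"id": "1234", "location": {"latitude": 40.7, "longitude": -74.0}}

print(store.get("name"))        # None -- the patched access degrades gracefully
try:
    store["name"]               # the pre-patch access
except KeyError as err:
    print("KeyError:", err)     # the failure mode that shows up as spider errors
```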
problem_id: gh_patches_debug_16999
source: rasdani/github-patches
task_type: git_diff
in_source_id: microsoft__torchgeo-939
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Trainers: predict step ### Summary This issue is to track progress on adding a `predict` step to all Trainers. - [x] BYOLTask (#819) - [x] ClassificationTask (#790) - [x] MultiLabelClassificationTask (#792) - [x] ObjectDetectionTask (#758) - [x] RegressionTask (#818) - [x] SemanticSegmentationTask (#939) ### Rationale The default `predict` step does not know how to handle our batch dicts. ### Implementation See implementations that have already been finished. ### Alternatives _No response_ ### Additional information _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchgeo/trainers/segmentation.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 """Segmentation tasks.""" 5 6 import warnings 7 from typing import Any, Dict, cast 8 9 import matplotlib.pyplot as plt 10 import pytorch_lightning as pl 11 import segmentation_models_pytorch as smp 12 import torch 13 import torch.nn as nn 14 from torch import Tensor 15 from torch.optim.lr_scheduler import ReduceLROnPlateau 16 from torch.utils.data import DataLoader 17 from torchmetrics import MetricCollection 18 from torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex 19 20 from ..datasets.utils import unbind_samples 21 from ..models import FCN 22 23 # https://github.com/pytorch/pytorch/issues/60979 24 # https://github.com/pytorch/pytorch/pull/61045 25 DataLoader.__module__ = "torch.utils.data" 26 27 28 class SemanticSegmentationTask(pl.LightningModule): 29 """LightningModule for semantic segmentation of images. 30 31 Supports `Segmentation Models Pytorch 32 <https://github.com/qubvel/segmentation_models.pytorch>`_ 33 as an architecture choice in combination with any of these 34 `TIMM encoders <https://smp.readthedocs.io/en/latest/encoders_timm.html>`_. 35 """ 36 37 def config_task(self) -> None: 38 """Configures the task based on kwargs parameters passed to the constructor.""" 39 if self.hyperparams["segmentation_model"] == "unet": 40 self.model = smp.Unet( 41 encoder_name=self.hyperparams["encoder_name"], 42 encoder_weights=self.hyperparams["encoder_weights"], 43 in_channels=self.hyperparams["in_channels"], 44 classes=self.hyperparams["num_classes"], 45 ) 46 elif self.hyperparams["segmentation_model"] == "deeplabv3+": 47 self.model = smp.DeepLabV3Plus( 48 encoder_name=self.hyperparams["encoder_name"], 49 encoder_weights=self.hyperparams["encoder_weights"], 50 in_channels=self.hyperparams["in_channels"], 51 classes=self.hyperparams["num_classes"], 52 ) 53 elif self.hyperparams["segmentation_model"] == "fcn": 54 self.model = FCN( 55 in_channels=self.hyperparams["in_channels"], 56 classes=self.hyperparams["num_classes"], 57 num_filters=self.hyperparams["num_filters"], 58 ) 59 else: 60 raise ValueError( 61 f"Model type '{self.hyperparams['segmentation_model']}' is not valid. " 62 f"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'." 
63 ) 64 65 if self.hyperparams["loss"] == "ce": 66 ignore_value = -1000 if self.ignore_index is None else self.ignore_index 67 self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value) 68 elif self.hyperparams["loss"] == "jaccard": 69 self.loss = smp.losses.JaccardLoss( 70 mode="multiclass", classes=self.hyperparams["num_classes"] 71 ) 72 elif self.hyperparams["loss"] == "focal": 73 self.loss = smp.losses.FocalLoss( 74 "multiclass", ignore_index=self.ignore_index, normalized=True 75 ) 76 else: 77 raise ValueError( 78 f"Loss type '{self.hyperparams['loss']}' is not valid. " 79 f"Currently, supports 'ce', 'jaccard' or 'focal' loss." 80 ) 81 82 def __init__(self, **kwargs: Any) -> None: 83 """Initialize the LightningModule with a model and loss function. 84 85 Keyword Args: 86 segmentation_model: Name of the segmentation model type to use 87 encoder_name: Name of the encoder model backbone to use 88 encoder_weights: None or "imagenet" to use imagenet pretrained weights in 89 the encoder model 90 in_channels: Number of channels in input image 91 num_classes: Number of semantic classes to predict 92 loss: Name of the loss function 93 ignore_index: Optional integer class index to ignore in the loss and metrics 94 learning_rate: Learning rate for optimizer 95 learning_rate_schedule_patience: Patience for learning rate scheduler 96 97 Raises: 98 ValueError: if kwargs arguments are invalid 99 100 .. versionchanged:: 0.3 101 The *ignore_zeros* parameter was renamed to *ignore_index*. 102 """ 103 super().__init__() 104 105 # Creates `self.hparams` from kwargs 106 self.save_hyperparameters() # type: ignore[operator] 107 self.hyperparams = cast(Dict[str, Any], self.hparams) 108 109 if not isinstance(kwargs["ignore_index"], (int, type(None))): 110 raise ValueError("ignore_index must be an int or None") 111 if (kwargs["ignore_index"] is not None) and (kwargs["loss"] == "jaccard"): 112 warnings.warn( 113 "ignore_index has no effect on training when loss='jaccard'", 114 UserWarning, 115 ) 116 self.ignore_index = kwargs["ignore_index"] 117 self.config_task() 118 119 self.train_metrics = MetricCollection( 120 [ 121 MulticlassAccuracy( 122 num_classes=self.hyperparams["num_classes"], 123 ignore_index=self.ignore_index, 124 mdmc_average="global", 125 ), 126 MulticlassJaccardIndex( 127 num_classes=self.hyperparams["num_classes"], 128 ignore_index=self.ignore_index, 129 ), 130 ], 131 prefix="train_", 132 ) 133 self.val_metrics = self.train_metrics.clone(prefix="val_") 134 self.test_metrics = self.train_metrics.clone(prefix="test_") 135 136 def forward(self, *args: Any, **kwargs: Any) -> Any: 137 """Forward pass of the model. 138 139 Args: 140 x: tensor of data to run through the model 141 142 Returns: 143 output from the model 144 """ 145 return self.model(*args, **kwargs) 146 147 def training_step(self, *args: Any, **kwargs: Any) -> Tensor: 148 """Compute and return the training loss. 149 150 Args: 151 batch: the output of your DataLoader 152 153 Returns: 154 training loss 155 """ 156 batch = args[0] 157 x = batch["image"] 158 y = batch["mask"] 159 y_hat = self(x) 160 y_hat_hard = y_hat.argmax(dim=1) 161 162 loss = self.loss(y_hat, y) 163 164 # by default, the train step logs every `log_every_n_steps` steps where 165 # `log_every_n_steps` is a parameter to the `Trainer` object 166 self.log("train_loss", loss, on_step=True, on_epoch=False) 167 self.train_metrics(y_hat_hard, y) 168 169 return cast(Tensor, loss) 170 171 def training_epoch_end(self, outputs: Any) -> None: 172 """Logs epoch level training metrics. 
173 174 Args: 175 outputs: list of items returned by training_step 176 """ 177 self.log_dict(self.train_metrics.compute()) 178 self.train_metrics.reset() 179 180 def validation_step(self, *args: Any, **kwargs: Any) -> None: 181 """Compute validation loss and log example predictions. 182 183 Args: 184 batch: the output of your DataLoader 185 batch_idx: the index of this batch 186 """ 187 batch = args[0] 188 batch_idx = args[1] 189 x = batch["image"] 190 y = batch["mask"] 191 y_hat = self(x) 192 y_hat_hard = y_hat.argmax(dim=1) 193 194 loss = self.loss(y_hat, y) 195 196 self.log("val_loss", loss, on_step=False, on_epoch=True) 197 self.val_metrics(y_hat_hard, y) 198 199 if batch_idx < 10: 200 try: 201 datamodule = self.trainer.datamodule # type: ignore[attr-defined] 202 batch["prediction"] = y_hat_hard 203 for key in ["image", "mask", "prediction"]: 204 batch[key] = batch[key].cpu() 205 sample = unbind_samples(batch)[0] 206 fig = datamodule.plot(sample) 207 summary_writer = self.logger.experiment # type: ignore[union-attr] 208 summary_writer.add_figure( 209 f"image/{batch_idx}", fig, global_step=self.global_step 210 ) 211 plt.close() 212 except AttributeError: 213 pass 214 215 def validation_epoch_end(self, outputs: Any) -> None: 216 """Logs epoch level validation metrics. 217 218 Args: 219 outputs: list of items returned by validation_step 220 """ 221 self.log_dict(self.val_metrics.compute()) 222 self.val_metrics.reset() 223 224 def test_step(self, *args: Any, **kwargs: Any) -> None: 225 """Compute test loss. 226 227 Args: 228 batch: the output of your DataLoader 229 """ 230 batch = args[0] 231 x = batch["image"] 232 y = batch["mask"] 233 y_hat = self(x) 234 y_hat_hard = y_hat.argmax(dim=1) 235 236 loss = self.loss(y_hat, y) 237 238 # by default, the test and validation steps only log per *epoch* 239 self.log("test_loss", loss, on_step=False, on_epoch=True) 240 self.test_metrics(y_hat_hard, y) 241 242 def test_epoch_end(self, outputs: Any) -> None: 243 """Logs epoch level test metrics. 244 245 Args: 246 outputs: list of items returned by test_step 247 """ 248 self.log_dict(self.test_metrics.compute()) 249 self.test_metrics.reset() 250 251 def configure_optimizers(self) -> Dict[str, Any]: 252 """Initialize the optimizer and learning rate scheduler. 253 254 Returns: 255 a "lr dict" according to the pytorch lightning documentation -- 256 https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers 257 """ 258 optimizer = torch.optim.Adam( 259 self.model.parameters(), lr=self.hyperparams["learning_rate"] 260 ) 261 return { 262 "optimizer": optimizer, 263 "lr_scheduler": { 264 "scheduler": ReduceLROnPlateau( 265 optimizer, 266 patience=self.hyperparams["learning_rate_schedule_patience"], 267 ), 268 "monitor": "val_loss", 269 }, 270 } 271 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py --- a/torchgeo/trainers/segmentation.py +++ b/torchgeo/trainers/segmentation.py @@ -248,6 +248,25 @@ self.log_dict(self.test_metrics.compute()) self.test_metrics.reset() + def predict_step(self, *args: Any, **kwargs: Any) -> Tensor: + """Compute and return the predictions. + + By default, this will loop over images in a dataloader and aggregate + predictions into a list. This may not be desirable if you have many images + or large images which could cause out of memory errors. In this case + it's recommended to override this with a custom predict_step. + + Args: + batch: the output of your DataLoader + + Returns: + predicted softmax probabilities + """ + batch = args[0] + x = batch["image"] + y_hat: Tensor = self(x).softmax(dim=1) + return y_hat + def configure_optimizers(self) -> Dict[str, Any]: """Initialize the optimizer and learning rate scheduler.
{"golden_diff": "diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py\n--- a/torchgeo/trainers/segmentation.py\n+++ b/torchgeo/trainers/segmentation.py\n@@ -248,6 +248,25 @@\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n \n+ def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n+ \"\"\"Compute and return the predictions.\n+\n+ By default, this will loop over images in a dataloader and aggregate\n+ predictions into a list. This may not be desirable if you have many images\n+ or large images which could cause out of memory errors. In this case\n+ it's recommended to override this with a custom predict_step.\n+\n+ Args:\n+ batch: the output of your DataLoader\n+\n+ Returns:\n+ predicted softmax probabilities\n+ \"\"\"\n+ batch = args[0]\n+ x = batch[\"image\"]\n+ y_hat: Tensor = self(x).softmax(dim=1)\n+ return y_hat\n+\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n", "issue": "Trainers: predict step\n### Summary\r\n\r\nThis issue is to track progress on adding a `predict` step to all Trainers.\r\n\r\n- [x] BYOLTask (#819)\r\n- [x] ClassificationTask (#790)\r\n- [x] MultiLabelClassificationTask (#792)\r\n- [x] ObjectDetectionTask (#758)\r\n- [x] RegressionTask (#818)\r\n- [x] SemanticSegmentationTask (#939)\r\n\r\n### Rationale\r\n\r\nThe default `predict` step does not know how to handle our batch dicts.\r\n\r\n### Implementation\r\n\r\nSee implementations that have already been finished.\r\n\r\n### Alternatives\r\n\r\n_No response_\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Segmentation tasks.\"\"\"\n\nimport warnings\nfrom typing import Any, Dict, cast\n\nimport matplotlib.pyplot as plt\nimport pytorch_lightning as pl\nimport segmentation_models_pytorch as smp\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import MetricCollection\nfrom torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex\n\nfrom ..datasets.utils import unbind_samples\nfrom ..models import FCN\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nDataLoader.__module__ = \"torch.utils.data\"\n\n\nclass SemanticSegmentationTask(pl.LightningModule):\n \"\"\"LightningModule for semantic segmentation of images.\n\n Supports `Segmentation Models Pytorch\n <https://github.com/qubvel/segmentation_models.pytorch>`_\n as an architecture choice in combination with any of these\n `TIMM encoders <https://smp.readthedocs.io/en/latest/encoders_timm.html>`_.\n \"\"\"\n\n def config_task(self) -> None:\n \"\"\"Configures the task based on kwargs parameters passed to the constructor.\"\"\"\n if self.hyperparams[\"segmentation_model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n 
classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['segmentation_model']}' is not valid. \"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. \"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize the LightningModule with a model and loss function.\n\n Keyword Args:\n segmentation_model: Name of the segmentation model type to use\n encoder_name: Name of the encoder model backbone to use\n encoder_weights: None or \"imagenet\" to use imagenet pretrained weights in\n the encoder model\n in_channels: Number of channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n ignore_index: Optional integer class index to ignore in the loss and metrics\n learning_rate: Learning rate for optimizer\n learning_rate_schedule_patience: Patience for learning rate scheduler\n\n Raises:\n ValueError: if kwargs arguments are invalid\n\n .. 
versionchanged:: 0.3\n The *ignore_zeros* parameter was renamed to *ignore_index*.\n \"\"\"\n super().__init__()\n\n # Creates `self.hparams` from kwargs\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n\n if not isinstance(kwargs[\"ignore_index\"], (int, type(None))):\n raise ValueError(\"ignore_index must be an int or None\")\n if (kwargs[\"ignore_index\"] is not None) and (kwargs[\"loss\"] == \"jaccard\"):\n warnings.warn(\n \"ignore_index has no effect on training when loss='jaccard'\",\n UserWarning,\n )\n self.ignore_index = kwargs[\"ignore_index\"]\n self.config_task()\n\n self.train_metrics = MetricCollection(\n [\n MulticlassAccuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n mdmc_average=\"global\",\n ),\n MulticlassJaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n ),\n ],\n prefix=\"train_\",\n )\n self.val_metrics = self.train_metrics.clone(prefix=\"val_\")\n self.test_metrics = self.train_metrics.clone(prefix=\"test_\")\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return self.model(*args, **kwargs)\n\n def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the training loss.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n training loss\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the train step logs every `log_every_n_steps` steps where\n # `log_every_n_steps` is a parameter to the `Trainer` object\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n self.train_metrics(y_hat_hard, y)\n\n return cast(Tensor, loss)\n\n def training_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level training metrics.\n\n Args:\n outputs: list of items returned by training_step\n \"\"\"\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()\n\n def validation_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute validation loss and log example predictions.\n\n Args:\n batch: the output of your DataLoader\n batch_idx: the index of this batch\n \"\"\"\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if batch_idx < 10:\n try:\n datamodule = self.trainer.datamodule # type: ignore[attr-defined]\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment # type: ignore[union-attr]\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n except AttributeError:\n pass\n\n def validation_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level validation metrics.\n\n Args:\n outputs: list of items returned by validation_step\n \"\"\"\n self.log_dict(self.val_metrics.compute())\n self.val_metrics.reset()\n\n def test_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute test loss.\n\n Args:\n batch: the output of your 
DataLoader\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the test and validation steps only log per *epoch*\n self.log(\"test_loss\", loss, on_step=False, on_epoch=True)\n self.test_metrics(y_hat_hard, y)\n\n def test_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level test metrics.\n\n Args:\n outputs: list of items returned by test_step\n \"\"\"\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n a \"lr dict\" according to the pytorch lightning documentation --\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n optimizer = torch.optim.Adam(\n self.model.parameters(), lr=self.hyperparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }\n", "path": "torchgeo/trainers/segmentation.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Segmentation tasks.\"\"\"\n\nimport warnings\nfrom typing import Any, Dict, cast\n\nimport matplotlib.pyplot as plt\nimport pytorch_lightning as pl\nimport segmentation_models_pytorch as smp\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import MetricCollection\nfrom torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex\n\nfrom ..datasets.utils import unbind_samples\nfrom ..models import FCN\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nDataLoader.__module__ = \"torch.utils.data\"\n\n\nclass SemanticSegmentationTask(pl.LightningModule):\n \"\"\"LightningModule for semantic segmentation of images.\n\n Supports `Segmentation Models Pytorch\n <https://github.com/qubvel/segmentation_models.pytorch>`_\n as an architecture choice in combination with any of these\n `TIMM encoders <https://smp.readthedocs.io/en/latest/encoders_timm.html>`_.\n \"\"\"\n\n def config_task(self) -> None:\n \"\"\"Configures the task based on kwargs parameters passed to the constructor.\"\"\"\n if self.hyperparams[\"segmentation_model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['segmentation_model']}' is not valid. 
\"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. \"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize the LightningModule with a model and loss function.\n\n Keyword Args:\n segmentation_model: Name of the segmentation model type to use\n encoder_name: Name of the encoder model backbone to use\n encoder_weights: None or \"imagenet\" to use imagenet pretrained weights in\n the encoder model\n in_channels: Number of channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n ignore_index: Optional integer class index to ignore in the loss and metrics\n learning_rate: Learning rate for optimizer\n learning_rate_schedule_patience: Patience for learning rate scheduler\n\n Raises:\n ValueError: if kwargs arguments are invalid\n\n .. versionchanged:: 0.3\n The *ignore_zeros* parameter was renamed to *ignore_index*.\n \"\"\"\n super().__init__()\n\n # Creates `self.hparams` from kwargs\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n\n if not isinstance(kwargs[\"ignore_index\"], (int, type(None))):\n raise ValueError(\"ignore_index must be an int or None\")\n if (kwargs[\"ignore_index\"] is not None) and (kwargs[\"loss\"] == \"jaccard\"):\n warnings.warn(\n \"ignore_index has no effect on training when loss='jaccard'\",\n UserWarning,\n )\n self.ignore_index = kwargs[\"ignore_index\"]\n self.config_task()\n\n self.train_metrics = MetricCollection(\n [\n MulticlassAccuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n mdmc_average=\"global\",\n ),\n MulticlassJaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n ),\n ],\n prefix=\"train_\",\n )\n self.val_metrics = self.train_metrics.clone(prefix=\"val_\")\n self.test_metrics = self.train_metrics.clone(prefix=\"test_\")\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return self.model(*args, **kwargs)\n\n def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the training loss.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n training loss\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the train step logs every `log_every_n_steps` steps where\n # `log_every_n_steps` is a parameter to the `Trainer` object\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n self.train_metrics(y_hat_hard, y)\n\n return cast(Tensor, loss)\n\n def training_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level training metrics.\n\n Args:\n outputs: list of items 
returned by training_step\n \"\"\"\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()\n\n def validation_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute validation loss and log example predictions.\n\n Args:\n batch: the output of your DataLoader\n batch_idx: the index of this batch\n \"\"\"\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if batch_idx < 10:\n try:\n datamodule = self.trainer.datamodule # type: ignore[attr-defined]\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment # type: ignore[union-attr]\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n except AttributeError:\n pass\n\n def validation_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level validation metrics.\n\n Args:\n outputs: list of items returned by validation_step\n \"\"\"\n self.log_dict(self.val_metrics.compute())\n self.val_metrics.reset()\n\n def test_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute test loss.\n\n Args:\n batch: the output of your DataLoader\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the test and validation steps only log per *epoch*\n self.log(\"test_loss\", loss, on_step=False, on_epoch=True)\n self.test_metrics(y_hat_hard, y)\n\n def test_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level test metrics.\n\n Args:\n outputs: list of items returned by test_step\n \"\"\"\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n\n def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the predictions.\n\n By default, this will loop over images in a dataloader and aggregate\n predictions into a list. This may not be desirable if you have many images\n or large images which could cause out of memory errors. In this case\n it's recommended to override this with a custom predict_step.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n predicted softmax probabilities\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat\n\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n a \"lr dict\" according to the pytorch lightning documentation --\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n optimizer = torch.optim.Adam(\n self.model.parameters(), lr=self.hyperparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }\n", "path": "torchgeo/trainers/segmentation.py"}]}
num_tokens: 3,263
num_tokens_diff: 267
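The torchgeo diff above adds a `predict_step` that unpacks the dict batch and returns per-pixel softmax probabilities. A stand-alone sketch of what that step computes, with a 1x1 conv standing in for the wrapped segmentation model (the real task uses smp.Unet, DeepLabV3+, or FCN):

```python
import torch
import torch.nn as nn

# Stand-in for the wrapped segmentation model: 3 input channels -> 4 "classes".
model = nn.Conv2d(3, 4, kernel_size=1)

batch = {"image": torch.randn(2, 3, 64, 64)}   # torchgeo-style dict batch
y_hat = model(batch["image"]).softmax(dim=1)   # mirrors the new predict_step
print(y_hat.shape)                             # torch.Size([2, 4, 64, 64])
print(float(y_hat.sum(dim=1)[0, 0, 0]))        # ~1.0: probabilities over classes
```

In the full task the same tensor comes back from `Trainer.predict`, aggregated into a list over the dataloader, which is why the added docstring warns about memory use for many or large images.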
problem_id: gh_patches_debug_4330
source: rasdani/github-patches
task_type: git_diff
in_source_id: LMFDB__lmfdb-5664
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dirichlet character browse page thinks all characters are primitive On a Dir Char browse page, such as https://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20 all characters are shown by default, and shown as "primitive". On loading, only the primitive characters should be shown. The underlying problem is that the `td` holding the character index has `class="primitive"` in all cases. (So, the Imprimitive/Primitive toggles in the upper right are working correctly, it's just that the page is being generated with incorrect markup). Dirichlet character browse page thinks all characters are primitive On a Dir Char browse page, such as https://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20 all characters are shown by default, and shown as "primitive". On loading, only the primitive characters should be shown. The underlying problem is that the `td` holding the character index has `class="primitive"` in all cases. (So, the Imprimitive/Primitive toggles in the upper right are working correctly, it's just that the page is being generated with incorrect markup). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lmfdb/characters/ListCharacters.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # ListCharacters.py 3 import re 4 from sage.all import lcm, factor, Integers 5 from sage.databases.cremona import cremona_letter_code 6 from lmfdb.characters.web_character import WebDirichlet, parity_string 7 from lmfdb.characters.TinyConrey import ConreyCharacter 8 from lmfdb.utils import flash_error, integer_divisors 9 10 # utility functions # 11 12 13 def modn_exponent(n): 14 """ given a nonzero integer n, returns the group exponent of (Z/nZ)* """ 15 return lcm( [ (p-1)*p**(e-1) for (p,e) in factor(n) ] ) // (1 if n%8 else 2) 16 17 def divisors_in_interval(n, a, b): 18 """ given a nonzero integer n and an interval [a,b] returns a list of the divisors of n in [a,b] """ 19 return [d for d in integer_divisors(n) if a <= d and d <= b] 20 21 def parse_interval(arg, name): 22 """ parses a user specified interval of positive integers (or a single integer), flashes errors and raises exceptions """ 23 a,b = 0,0 24 arg = arg.replace (' ','') 25 if re.match('^[0-9]+$', arg): 26 a,b = (int(arg), int(arg)) 27 elif re.match('^[0-9]+-[0-9]+$', arg): 28 s = arg.split('-') 29 a,b = (int(s[0]), int(s[1])) 30 elif re.match('^[0-9]+..[0-9]+$', arg): 31 s = arg.split('..') 32 a,b = (int(s[0]), int(s[1])) 33 elif re.match(r'^\[[0-9]+..[0-9]+\]$', arg): 34 s = arg[1:-1].split('..') 35 a,b = (int(s[0]), int(s[1])) 36 if a <= 0 or b < a: 37 flash_error("%s is not a valid value for %s. It should be a positive integer (e.g. 7) or a nonempty range of positive integers (e.g. 1-10 or 1..10)", arg, name) 38 raise ValueError("invalid " + name) 39 return a,b 40 41 def parse_limit(arg): 42 if not arg: 43 return 50 44 limit = -1 45 arg = arg.replace (' ','') 46 if re.match('^[0-9]+$', arg): 47 limit = int(arg) 48 if limit > 100: 49 flash_error("%s is not a valid limit on the number of results to display. 
It should be a positive integer no greater than 100.", arg) 50 raise ValueError("limit") 51 return limit 52 53 def get_character_modulus(a, b, limit=7): 54 """ this function is also used by lfunctions/LfunctionPlot.py """ 55 headers = list(range(1, limit)) 56 headers.append("more") 57 entries = {} 58 rows = list(range(a, b + 1)) 59 for row in rows: 60 if row != 1: 61 G = Integers(row).list_of_elements_of_multiplicative_group() 62 else: 63 G = [1] 64 for chi_n in G: 65 chi = ConreyCharacter(row, chi_n) 66 multorder = chi.order 67 if multorder <= limit: 68 el = chi 69 col = multorder 70 entry = entries.get((row, col), []) 71 entry.append(el) 72 entries[(row, col)] = entry 73 entries2 = {} 74 def out(chi): return (chi.number, chi.is_primitive, 75 chi.order, chi.is_even()) 76 for k, v in entries.items(): 77 l = [] 78 v = sorted(v, key=lambda x: x.number) 79 while v: 80 e1 = v.pop(0) 81 e1_num = e1.number 82 inv_num = 1 if e1_num == 1 else e1_num.inverse_mod(e1.modulus) 83 84 inv = ConreyCharacter(e1.modulus, inv_num) 85 86 if e1_num == inv_num: 87 l.append((out(e1),)) 88 else: 89 l.append((out(e1), out(inv))) 90 v = [x for x in v if (x.modulus, x.number) != (inv.modulus, inv.number)] 91 if k[1] == "more": 92 l = sorted(l, key=lambda e: e[0][2]) 93 entries2[k] = l 94 cols = headers 95 return headers, entries2, rows, cols 96 97 98 def info_from_db_orbit(orbit): 99 mod = orbit['modulus'] 100 conductor = orbit['conductor'] 101 orbit_index = orbit['orbit_index'] 102 orbit_letter = cremona_letter_code(orbit_index - 1) 103 orbit_label = "{}.{}".format(mod, orbit_letter) 104 order = orbit['order'] 105 is_odd = parity_string(orbit['parity']) 106 is_prim = _is_primitive(orbit['is_primitive']) 107 results = [] 108 for num in orbit['galois_orbit']: 109 results.append(( 110 mod, 111 num, 112 conductor, 113 orbit_label, 114 order, 115 is_odd, 116 is_prim, 117 WebDirichlet.char2tex(mod, num) 118 )) 119 return results 120 121 122 def _is_primitive(db_primitive): 123 """ 124 Translate db's primitive entry to boolean. 125 """ 126 if str(db_primitive) == "True": 127 return True 128 return False 129 130 131 def _is_odd(db_parity): 132 """ 133 Translate db's parity entry to boolean. 134 """ 135 _parity = int(db_parity) 136 if _parity == -1: 137 return True 138 return False 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lmfdb/characters/ListCharacters.py b/lmfdb/characters/ListCharacters.py --- a/lmfdb/characters/ListCharacters.py +++ b/lmfdb/characters/ListCharacters.py @@ -71,7 +71,7 @@ entry.append(el) entries[(row, col)] = entry entries2 = {} - def out(chi): return (chi.number, chi.is_primitive, + def out(chi): return (chi.number, chi.is_primitive(), chi.order, chi.is_even()) for k, v in entries.items(): l = []
{"golden_diff": "diff --git a/lmfdb/characters/ListCharacters.py b/lmfdb/characters/ListCharacters.py\n--- a/lmfdb/characters/ListCharacters.py\n+++ b/lmfdb/characters/ListCharacters.py\n@@ -71,7 +71,7 @@\n entry.append(el)\n entries[(row, col)] = entry\n entries2 = {}\n- def out(chi): return (chi.number, chi.is_primitive,\n+ def out(chi): return (chi.number, chi.is_primitive(),\n chi.order, chi.is_even())\n for k, v in entries.items():\n l = []\n", "issue": "Dirichlet character browse page thinks all characters are primitive \nOn a Dir Char browse page, such as\r\n\r\nhttps://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20\r\n\r\nall characters are shown by default, and shown as \"primitive\".\r\nOn loading, only the primitive characters should be shown.\r\n\r\nThe underlying problem is that the `td` holding the character\r\nindex has `class=\"primitive\"` in all cases. (So, the Imprimitive/Primitive\r\ntoggles in the upper right are working correctly, it's just that the\r\npage is being generated with incorrect markup).\r\n\r\n\r\n\r\n\nDirichlet character browse page thinks all characters are primitive \nOn a Dir Char browse page, such as\r\n\r\nhttps://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20\r\n\r\nall characters are shown by default, and shown as \"primitive\".\r\nOn loading, only the primitive characters should be shown.\r\n\r\nThe underlying problem is that the `td` holding the character\r\nindex has `class=\"primitive\"` in all cases. (So, the Imprimitive/Primitive\r\ntoggles in the upper right are working correctly, it's just that the\r\npage is being generated with incorrect markup).\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# ListCharacters.py\nimport re\nfrom sage.all import lcm, factor, Integers\nfrom sage.databases.cremona import cremona_letter_code\nfrom lmfdb.characters.web_character import WebDirichlet, parity_string\nfrom lmfdb.characters.TinyConrey import ConreyCharacter\nfrom lmfdb.utils import flash_error, integer_divisors\n\n# utility functions #\n\n\ndef modn_exponent(n):\n \"\"\" given a nonzero integer n, returns the group exponent of (Z/nZ)* \"\"\"\n return lcm( [ (p-1)*p**(e-1) for (p,e) in factor(n) ] ) // (1 if n%8 else 2)\n\ndef divisors_in_interval(n, a, b):\n \"\"\" given a nonzero integer n and an interval [a,b] returns a list of the divisors of n in [a,b] \"\"\"\n return [d for d in integer_divisors(n) if a <= d and d <= b]\n\ndef parse_interval(arg, name):\n \"\"\" parses a user specified interval of positive integers (or a single integer), flashes errors and raises exceptions \"\"\"\n a,b = 0,0\n arg = arg.replace (' ','')\n if re.match('^[0-9]+$', arg):\n a,b = (int(arg), int(arg))\n elif re.match('^[0-9]+-[0-9]+$', arg):\n s = arg.split('-')\n a,b = (int(s[0]), int(s[1]))\n elif re.match('^[0-9]+..[0-9]+$', arg):\n s = arg.split('..')\n a,b = (int(s[0]), int(s[1]))\n elif re.match(r'^\\[[0-9]+..[0-9]+\\]$', arg):\n s = arg[1:-1].split('..')\n a,b = (int(s[0]), int(s[1]))\n if a <= 0 or b < a:\n flash_error(\"%s is not a valid value for %s. It should be a positive integer (e.g. 7) or a nonempty range of positive integers (e.g. 1-10 or 1..10)\", arg, name)\n raise ValueError(\"invalid \" + name)\n return a,b\n\ndef parse_limit(arg):\n if not arg:\n return 50\n limit = -1\n arg = arg.replace (' ','')\n if re.match('^[0-9]+$', arg):\n limit = int(arg)\n if limit > 100:\n flash_error(\"%s is not a valid limit on the number of results to display. 
It should be a positive integer no greater than 100.\", arg)\n raise ValueError(\"limit\")\n return limit\n\ndef get_character_modulus(a, b, limit=7):\n \"\"\" this function is also used by lfunctions/LfunctionPlot.py \"\"\"\n headers = list(range(1, limit))\n headers.append(\"more\")\n entries = {}\n rows = list(range(a, b + 1))\n for row in rows:\n if row != 1:\n G = Integers(row).list_of_elements_of_multiplicative_group()\n else:\n G = [1]\n for chi_n in G:\n chi = ConreyCharacter(row, chi_n)\n multorder = chi.order\n if multorder <= limit:\n el = chi\n col = multorder\n entry = entries.get((row, col), [])\n entry.append(el)\n entries[(row, col)] = entry\n entries2 = {}\n def out(chi): return (chi.number, chi.is_primitive,\n chi.order, chi.is_even())\n for k, v in entries.items():\n l = []\n v = sorted(v, key=lambda x: x.number)\n while v:\n e1 = v.pop(0)\n e1_num = e1.number\n inv_num = 1 if e1_num == 1 else e1_num.inverse_mod(e1.modulus)\n\n inv = ConreyCharacter(e1.modulus, inv_num)\n\n if e1_num == inv_num:\n l.append((out(e1),))\n else:\n l.append((out(e1), out(inv)))\n v = [x for x in v if (x.modulus, x.number) != (inv.modulus, inv.number)]\n if k[1] == \"more\":\n l = sorted(l, key=lambda e: e[0][2])\n entries2[k] = l\n cols = headers\n return headers, entries2, rows, cols\n\n\ndef info_from_db_orbit(orbit):\n mod = orbit['modulus']\n conductor = orbit['conductor']\n orbit_index = orbit['orbit_index']\n orbit_letter = cremona_letter_code(orbit_index - 1)\n orbit_label = \"{}.{}\".format(mod, orbit_letter)\n order = orbit['order']\n is_odd = parity_string(orbit['parity'])\n is_prim = _is_primitive(orbit['is_primitive'])\n results = []\n for num in orbit['galois_orbit']:\n results.append((\n mod,\n num,\n conductor,\n orbit_label,\n order,\n is_odd,\n is_prim,\n WebDirichlet.char2tex(mod, num)\n ))\n return results\n\n\ndef _is_primitive(db_primitive):\n \"\"\"\n Translate db's primitive entry to boolean.\n \"\"\"\n if str(db_primitive) == \"True\":\n return True\n return False\n\n\ndef _is_odd(db_parity):\n \"\"\"\n Translate db's parity entry to boolean.\n \"\"\"\n _parity = int(db_parity)\n if _parity == -1:\n return True\n return False\n", "path": "lmfdb/characters/ListCharacters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# ListCharacters.py\nimport re\nfrom sage.all import lcm, factor, Integers\nfrom sage.databases.cremona import cremona_letter_code\nfrom lmfdb.characters.web_character import WebDirichlet, parity_string\nfrom lmfdb.characters.TinyConrey import ConreyCharacter\nfrom lmfdb.utils import flash_error, integer_divisors\n\n# utility functions #\n\n\ndef modn_exponent(n):\n \"\"\" given a nonzero integer n, returns the group exponent of (Z/nZ)* \"\"\"\n return lcm( [ (p-1)*p**(e-1) for (p,e) in factor(n) ] ) // (1 if n%8 else 2)\n\ndef divisors_in_interval(n, a, b):\n \"\"\" given a nonzero integer n and an interval [a,b] returns a list of the divisors of n in [a,b] \"\"\"\n return [d for d in integer_divisors(n) if a <= d and d <= b]\n\ndef parse_interval(arg, name):\n \"\"\" parses a user specified interval of positive integers (or a single integer), flashes errors and raises exceptions \"\"\"\n a,b = 0,0\n arg = arg.replace (' ','')\n if re.match('^[0-9]+$', arg):\n a,b = (int(arg), int(arg))\n elif re.match('^[0-9]+-[0-9]+$', arg):\n s = arg.split('-')\n a,b = (int(s[0]), int(s[1]))\n elif re.match('^[0-9]+..[0-9]+$', arg):\n s = arg.split('..')\n a,b = (int(s[0]), int(s[1]))\n elif re.match(r'^\\[[0-9]+..[0-9]+\\]$', arg):\n s = 
arg[1:-1].split('..')\n a,b = (int(s[0]), int(s[1]))\n if a <= 0 or b < a:\n flash_error(\"%s is not a valid value for %s. It should be a positive integer (e.g. 7) or a nonempty range of positive integers (e.g. 1-10 or 1..10)\", arg, name)\n raise ValueError(\"invalid \" + name)\n return a,b\n\ndef parse_limit(arg):\n if not arg:\n return 50\n limit = -1\n arg = arg.replace (' ','')\n if re.match('^[0-9]+$', arg):\n limit = int(arg)\n if limit > 100:\n flash_error(\"%s is not a valid limit on the number of results to display. It should be a positive integer no greater than 100.\", arg)\n raise ValueError(\"limit\")\n return limit\n\ndef get_character_modulus(a, b, limit=7):\n \"\"\" this function is also used by lfunctions/LfunctionPlot.py \"\"\"\n headers = list(range(1, limit))\n headers.append(\"more\")\n entries = {}\n rows = list(range(a, b + 1))\n for row in rows:\n if row != 1:\n G = Integers(row).list_of_elements_of_multiplicative_group()\n else:\n G = [1]\n for chi_n in G:\n chi = ConreyCharacter(row, chi_n)\n multorder = chi.order\n if multorder <= limit:\n el = chi\n col = multorder\n entry = entries.get((row, col), [])\n entry.append(el)\n entries[(row, col)] = entry\n entries2 = {}\n def out(chi): return (chi.number, chi.is_primitive(),\n chi.order, chi.is_even())\n for k, v in entries.items():\n l = []\n v = sorted(v, key=lambda x: x.number)\n while v:\n e1 = v.pop(0)\n e1_num = e1.number\n inv_num = 1 if e1_num == 1 else e1_num.inverse_mod(e1.modulus)\n\n inv = ConreyCharacter(e1.modulus, inv_num)\n\n if e1_num == inv_num:\n l.append((out(e1),))\n else:\n l.append((out(e1), out(inv)))\n v = [x for x in v if (x.modulus, x.number) != (inv.modulus, inv.number)]\n if k[1] == \"more\":\n l = sorted(l, key=lambda e: e[0][2])\n entries2[k] = l\n cols = headers\n return headers, entries2, rows, cols\n\n\ndef info_from_db_orbit(orbit):\n mod = orbit['modulus']\n conductor = orbit['conductor']\n orbit_index = orbit['orbit_index']\n orbit_letter = cremona_letter_code(orbit_index - 1)\n orbit_label = \"{}.{}\".format(mod, orbit_letter)\n order = orbit['order']\n is_odd = parity_string(orbit['parity'])\n is_prim = _is_primitive(orbit['is_primitive'])\n results = []\n for num in orbit['galois_orbit']:\n results.append((\n mod,\n num,\n conductor,\n orbit_label,\n order,\n is_odd,\n is_prim,\n WebDirichlet.char2tex(mod, num)\n ))\n return results\n\n\ndef _is_primitive(db_primitive):\n \"\"\"\n Translate db's primitive entry to boolean.\n \"\"\"\n if str(db_primitive) == \"True\":\n return True\n return False\n\n\ndef _is_odd(db_parity):\n \"\"\"\n Translate db's parity entry to boolean.\n \"\"\"\n _parity = int(db_parity)\n if _parity == -1:\n return True\n return False\n", "path": "lmfdb/characters/ListCharacters.py"}]}
2,075
129
gh_patches_debug_54784
rasdani/github-patches
git_diff
encode__httpx-589
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AttributeError: module 'rfc3986.api' has no attribute 'iri_reference' Just did a pip install of httpx, and it appeared to meet all the pre-reqs. However, I'd encounter the error in the title. (python37) $ pip install httpx Collecting httpx Using cached https://files.pythonhosted.org/packages/15/fb/81861a0dbe87a0de57491b1b587419845f9a99e5b3d48e56cd440b1fcb68/httpx-0.7.6-py2.py3-none-any.whl Requirement already satisfied: certifi in .../envs/python37/lib/python3.7/site-packages (from httpx) (2019.6.16) Requirement already satisfied: h2==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.1.0) Requirement already satisfied: chardet==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.0.4) Requirement already satisfied: h11==0.8.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (0.8.1) Requirement already satisfied: rfc3986==1.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (1.2.0) Requirement already satisfied: idna==2.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (2.8) Collecting hstspreload>=2019.8.27 (from httpx) Downloading https://files.pythonhosted.org/packages/55/77/e0031fbb2930891db135af5a53afdf5567ca6f1b1ff55bf231ed5330f66d/hstspreload-2019.11.4.tar.gz (701kB) |████████████████████████████████| 706kB 740kB/s Requirement already satisfied: hpack<4,>=2.3 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (3.0.0) Requirement already satisfied: hyperframe<6,>=5.2.0 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (5.2.0) Building wheels for collected packages: hstspreload Building wheel for hstspreload (setup.py) ... done Created wheel for hstspreload: filename=hstspreload-2019.11.4-cp37-none-any.whl size=704190 sha256=ac82e6b8abe795fa1a179318d64d73660fa3024715b04b7db0746971c3e5a87b Stored in directory: .../Library/Caches/pip/wheels/49/f1/9d/dc01d2c108593d8d2c32d00cdd9daf595e6ad4a727d57b7033 Successfully built hstspreload Installing collected packages: hstspreload, httpx Successfully installed hstspreload-2019.11.4 httpx-0.7.6 Manually upgrading rfc3986 to 1.3.2 resolved the issue. Seems the installer should be checking for rfc3986==1.3.2 or greater? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import re 5 from pathlib import Path 6 7 from setuptools import setup 8 9 10 def get_version(package): 11 """ 12 Return package version as listed in `__version__` in `init.py`. 13 """ 14 version = Path(package, "__version__.py").read_text() 15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1) 16 17 18 def get_long_description(): 19 """ 20 Return the README. 21 """ 22 long_description = "" 23 with open("README.md", encoding="utf8") as f: 24 long_description += f.read() 25 long_description += "\n\n" 26 with open("CHANGELOG.md", encoding="utf8") as f: 27 long_description += f.read() 28 return long_description 29 30 31 def get_packages(package): 32 """ 33 Return root package and all sub-packages. 
34 """ 35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")] 36 37 38 setup( 39 name="httpx", 40 python_requires=">=3.6", 41 version=get_version("httpx"), 42 url="https://github.com/encode/httpx", 43 license="BSD", 44 description="The next generation HTTP client.", 45 long_description=get_long_description(), 46 long_description_content_type="text/markdown", 47 author="Tom Christie", 48 author_email="[email protected]", 49 package_data={"httpx": ["py.typed"]}, 50 packages=get_packages("httpx"), 51 include_package_data=True, 52 zip_safe=False, 53 install_requires=[ 54 "certifi", 55 "hstspreload", 56 "chardet==3.*", 57 "h11==0.8.*", 58 "h2==3.*", 59 "idna==2.*", 60 "rfc3986==1.*", 61 "sniffio==1.*", 62 ], 63 classifiers=[ 64 "Development Status :: 3 - Alpha", 65 "Environment :: Web Environment", 66 "Intended Audience :: Developers", 67 "License :: OSI Approved :: BSD License", 68 "Operating System :: OS Independent", 69 "Topic :: Internet :: WWW/HTTP", 70 "Framework :: AsyncIO", 71 "Framework :: Trio", 72 "Programming Language :: Python :: 3", 73 "Programming Language :: Python :: 3.6", 74 "Programming Language :: Python :: 3.7", 75 "Programming Language :: Python :: 3.8", 76 ], 77 ) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ "h11==0.8.*", "h2==3.*", "idna==2.*", - "rfc3986==1.*", + "rfc3986>=1.3,<2", "sniffio==1.*", ], classifiers=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n- \"rfc3986==1.*\",\n+ \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n", "issue": "AttributeError: module 'rfc3986.api' has no attribute 'iri_reference'\nJust did a pip install of httpx, and it appeared to meet all the pre-reqs. However, I'd encounter the error in the title.\r\n\r\n\r\n(python37) $ pip install httpx\r\nCollecting httpx\r\n Using cached https://files.pythonhosted.org/packages/15/fb/81861a0dbe87a0de57491b1b587419845f9a99e5b3d48e56cd440b1fcb68/httpx-0.7.6-py2.py3-none-any.whl\r\nRequirement already satisfied: certifi in .../envs/python37/lib/python3.7/site-packages (from httpx) (2019.6.16)\r\nRequirement already satisfied: h2==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.1.0)\r\nRequirement already satisfied: chardet==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.0.4)\r\nRequirement already satisfied: h11==0.8.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (0.8.1)\r\nRequirement already satisfied: rfc3986==1.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (1.2.0)\r\nRequirement already satisfied: idna==2.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (2.8)\r\nCollecting hstspreload>=2019.8.27 (from httpx)\r\n Downloading https://files.pythonhosted.org/packages/55/77/e0031fbb2930891db135af5a53afdf5567ca6f1b1ff55bf231ed5330f66d/hstspreload-2019.11.4.tar.gz (701kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 706kB 740kB/s \r\nRequirement already satisfied: hpack<4,>=2.3 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (3.0.0)\r\nRequirement already satisfied: hyperframe<6,>=5.2.0 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (5.2.0)\r\nBuilding wheels for collected packages: hstspreload\r\n Building wheel for hstspreload (setup.py) ... done\r\n Created wheel for hstspreload: filename=hstspreload-2019.11.4-cp37-none-any.whl size=704190 sha256=ac82e6b8abe795fa1a179318d64d73660fa3024715b04b7db0746971c3e5a87b\r\n Stored in directory: .../Library/Caches/pip/wheels/49/f1/9d/dc01d2c108593d8d2c32d00cdd9daf595e6ad4a727d57b7033\r\nSuccessfully built hstspreload\r\nInstalling collected packages: hstspreload, httpx\r\nSuccessfully installed hstspreload-2019.11.4 httpx-0.7.6\r\n\r\nManually upgrading rfc3986 to 1.3.2 resolved the issue. Seems the installer should be checking for rfc3986==1.3.2 or greater? 
\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n 
\"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]}
1,814
102
gh_patches_debug_11881
rasdani/github-patches
git_diff
spack__spack-5816
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- libxml2 fails with Intel 16.0.2 I am now trying to build `amrex` on another machine. I setup spack for intel compilers (to load the proper module, etc.). Lots of packages build just fine (including, e.g. `m4` which failed in #5705 and we had to fix it in spack). But the `libxml2` packages fails with: ``` $ spack install --restage amrex%[email protected] ... ==> Executing phase: 'autoreconf' ==> Executing phase: 'configure' ==> Executing phase: 'build' ==> Error: ProcessError: Command exited with status 2: 'make' '-j32' 4 errors found in build log: [ ... ] 328 icc: command line warning #10006: ignoring unknown option '-Wnested-externs' 329 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls' 330 HTMLparser.c(5409): warning #2338: this switch statement does not have a default clause 331 switch (ctxt->instate) { 332 ^ 333 >> 334 xmlIO.c(1450): error: identifier "LZMA_OK" is undefined 335 ret = (__libxml2_xzclose((xzFile) context) == LZMA_OK ) ? 0 : -1; 336 ^ 337 338 xmlIO.c(2044): warning #3179: deprecated conversion of string literal to char* (should be const char*) 339 char * content_type = (char *) "text/xml"; 340 ^ [ ... ] 342 xmlIO.c(2056): warning #3179: deprecated conversion of string literal to char* (should be const char*) 343 content_encoding = (char *) "Content-Encoding: gzip"; 344 ^ 345 346 compilation aborted for xmlIO.c (code 2) 347 Makefile:1248: recipe for target 'xmlIO.lo' failed >> 348 make[2]: *** [xmlIO.lo] Error 1 349 make[2]: *** Waiting for unfinished jobs.... 350 icc: command line warning #10006: ignoring unknown option '-Wno-format-extra-args' 351 icc: command line warning #10006: ignoring unknown option '-Wcast-align' 352 icc: command line warning #10006: ignoring unknown option '-Waggregate-return' 353 icc: command line warning #10006: ignoring unknown option '-Wnested-externs' 354 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls' [ ... ] 981 trionan.c(218): warning #177: function "trio_is_negative" was declared but never referenced 982 trio_is_negative 983 ^ 984 985 make[2]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4' 986 Makefile:1440: recipe for target 'all-recursive' failed >> 987 make[1]: *** [all-recursive] Error 1 988 make[1]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4' 989 Makefile:859: recipe for target 'all' failed >> 990 make: *** [all] Error 2 See build log for details: /home/certik/repos/spack/var/spack/stage/libxml2-2.9.4-j26m2sopxpelbb2vhdtbfdb7gdhczbl2/libxml2-2.9.4/spack-build.out ``` The missing `LZMA_OK` symbol I think comes from the `xz` package, that was installed successfully by spack already. 
Here is the spec: ``` $ spack spec amrex%[email protected] Input spec -------------------------------- amrex%[email protected] Normalized -------------------------------- amrex%[email protected] ^cmake Concretized -------------------------------- [email protected]%[email protected] build_type=RelWithDebInfo ~debug dims=3 +fortran+mpi~openmp~particles prec=DOUBLE arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected]~doc+ncurses+openssl+ownlibs~qt arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] patches=f84b2708a42777aadcc7f502a261afe10ca5646a51c1ef8b5e60d2070d926b57 ~symlinks arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected]+internal_glib arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected]+pic+shared arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected]~cuda fabrics= ~java schedulers= ~sqlite3~thread_multiple+vt arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected]~cuda+libxml2+pci arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00 +sigsegv arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected]~python arch=linux-ubuntu16.04-x86_64 ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 ``` Note that `libxml2` builds just fine with Intel 17.0.1 on another machine. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/libxml2/package.py` Content: ``` 1 ############################################################################## 2 # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 
20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 from spack import * 26 27 28 class Libxml2(AutotoolsPackage): 29 """Libxml2 is the XML C parser and toolkit developed for the Gnome 30 project (but usable outside of the Gnome platform), it is free 31 software available under the MIT License.""" 32 homepage = "http://xmlsoft.org" 33 url = "http://xmlsoft.org/sources/libxml2-2.9.2.tar.gz" 34 35 version('2.9.4', 'ae249165c173b1ff386ee8ad676815f5') 36 version('2.9.2', '9e6a9aca9d155737868b3dc5fd82f788') 37 version('2.7.8', '8127a65e8c3b08856093099b52599c86') 38 39 variant('python', default=False, description='Enable Python support') 40 41 extends('python', when='+python', 42 ignore=r'(bin.*$)|(include.*$)|(share.*$)|(lib/libxml2.*$)|' 43 '(lib/xml2.*$)|(lib/cmake.*$)') 44 depends_on('zlib') 45 depends_on('xz') 46 47 depends_on('[email protected]:', type='build') 48 49 def configure_args(self): 50 spec = self.spec 51 if '+python' in spec: 52 python_args = [ 53 '--with-python={0}'.format(spec['python'].home), 54 '--with-python-install-dir={0}'.format(site_packages_dir) 55 ] 56 else: 57 python_args = ['--without-python'] 58 59 return python_args 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/libxml2/package.py b/var/spack/repos/builtin/packages/libxml2/package.py --- a/var/spack/repos/builtin/packages/libxml2/package.py +++ b/var/spack/repos/builtin/packages/libxml2/package.py @@ -48,12 +48,15 @@ def configure_args(self): spec = self.spec + + args = ["--with-lzma=%s" % spec['xz'].prefix] + if '+python' in spec: - python_args = [ + args.extend([ '--with-python={0}'.format(spec['python'].home), '--with-python-install-dir={0}'.format(site_packages_dir) - ] + ]) else: - python_args = ['--without-python'] + args.append('--without-python') - return python_args + return args
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/libxml2/package.py b/var/spack/repos/builtin/packages/libxml2/package.py\n--- a/var/spack/repos/builtin/packages/libxml2/package.py\n+++ b/var/spack/repos/builtin/packages/libxml2/package.py\n@@ -48,12 +48,15 @@\n \n def configure_args(self):\n spec = self.spec\n+\n+ args = [\"--with-lzma=%s\" % spec['xz'].prefix]\n+\n if '+python' in spec:\n- python_args = [\n+ args.extend([\n '--with-python={0}'.format(spec['python'].home),\n '--with-python-install-dir={0}'.format(site_packages_dir)\n- ]\n+ ])\n else:\n- python_args = ['--without-python']\n+ args.append('--without-python')\n \n- return python_args\n+ return args\n", "issue": "libxml2 fails with Intel 16.0.2\nI am now trying to build `amrex` on another machine. I setup spack for intel compilers (to load the proper module, etc.). Lots of packages build just fine (including, e.g. `m4` which failed in #5705 and we had to fix it in spack). But the `libxml2` packages fails with:\r\n```\r\n$ spack install --restage amrex%[email protected]\r\n...\r\n==> Executing phase: 'autoreconf'\r\n==> Executing phase: 'configure'\r\n==> Executing phase: 'build'\r\n==> Error: ProcessError: Command exited with status 2:\r\n 'make' '-j32'\r\n\r\n4 errors found in build log:\r\n [ ... ]\r\n 328 icc: command line warning #10006: ignoring unknown option '-Wnested-externs'\r\n 329 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls'\r\n 330 HTMLparser.c(5409): warning #2338: this switch statement does not have a default clause\r\n 331 switch (ctxt->instate) {\r\n 332 ^\r\n 333 \r\n >> 334 xmlIO.c(1450): error: identifier \"LZMA_OK\" is undefined\r\n 335 ret = (__libxml2_xzclose((xzFile) context) == LZMA_OK ) ? 0 : -1;\r\n 336 ^\r\n 337 \r\n 338 xmlIO.c(2044): warning #3179: deprecated conversion of string literal to char* (should be const char*)\r\n 339 char * content_type = (char *) \"text/xml\";\r\n 340 ^\r\n [ ... ]\r\n 342 xmlIO.c(2056): warning #3179: deprecated conversion of string literal to char* (should be const char*)\r\n 343 content_encoding = (char *) \"Content-Encoding: gzip\";\r\n 344 ^\r\n 345 \r\n 346 compilation aborted for xmlIO.c (code 2)\r\n 347 Makefile:1248: recipe for target 'xmlIO.lo' failed\r\n >> 348 make[2]: *** [xmlIO.lo] Error 1\r\n 349 make[2]: *** Waiting for unfinished jobs....\r\n 350 icc: command line warning #10006: ignoring unknown option '-Wno-format-extra-args'\r\n 351 icc: command line warning #10006: ignoring unknown option '-Wcast-align'\r\n 352 icc: command line warning #10006: ignoring unknown option '-Waggregate-return'\r\n 353 icc: command line warning #10006: ignoring unknown option '-Wnested-externs'\r\n 354 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls'\r\n [ ... 
]\r\n 981 trionan.c(218): warning #177: function \"trio_is_negative\" was declared but never referenced\r\n 982 trio_is_negative\r\n 983 ^\r\n 984 \r\n 985 make[2]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4'\r\n 986 Makefile:1440: recipe for target 'all-recursive' failed\r\n >> 987 make[1]: *** [all-recursive] Error 1\r\n 988 make[1]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4'\r\n 989 Makefile:859: recipe for target 'all' failed\r\n >> 990 make: *** [all] Error 2\r\n\r\nSee build log for details:\r\n /home/certik/repos/spack/var/spack/stage/libxml2-2.9.4-j26m2sopxpelbb2vhdtbfdb7gdhczbl2/libxml2-2.9.4/spack-build.out\r\n```\r\nThe missing `LZMA_OK` symbol I think comes from the `xz` package, that was installed successfully by spack already. Here is the spec:\r\n```\r\n$ spack spec amrex%[email protected]\r\nInput spec\r\n--------------------------------\r\namrex%[email protected]\r\n\r\nNormalized\r\n--------------------------------\r\namrex%[email protected]\r\n ^cmake\r\n\r\nConcretized\r\n--------------------------------\r\[email protected]%[email protected] build_type=RelWithDebInfo ~debug dims=3 +fortran+mpi~openmp~particles prec=DOUBLE arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~doc+ncurses+openssl+ownlibs~qt arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] patches=f84b2708a42777aadcc7f502a261afe10ca5646a51c1ef8b5e60d2070d926b57 ~symlinks arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]+internal_glib arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]+pic+shared arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~cuda fabrics= ~java schedulers= ~sqlite3~thread_multiple+vt arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~cuda+libxml2+pci arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00 +sigsegv arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~python arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n```\r\nNote that `libxml2` builds just fine with Intel 17.0.1 on another machine.\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A 
PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Libxml2(AutotoolsPackage):\n \"\"\"Libxml2 is the XML C parser and toolkit developed for the Gnome\n project (but usable outside of the Gnome platform), it is free\n software available under the MIT License.\"\"\"\n homepage = \"http://xmlsoft.org\"\n url = \"http://xmlsoft.org/sources/libxml2-2.9.2.tar.gz\"\n\n version('2.9.4', 'ae249165c173b1ff386ee8ad676815f5')\n version('2.9.2', '9e6a9aca9d155737868b3dc5fd82f788')\n version('2.7.8', '8127a65e8c3b08856093099b52599c86')\n\n variant('python', default=False, description='Enable Python support')\n\n extends('python', when='+python',\n ignore=r'(bin.*$)|(include.*$)|(share.*$)|(lib/libxml2.*$)|'\n '(lib/xml2.*$)|(lib/cmake.*$)')\n depends_on('zlib')\n depends_on('xz')\n\n depends_on('[email protected]:', type='build')\n\n def configure_args(self):\n spec = self.spec\n if '+python' in spec:\n python_args = [\n '--with-python={0}'.format(spec['python'].home),\n '--with-python-install-dir={0}'.format(site_packages_dir)\n ]\n else:\n python_args = ['--without-python']\n\n return python_args\n", "path": "var/spack/repos/builtin/packages/libxml2/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Libxml2(AutotoolsPackage):\n \"\"\"Libxml2 is the XML C parser and toolkit developed for the Gnome\n project (but usable outside of the Gnome platform), it is free\n software available under the MIT License.\"\"\"\n homepage = \"http://xmlsoft.org\"\n url = \"http://xmlsoft.org/sources/libxml2-2.9.2.tar.gz\"\n\n version('2.9.4', 'ae249165c173b1ff386ee8ad676815f5')\n version('2.9.2', '9e6a9aca9d155737868b3dc5fd82f788')\n version('2.7.8', '8127a65e8c3b08856093099b52599c86')\n\n variant('python', default=False, description='Enable Python support')\n\n extends('python', when='+python',\n ignore=r'(bin.*$)|(include.*$)|(share.*$)|(lib/libxml2.*$)|'\n '(lib/xml2.*$)|(lib/cmake.*$)')\n depends_on('zlib')\n depends_on('xz')\n\n depends_on('[email protected]:', type='build')\n\n def configure_args(self):\n spec = self.spec\n\n args = [\"--with-lzma=%s\" % spec['xz'].prefix]\n\n if '+python' in spec:\n args.extend([\n '--with-python={0}'.format(spec['python'].home),\n '--with-python-install-dir={0}'.format(site_packages_dir)\n ])\n else:\n args.append('--without-python')\n\n return args\n", "path": "var/spack/repos/builtin/packages/libxml2/package.py"}]}
2,922
195
gh_patches_debug_6596
rasdani/github-patches
git_diff
DataBiosphere__toil-3023
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove Sphinx as a dependency As mentioned in https://github.com/DataBiosphere/toil/pull/3011#issuecomment-601349848 we depend on Sphinx, but we only use it for generating documentation. We never use it at runtime. We should remove the Sphinx dependency, and just make sure it is installed by the `Makefile` when running the `docs` target. Maybe it could be one of the things installed by `make prepare`. ┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-528) ┆Issue Number: TOIL-528 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright (C) 2015-2016 Regents of the University of California 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from setuptools import find_packages, setup 15 import os 16 17 # setting the 'CPPFLAGS' flag specifies the necessary cython dependency for "http-parser", for more info: 18 # toil issue: https://github.com/DataBiosphere/toil/issues/2924 19 # very similar to this issue: https://github.com/mcfletch/pyopengl/issues/11 20 # the "right way" is waiting for a fix from "http-parser", but this fixes things in the meantime since that might take a while 21 cppflags = os.environ.get('CPPFLAGS') 22 if cppflags: 23 # note, duplicate options don't affect things here so we don't check - Mark D 24 os.environ['CPPFLAGS'] = ' '.join([cppflags, '-DPYPY_VERSION']) 25 else: 26 os.environ['CPPFLAGS'] = '-DPYPY_VERSION' 27 28 29 def runSetup(): 30 """ 31 Calls setup(). This function exists so the setup() invocation preceded more internal 32 functionality. The `version` module is imported dynamically by importVersion() below. 33 """ 34 boto = 'boto==2.48.0' 35 boto3 = 'boto3>=1.7.50, <2.0' 36 futures = 'futures==3.1.1' 37 pycryptodome = 'pycryptodome==3.5.1' 38 pymesos = 'pymesos==0.3.7' 39 psutil = 'psutil >= 3.0.1, <6' 40 pynacl = 'pynacl==1.1.2' 41 gcs = 'google-cloud-storage==1.6.0' 42 gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14' 43 apacheLibcloud = 'apache-libcloud==2.2.1' 44 cwltool = 'cwltool<=2.0.20200126090152' 45 galaxyToolUtil = 'galaxy-tool-util' 46 htcondor = 'htcondor>=8.6.0' 47 kubernetes = 'kubernetes>=10, <11' 48 pytz = 'pytz>=2012' 49 dill = 'dill==0.2.7.1' 50 six = 'six>=1.10.0' 51 future = 'future' 52 requests = 'requests>=2, <3' 53 docker = 'docker==2.5.1' 54 dateutil = 'python-dateutil' 55 addict = 'addict<=2.2.0' 56 sphinx = 'sphinx>=2.4.4, <3' 57 pathlib2 = 'pathlib2==2.3.2' 58 59 core_reqs = [ 60 dill, 61 six, 62 future, 63 requests, 64 docker, 65 dateutil, 66 psutil, 67 addict, 68 sphinx, 69 pathlib2, 70 pytz] 71 72 aws_reqs = [ 73 boto, 74 boto3, 75 futures, 76 pycryptodome] 77 cwl_reqs = [ 78 cwltool, 79 galaxyToolUtil] 80 encryption_reqs = [ 81 pynacl] 82 google_reqs = [ 83 gcs_oauth2_boto_plugin, # is this being used?? 
84 apacheLibcloud, 85 gcs] 86 htcondor_reqs = [ 87 htcondor] 88 kubernetes_reqs = [ 89 kubernetes] 90 mesos_reqs = [ 91 pymesos, 92 psutil] 93 wdl_reqs = [] 94 95 96 # htcondor is not supported by apple 97 # this is tricky to conditionally support in 'all' due 98 # to how wheels work, so it is not included in all and 99 # must be explicitly installed as an extra 100 all_reqs = \ 101 aws_reqs + \ 102 cwl_reqs + \ 103 encryption_reqs + \ 104 google_reqs + \ 105 kubernetes_reqs + \ 106 mesos_reqs 107 108 109 setup( 110 name='toil', 111 version=version.distVersion, 112 description='Pipeline management software for clusters.', 113 author='Benedict Paten', 114 author_email='[email protected]', 115 url="https://github.com/DataBiosphere/toil", 116 classifiers=[ 117 'Development Status :: 5 - Production/Stable', 118 'Environment :: Console', 119 'Intended Audience :: Developers', 120 'Intended Audience :: Science/Research', 121 'Intended Audience :: Healthcare Industry', 122 'License :: OSI Approved :: Apache Software License', 123 'Natural Language :: English', 124 'Operating System :: MacOS :: MacOS X', 125 'Operating System :: POSIX', 126 'Operating System :: POSIX :: Linux', 127 'Programming Language :: Python :: 3.6', 128 'Topic :: Scientific/Engineering', 129 'Topic :: Scientific/Engineering :: Bio-Informatics', 130 'Topic :: Scientific/Engineering :: Astronomy', 131 'Topic :: Scientific/Engineering :: Atmospheric Science', 132 'Topic :: Scientific/Engineering :: Information Analysis', 133 'Topic :: Scientific/Engineering :: Medical Science Apps.', 134 'Topic :: System :: Distributed Computing', 135 'Topic :: Utilities'], 136 license="Apache License v2.0", 137 install_requires=core_reqs, 138 extras_require={ 139 'aws': aws_reqs, 140 'cwl': cwl_reqs, 141 'encryption': encryption_reqs, 142 'google': google_reqs, 143 'htcondor:sys_platform!="darwin"': htcondor_reqs, 144 'kubernetes': kubernetes_reqs, 145 'mesos': mesos_reqs, 146 'wdl': wdl_reqs, 147 'all': all_reqs}, 148 package_dir={'': 'src'}, 149 packages=find_packages(where='src', 150 # Note that we intentionally include the top-level `test` package for 151 # functionality like the @experimental and @integrative decoratorss: 152 exclude=['*.test.*']), 153 package_data = { 154 '': ['*.yml', 'cloud-config'], 155 }, 156 # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so 157 # you can't just change them here. Luckily, most of them are pretty unique strings, and thus 158 # easy to search for. 159 entry_points={ 160 'console_scripts': [ 161 'toil = toil.utils.toilMain:main', 162 '_toil_worker = toil.worker:main', 163 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]', 164 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]', 165 'toil-wdl-runner = toil.wdl.toilwdl:main', 166 '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]', 167 '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']}) 168 169 170 def importVersion(): 171 """ 172 Load and return the module object for src/toil/version.py, generating it from the template if 173 required. 174 """ 175 import imp 176 try: 177 # Attempt to load the template first. It only exists in a working copy cloned via git. 178 import version_template 179 except ImportError: 180 # If loading the template fails we must be in a unpacked source distribution and 181 # src/toil/version.py will already exist. 
182 pass 183 else: 184 # Use the template to generate src/toil/version.py 185 import os 186 import errno 187 from tempfile import NamedTemporaryFile 188 189 new = version_template.expand_() 190 try: 191 with open('src/toil/version.py') as f: 192 old = f.read() 193 except IOError as e: 194 if e.errno == errno.ENOENT: 195 old = None 196 else: 197 raise 198 199 if old != new: 200 with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f: 201 f.write(new) 202 os.rename(f.name, 'src/toil/version.py') 203 # Unfortunately, we can't use a straight import here because that would also load the stuff 204 # defined in src/toil/__init__.py which imports modules from external dependencies that may 205 # yet to be installed when setup.py is invoked. 206 return imp.load_source('toil.version', 'src/toil/version.py') 207 208 209 version = importVersion() 210 runSetup() 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -53,7 +53,6 @@ docker = 'docker==2.5.1' dateutil = 'python-dateutil' addict = 'addict<=2.2.0' - sphinx = 'sphinx>=2.4.4, <3' pathlib2 = 'pathlib2==2.3.2' core_reqs = [ @@ -65,7 +64,6 @@ dateutil, psutil, addict, - sphinx, pathlib2, pytz]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,6 @@\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n- sphinx = 'sphinx>=2.4.4, <3'\n pathlib2 = 'pathlib2==2.3.2'\n \n core_reqs = [\n@@ -65,7 +64,6 @@\n dateutil,\n psutil,\n addict,\n- sphinx,\n pathlib2,\n pytz]\n", "issue": "Remove Sphinx as a dependency\nAs mentioned in https://github.com/DataBiosphere/toil/pull/3011#issuecomment-601349848 we depend on Sphinx, but we only use it for generating documentation. We never use it at runtime.\n\nWe should remove the Sphinx dependency, and just make sure it is installed by the `Makefile` when running the `docs` target. Maybe it could be one of the things installed by `make prepare`.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-528)\n\u2506Issue Number: TOIL-528\n\n", "before_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom setuptools import find_packages, setup\nimport os\n\n# setting the 'CPPFLAGS' flag specifies the necessary cython dependency for \"http-parser\", for more info:\n# toil issue: https://github.com/DataBiosphere/toil/issues/2924\n# very similar to this issue: https://github.com/mcfletch/pyopengl/issues/11\n# the \"right way\" is waiting for a fix from \"http-parser\", but this fixes things in the meantime since that might take a while\ncppflags = os.environ.get('CPPFLAGS')\nif cppflags:\n # note, duplicate options don't affect things here so we don't check - Mark D\n os.environ['CPPFLAGS'] = ' '.join([cppflags, '-DPYPY_VERSION'])\nelse:\n os.environ['CPPFLAGS'] = '-DPYPY_VERSION'\n\n\ndef runSetup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. 
The `version` module is imported dynamically by importVersion() below.\n \"\"\"\n boto = 'boto==2.48.0'\n boto3 = 'boto3>=1.7.50, <2.0'\n futures = 'futures==3.1.1'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.7'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.1.2'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool<=2.0.20200126090152'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=10, <11'\n pytz = 'pytz>=2012'\n dill = 'dill==0.2.7.1'\n six = 'six>=1.10.0'\n future = 'future'\n requests = 'requests>=2, <3'\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n sphinx = 'sphinx>=2.4.4, <3'\n pathlib2 = 'pathlib2==2.3.2'\n\n core_reqs = [\n dill,\n six,\n future,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n sphinx,\n pathlib2,\n pytz]\n\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes]\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = []\n \n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs\n\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decoratorss:\n exclude=['*.test.*']),\n package_data = {\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. 
Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef importVersion():\n \"\"\"\n Load and return the module object for src/toil/version.py, generating it from the template if\n required.\n \"\"\"\n import imp\n try:\n # Attempt to load the template first. It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n try:\n with open('src/toil/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'src/toil/version.py')\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in src/toil/__init__.py which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = importVersion()\nrunSetup()\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom setuptools import find_packages, setup\nimport os\n\n# setting the 'CPPFLAGS' flag specifies the necessary cython dependency for \"http-parser\", for more info:\n# toil issue: https://github.com/DataBiosphere/toil/issues/2924\n# very similar to this issue: https://github.com/mcfletch/pyopengl/issues/11\n# the \"right way\" is waiting for a fix from \"http-parser\", but this fixes things in the meantime since that might take a while\ncppflags = os.environ.get('CPPFLAGS')\nif cppflags:\n # note, duplicate options don't affect things here so we don't check - Mark D\n os.environ['CPPFLAGS'] = ' '.join([cppflags, '-DPYPY_VERSION'])\nelse:\n os.environ['CPPFLAGS'] = '-DPYPY_VERSION'\n\n\ndef runSetup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. 
The `version` module is imported dynamically by importVersion() below.\n \"\"\"\n boto = 'boto==2.48.0'\n boto3 = 'boto3>=1.7.50, <2.0'\n futures = 'futures==3.1.1'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.7'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.1.2'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool<=2.0.20200126090152'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=10, <11'\n pytz = 'pytz>=2012'\n dill = 'dill==0.2.7.1'\n six = 'six>=1.10.0'\n future = 'future'\n requests = 'requests>=2, <3'\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n pathlib2 = 'pathlib2==2.3.2'\n\n core_reqs = [\n dill,\n six,\n future,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n pathlib2,\n pytz]\n\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes]\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = []\n \n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs\n\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decoratorss:\n exclude=['*.test.*']),\n package_data = {\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. 
Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef importVersion():\n \"\"\"\n Load and return the module object for src/toil/version.py, generating it from the template if\n required.\n \"\"\"\n import imp\n try:\n # Attempt to load the template first. It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n try:\n with open('src/toil/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'src/toil/version.py')\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in src/toil/__init__.py which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = importVersion()\nrunSetup()\n", "path": "setup.py"}]}
2,883
142
gh_patches_debug_36379
rasdani/github-patches
git_diff
dotkom__onlineweb4-1175
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Datetime timezone stuff gives runtime warnings in server log /home/vagrant/.virtualenvs/onlineweb/local/lib/python2.7/site-packages/django/db/models/fields/**init**.py:827: RuntimeWarning: DateTimeField received a naive datetime (2013-09-05 00:00:00) while time zone support is active. RuntimeWarning) humdum. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/events/utils.py` Content: ``` 1 #-*- coding: utf-8 -*- 2 from datetime import timedelta 3 4 from django.conf import settings 5 from django.core.mail import send_mail 6 from django.utils import timezone 7 8 from django.core.signing import Signer, BadSignature 9 from django.http import HttpResponse 10 from django.utils import timezone 11 from filebrowser.base import FileObject 12 from filebrowser.settings import VERSIONS 13 14 from apps.authentication.models import OnlineUser as User 15 from apps.events.models import Event 16 from apps.splash.models import SplashYear 17 18 import icalendar 19 20 21 def get_group_restricted_events(user): 22 """ Returns a queryset of events with attendance_event that a user has access to """ 23 types_allowed = [] 24 25 groups = user.groups.all() 26 27 if reduce(lambda r, g: g.name in ['Hovedstyret', 'dotKom'] or r, groups, False): 28 return Event.objects.filter(attendance_event__isnull=False) 29 30 for group in groups: 31 if group.name == 'arrKom': 32 types_allowed.append(1) # sosialt 33 types_allowed.append(4) # utflukt 34 35 if group.name == 'bedKom': 36 types_allowed.append(2) # bedriftspresentasjon 37 38 if group.name == 'fagKom': 39 types_allowed.append(3) # kurs 40 41 return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed) 42 43 44 def handle_waitlist_bump(event, host, attendees, payment=None): 45 46 title = u'Du har fått plass på %s' % (event.title) 47 48 extended_deadline = timezone.now() + timedelta(days=2) 49 message = u'Du har stått på venteliste for arrangementet "%s" og har nå fått plass.\n' % (unicode(event.title)) 50 51 if payment: 52 if payment.payment_type == 1: #Instant 53 for attendee in attendees: 54 payment.create_payment_delay(attendee.user, extended_deadline) 55 message += u"Dette arrangementet krever betaling og du må betale innen 48 timer." 56 57 elif payment.payment_type == 2: #Deadline 58 if payment.deadline > extended_deadline: #More than 2 days left of payment deadline 59 message += u"Dette arrangementet krever betaling og fristen for og betale er %s" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M')) 60 else: #The deadline is in less than 2 days 61 for attendee in attendees: 62 payment.create_payment_delay(attendee.user, extended_deadline) 63 message += u"Dette arrangementet krever betaling og du har 48 timer på å betale" 64 65 elif payment.payment_type == 3: #Delay 66 deadline = timezone.now() + timedelta(days=payment.delay) 67 for attendee in attendees: 68 payment.create_payment_delay(attendee.user, deadline) 69 message += u"Dette arrangementet krever betaling og du må betale innen %d dager." % (payment.delay) 70 if len(payment.prices()) == 1: 71 message += u"\nPrisen for dette arrangementet er %skr." 
% (payment.prices()[0].price) 72 # elif len(payment.prices()) >= 2: 73 # message += u"\nDette arrangementet har flere prisklasser:" 74 # for payment_price in payment.prices(): 75 # message += "\n%s: %skr" % (payment_price.description, payment_price.price) 76 else: 77 message += u"Det kreves ingen ekstra handling fra deg med mindre du vil melde deg av." 78 79 message += u"\n\nFor mer info:" 80 message += u"\nhttp://%s%s" % (host, event.get_absolute_url()) 81 82 for attendee in attendees: 83 send_mail(title, message, settings.DEFAULT_FROM_EMAIL, [attendee.user.email]) 84 85 86 class Calendar(object): 87 def __init__(self): 88 self.cal = icalendar.Calendar() 89 # Filename served by webserver 90 self.filename = 'online' 91 # Required ical info 92 self.cal.add('prodid', '-//Online//Onlineweb//EN') 93 self.cal.add('version', '2.0') 94 95 def add_event(self, event): 96 raise NotImplementedError 97 98 def add_events(self, events): 99 for event in events: 100 self.add_event(event) 101 102 def output(self): 103 """Return icalendar as text""" 104 return self.cal.to_ical() 105 106 def response(self): 107 """Returns a response object""" 108 response = HttpResponse(self.cal.to_ical(), content_type='text/calendar') 109 response['Content-Type'] = 'text/calendar; charset=utf-8' 110 response['Content-Disposition'] = 'attachment; filename=' + self.filename + '.ics' 111 return response 112 113 114 class EventCalendar(Calendar): 115 def user(self, user): 116 """ 117 Personalized calendar 118 This calendar is publicly available, but the url is not guessable so data should not be leaked to everyone 119 """ 120 signer = Signer() 121 try: 122 username = signer.unsign(user) 123 user = User.objects.get(username=username) 124 except (BadSignature, User.DoesNotExist): 125 user = None 126 if user: 127 # Getting all events that the user has/is participating in 128 self.add_events(Event.objects.filter( 129 attendance_event__attendees__user=user 130 ).order_by('event_start').prefetch_related( 131 'attendance_event', 'attendance_event__attendees' 132 )) 133 self.filename = username 134 135 def event(self, event_id): 136 """Single event""" 137 try: 138 self.add_event(Event.objects.get(id=event_id)) 139 except Event.DoesNotExist: 140 pass 141 self.filename = str(event_id) 142 143 def events(self): 144 """All events that haven't ended yet""" 145 self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start')) 146 self.filename = 'events' 147 148 def add_event(self, event): 149 cal_event = icalendar.Event() 150 151 cal_event.add('dtstart', event.event_start) 152 cal_event.add('dtend', event.event_end) 153 cal_event.add('location', event.location) 154 cal_event.add('summary', event.title) 155 cal_event.add('description', event.ingress_short) 156 cal_event.add('uid', 'event-' + str(event.id) + '@online.ntnu.no') 157 158 self.cal.add_component(cal_event) 159 160 161 class SplashCalendar(Calendar): 162 def add_event(self, event): 163 cal_event = icalendar.Event() 164 cal_event.add('dtstart', event.start_time) 165 cal_event.add('dtend', event.end_time) 166 cal_event.add('summary', event.title) 167 cal_event.add('description', event.content) 168 cal_event.add('uid', 'splash-' + str(event.id) + '@online.ntnu.no') 169 170 self.cal.add_component(cal_event) 171 172 def events(self): 173 self.add_events(SplashYear.objects.current().splash_events.all()) 174 self.filename = 'events' 175 176 177 def find_image_versions(event): 178 img = event.image 179 img_strings = [] 180 print("gnna find sm imgs") 181 182 for 
ver in VERSIONS.keys(): 183 if ver.startswith('events_'): 184 print(ver) 185 img_strings.append(img.version_generate(ver).url) 186 187 return img_strings 188 ``` Path: `apps/companyprofile/utils.py` Content: ``` 1 from filebrowser.settings import VERSIONS 2 3 4 def find_image_versions(company): 5 img = company.old_image 6 img_strings = [] 7 print("gnna find sm imgs") 8 9 for ver in VERSIONS.keys(): 10 if ver.startswith('companies_'): 11 print(ver) 12 img_strings.append(img.version_generate(ver).url) 13 14 return img_strings 15 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/companyprofile/utils.py b/apps/companyprofile/utils.py --- a/apps/companyprofile/utils.py +++ b/apps/companyprofile/utils.py @@ -4,11 +4,9 @@ def find_image_versions(company): img = company.old_image img_strings = [] - print("gnna find sm imgs") for ver in VERSIONS.keys(): if ver.startswith('companies_'): - print(ver) img_strings.append(img.version_generate(ver).url) return img_strings diff --git a/apps/events/utils.py b/apps/events/utils.py --- a/apps/events/utils.py +++ b/apps/events/utils.py @@ -53,7 +53,7 @@ for attendee in attendees: payment.create_payment_delay(attendee.user, extended_deadline) message += u"Dette arrangementet krever betaling og du må betale innen 48 timer." - + elif payment.payment_type == 2: #Deadline if payment.deadline > extended_deadline: #More than 2 days left of payment deadline message += u"Dette arrangementet krever betaling og fristen for og betale er %s" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M')) @@ -61,7 +61,7 @@ for attendee in attendees: payment.create_payment_delay(attendee.user, extended_deadline) message += u"Dette arrangementet krever betaling og du har 48 timer på å betale" - + elif payment.payment_type == 3: #Delay deadline = timezone.now() + timedelta(days=payment.delay) for attendee in attendees: @@ -177,11 +177,9 @@ def find_image_versions(event): img = event.image img_strings = [] - print("gnna find sm imgs") for ver in VERSIONS.keys(): if ver.startswith('events_'): - print(ver) img_strings.append(img.version_generate(ver).url) return img_strings
{"golden_diff": "diff --git a/apps/companyprofile/utils.py b/apps/companyprofile/utils.py\n--- a/apps/companyprofile/utils.py\n+++ b/apps/companyprofile/utils.py\n@@ -4,11 +4,9 @@\n def find_image_versions(company):\n img = company.old_image\n img_strings = []\n- print(\"gnna find sm imgs\")\n \n for ver in VERSIONS.keys():\n if ver.startswith('companies_'):\n- print(ver)\n img_strings.append(img.version_generate(ver).url)\n \n return img_strings\ndiff --git a/apps/events/utils.py b/apps/events/utils.py\n--- a/apps/events/utils.py\n+++ b/apps/events/utils.py\n@@ -53,7 +53,7 @@\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen 48 timer.\"\n- \n+\n elif payment.payment_type == 2: #Deadline\n if payment.deadline > extended_deadline: #More than 2 days left of payment deadline\n message += u\"Dette arrangementet krever betaling og fristen for og betale er %s\" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M'))\n@@ -61,7 +61,7 @@\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du har 48 timer p\u00e5 \u00e5 betale\"\n- \n+\n elif payment.payment_type == 3: #Delay\n deadline = timezone.now() + timedelta(days=payment.delay)\n for attendee in attendees:\n@@ -177,11 +177,9 @@\n def find_image_versions(event):\n img = event.image\n img_strings = []\n- print(\"gnna find sm imgs\")\n \n for ver in VERSIONS.keys():\n if ver.startswith('events_'):\n- print(ver)\n img_strings.append(img.version_generate(ver).url)\n \n return img_strings\n", "issue": "Datetime timezone stuff gives runtime warnings in server log\n/home/vagrant/.virtualenvs/onlineweb/local/lib/python2.7/site-packages/django/db/models/fields/**init**.py:827: RuntimeWarning: DateTimeField received a naive datetime (2013-09-05 00:00:00) while time zone support is active.\n RuntimeWarning)\n\nhumdum.\n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\n\nfrom django.core.signing import Signer, BadSignature\nfrom django.http import HttpResponse\nfrom django.utils import timezone\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Event\nfrom apps.splash.models import SplashYear\n\nimport icalendar\n\n\ndef get_group_restricted_events(user):\n \"\"\" Returns a queryset of events with attendance_event that a user has access to \"\"\"\n types_allowed = []\n\n groups = user.groups.all()\n\n if reduce(lambda r, g: g.name in ['Hovedstyret', 'dotKom'] or r, groups, False):\n return Event.objects.filter(attendance_event__isnull=False)\n\n for group in groups:\n if group.name == 'arrKom':\n types_allowed.append(1) # sosialt\n types_allowed.append(4) # utflukt\n\n if group.name == 'bedKom':\n types_allowed.append(2) # bedriftspresentasjon\n\n if group.name == 'fagKom':\n types_allowed.append(3) # kurs\n\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)\n\n\ndef handle_waitlist_bump(event, host, attendees, payment=None):\n\n title = u'Du har f\u00e5tt plass p\u00e5 %s' % (event.title)\n\n extended_deadline = timezone.now() + timedelta(days=2)\n message = u'Du har st\u00e5tt p\u00e5 venteliste for arrangementet \"%s\" og har 
n\u00e5 f\u00e5tt plass.\\n' % (unicode(event.title))\n\n if payment:\n if payment.payment_type == 1: #Instant\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen 48 timer.\"\n \n elif payment.payment_type == 2: #Deadline\n if payment.deadline > extended_deadline: #More than 2 days left of payment deadline\n message += u\"Dette arrangementet krever betaling og fristen for og betale er %s\" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M'))\n else: #The deadline is in less than 2 days\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du har 48 timer p\u00e5 \u00e5 betale\"\n \n elif payment.payment_type == 3: #Delay\n deadline = timezone.now() + timedelta(days=payment.delay)\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen %d dager.\" % (payment.delay)\n if len(payment.prices()) == 1:\n message += u\"\\nPrisen for dette arrangementet er %skr.\" % (payment.prices()[0].price)\n # elif len(payment.prices()) >= 2:\n # message += u\"\\nDette arrangementet har flere prisklasser:\"\n # for payment_price in payment.prices():\n # message += \"\\n%s: %skr\" % (payment_price.description, payment_price.price)\n else:\n message += u\"Det kreves ingen ekstra handling fra deg med mindre du vil melde deg av.\"\n\n message += u\"\\n\\nFor mer info:\"\n message += u\"\\nhttp://%s%s\" % (host, event.get_absolute_url())\n\n for attendee in attendees:\n send_mail(title, message, settings.DEFAULT_FROM_EMAIL, [attendee.user.email])\n\n\nclass Calendar(object):\n def __init__(self):\n self.cal = icalendar.Calendar()\n # Filename served by webserver\n self.filename = 'online'\n # Required ical info\n self.cal.add('prodid', '-//Online//Onlineweb//EN')\n self.cal.add('version', '2.0')\n\n def add_event(self, event):\n raise NotImplementedError\n\n def add_events(self, events):\n for event in events:\n self.add_event(event)\n\n def output(self):\n \"\"\"Return icalendar as text\"\"\"\n return self.cal.to_ical()\n\n def response(self):\n \"\"\"Returns a response object\"\"\"\n response = HttpResponse(self.cal.to_ical(), content_type='text/calendar')\n response['Content-Type'] = 'text/calendar; charset=utf-8'\n response['Content-Disposition'] = 'attachment; filename=' + self.filename + '.ics'\n return response\n\n\nclass EventCalendar(Calendar):\n def user(self, user):\n \"\"\"\n Personalized calendar\n This calendar is publicly available, but the url is not guessable so data should not be leaked to everyone\n \"\"\"\n signer = Signer()\n try:\n username = signer.unsign(user)\n user = User.objects.get(username=username)\n except (BadSignature, User.DoesNotExist):\n user = None\n if user:\n # Getting all events that the user has/is participating in\n self.add_events(Event.objects.filter(\n attendance_event__attendees__user=user\n ).order_by('event_start').prefetch_related(\n 'attendance_event', 'attendance_event__attendees'\n ))\n self.filename = username\n\n def event(self, event_id):\n \"\"\"Single event\"\"\"\n try:\n self.add_event(Event.objects.get(id=event_id))\n except Event.DoesNotExist:\n pass\n self.filename = str(event_id)\n\n def events(self):\n \"\"\"All events that haven't ended yet\"\"\"\n 
self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'\n\n def add_event(self, event):\n cal_event = icalendar.Event()\n\n cal_event.add('dtstart', event.event_start)\n cal_event.add('dtend', event.event_end)\n cal_event.add('location', event.location)\n cal_event.add('summary', event.title)\n cal_event.add('description', event.ingress_short)\n cal_event.add('uid', 'event-' + str(event.id) + '@online.ntnu.no')\n\n self.cal.add_component(cal_event)\n\n\nclass SplashCalendar(Calendar):\n def add_event(self, event):\n cal_event = icalendar.Event()\n cal_event.add('dtstart', event.start_time)\n cal_event.add('dtend', event.end_time)\n cal_event.add('summary', event.title)\n cal_event.add('description', event.content)\n cal_event.add('uid', 'splash-' + str(event.id) + '@online.ntnu.no')\n\n self.cal.add_component(cal_event)\n\n def events(self):\n self.add_events(SplashYear.objects.current().splash_events.all())\n self.filename = 'events'\n\n\ndef find_image_versions(event):\n img = event.image\n img_strings = []\n print(\"gnna find sm imgs\")\n\n for ver in VERSIONS.keys():\n if ver.startswith('events_'):\n print(ver)\n img_strings.append(img.version_generate(ver).url)\n\n return img_strings\n", "path": "apps/events/utils.py"}, {"content": "from filebrowser.settings import VERSIONS\n\n\ndef find_image_versions(company):\n img = company.old_image\n img_strings = []\n print(\"gnna find sm imgs\")\n\n for ver in VERSIONS.keys():\n if ver.startswith('companies_'):\n print(ver)\n img_strings.append(img.version_generate(ver).url)\n\n return img_strings\n", "path": "apps/companyprofile/utils.py"}], "after_files": [{"content": "#-*- coding: utf-8 -*-\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\n\nfrom django.core.signing import Signer, BadSignature\nfrom django.http import HttpResponse\nfrom django.utils import timezone\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Event\nfrom apps.splash.models import SplashYear\n\nimport icalendar\n\n\ndef get_group_restricted_events(user):\n \"\"\" Returns a queryset of events with attendance_event that a user has access to \"\"\"\n types_allowed = []\n\n groups = user.groups.all()\n\n if reduce(lambda r, g: g.name in ['Hovedstyret', 'dotKom'] or r, groups, False):\n return Event.objects.filter(attendance_event__isnull=False)\n\n for group in groups:\n if group.name == 'arrKom':\n types_allowed.append(1) # sosialt\n types_allowed.append(4) # utflukt\n\n if group.name == 'bedKom':\n types_allowed.append(2) # bedriftspresentasjon\n\n if group.name == 'fagKom':\n types_allowed.append(3) # kurs\n\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)\n\n\ndef handle_waitlist_bump(event, host, attendees, payment=None):\n\n title = u'Du har f\u00e5tt plass p\u00e5 %s' % (event.title)\n\n extended_deadline = timezone.now() + timedelta(days=2)\n message = u'Du har st\u00e5tt p\u00e5 venteliste for arrangementet \"%s\" og har n\u00e5 f\u00e5tt plass.\\n' % (unicode(event.title))\n\n if payment:\n if payment.payment_type == 1: #Instant\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen 48 timer.\"\n\n elif payment.payment_type == 
2: #Deadline\n if payment.deadline > extended_deadline: #More than 2 days left of payment deadline\n message += u\"Dette arrangementet krever betaling og fristen for og betale er %s\" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M'))\n else: #The deadline is in less than 2 days\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du har 48 timer p\u00e5 \u00e5 betale\"\n\n elif payment.payment_type == 3: #Delay\n deadline = timezone.now() + timedelta(days=payment.delay)\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen %d dager.\" % (payment.delay)\n if len(payment.prices()) == 1:\n message += u\"\\nPrisen for dette arrangementet er %skr.\" % (payment.prices()[0].price)\n # elif len(payment.prices()) >= 2:\n # message += u\"\\nDette arrangementet har flere prisklasser:\"\n # for payment_price in payment.prices():\n # message += \"\\n%s: %skr\" % (payment_price.description, payment_price.price)\n else:\n message += u\"Det kreves ingen ekstra handling fra deg med mindre du vil melde deg av.\"\n\n message += u\"\\n\\nFor mer info:\"\n message += u\"\\nhttp://%s%s\" % (host, event.get_absolute_url())\n\n for attendee in attendees:\n send_mail(title, message, settings.DEFAULT_FROM_EMAIL, [attendee.user.email])\n\n\nclass Calendar(object):\n def __init__(self):\n self.cal = icalendar.Calendar()\n # Filename served by webserver\n self.filename = 'online'\n # Required ical info\n self.cal.add('prodid', '-//Online//Onlineweb//EN')\n self.cal.add('version', '2.0')\n\n def add_event(self, event):\n raise NotImplementedError\n\n def add_events(self, events):\n for event in events:\n self.add_event(event)\n\n def output(self):\n \"\"\"Return icalendar as text\"\"\"\n return self.cal.to_ical()\n\n def response(self):\n \"\"\"Returns a response object\"\"\"\n response = HttpResponse(self.cal.to_ical(), content_type='text/calendar')\n response['Content-Type'] = 'text/calendar; charset=utf-8'\n response['Content-Disposition'] = 'attachment; filename=' + self.filename + '.ics'\n return response\n\n\nclass EventCalendar(Calendar):\n def user(self, user):\n \"\"\"\n Personalized calendar\n This calendar is publicly available, but the url is not guessable so data should not be leaked to everyone\n \"\"\"\n signer = Signer()\n try:\n username = signer.unsign(user)\n user = User.objects.get(username=username)\n except (BadSignature, User.DoesNotExist):\n user = None\n if user:\n # Getting all events that the user has/is participating in\n self.add_events(Event.objects.filter(\n attendance_event__attendees__user=user\n ).order_by('event_start').prefetch_related(\n 'attendance_event', 'attendance_event__attendees'\n ))\n self.filename = username\n\n def event(self, event_id):\n \"\"\"Single event\"\"\"\n try:\n self.add_event(Event.objects.get(id=event_id))\n except Event.DoesNotExist:\n pass\n self.filename = str(event_id)\n\n def events(self):\n \"\"\"All events that haven't ended yet\"\"\"\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'\n\n def add_event(self, event):\n cal_event = icalendar.Event()\n\n cal_event.add('dtstart', event.event_start)\n cal_event.add('dtend', event.event_end)\n cal_event.add('location', event.location)\n cal_event.add('summary', event.title)\n cal_event.add('description', event.ingress_short)\n 
cal_event.add('uid', 'event-' + str(event.id) + '@online.ntnu.no')\n\n self.cal.add_component(cal_event)\n\n\nclass SplashCalendar(Calendar):\n def add_event(self, event):\n cal_event = icalendar.Event()\n cal_event.add('dtstart', event.start_time)\n cal_event.add('dtend', event.end_time)\n cal_event.add('summary', event.title)\n cal_event.add('description', event.content)\n cal_event.add('uid', 'splash-' + str(event.id) + '@online.ntnu.no')\n\n self.cal.add_component(cal_event)\n\n def events(self):\n self.add_events(SplashYear.objects.current().splash_events.all())\n self.filename = 'events'\n\n\ndef find_image_versions(event):\n img = event.image\n img_strings = []\n\n for ver in VERSIONS.keys():\n if ver.startswith('events_'):\n img_strings.append(img.version_generate(ver).url)\n\n return img_strings\n", "path": "apps/events/utils.py"}, {"content": "from filebrowser.settings import VERSIONS\n\n\ndef find_image_versions(company):\n img = company.old_image\n img_strings = []\n\n for ver in VERSIONS.keys():\n if ver.startswith('companies_'):\n img_strings.append(img.version_generate(ver).url)\n\n return img_strings\n", "path": "apps/companyprofile/utils.py"}]}
2,498
444
gh_patches_debug_44285
rasdani/github-patches
git_diff
lisa-lab__pylearn2-579
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update default training algorithm to support multiple monitoring datasets Just follow SGD or BGD for an example --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pylearn2/training_algorithms/default.py` Content: ``` 1 """ 2 .. todo:: 3 4 WRITEME 5 """ 6 from pylearn2.monitor import Monitor 7 from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm 8 from pylearn2.utils import safe_zip 9 from pylearn2.utils.data_specs import DataSpecsMapping 10 import theano.tensor as T 11 12 13 class DefaultTrainingAlgorithm(TrainingAlgorithm): 14 """ 15 .. todo:: 16 17 WRITEME 18 """ 19 def __init__(self, batch_size=None, batches_per_iter=1000, 20 monitoring_batches=-1, monitoring_dataset=None, 21 termination_criterion=None): 22 """ 23 Parameters 24 ---------- 25 batch_size : int 26 If batch_size is None, reverts to the `force_batch_size` field of \ 27 the model 28 batches_per_iter : int 29 WRITEME 30 monitoring_batches : int 31 WRITEME 32 monitoring_dataset : WRITEME 33 termination_criterion : WRITEME 34 If specified, can cause the algorithm to terminate before \ 35 `model.learn_batch` says to 36 """ 37 self.batch_size, self.batches_per_iter = batch_size, batches_per_iter 38 if monitoring_dataset is None: 39 assert monitoring_batches == -1 40 self.monitoring_dataset = monitoring_dataset 41 self.monitoring_batches = monitoring_batches 42 self.bSetup = False 43 self.termination_criterion = termination_criterion 44 45 def setup(self, model, dataset): 46 """ 47 Allows the training algorithm to do some preliminary configuration 48 *before* we actually start training the model. The dataset is provided 49 in case other derived training algorithms need to modify model based on 50 the dataset. 51 52 Parameters 53 ---------- 54 model : object 55 Python object representing the model to train loosely \ 56 implementing the interface of models.model.Model. 57 58 dataset : pylearn2.datasets.dataset.Dataset 59 Dataset object used to draw training data 60 """ 61 self.model = model 62 63 self.monitor = Monitor.get_monitor(model) 64 65 if self.monitoring_dataset is not None: 66 # Get the data specifications needed by the model 67 space, source = model.get_monitoring_data_specs() 68 69 # Create Theano variables for each of the individual components 70 # of that data. Usually, it will be X for inputs and Y for targets. 
71 # First, we need to find these components, and put them in a tuple 72 mapping = DataSpecsMapping((space, source)) 73 space_tuple = mapping.flatten(space, return_tuple=True) 74 source_tuple = mapping.flatten(source, return_tuple=True) 75 # Then, build a flat tuple of these Theano variables 76 ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src) 77 for (sp, src) in safe_zip(space_tuple, source_tuple)) 78 # Finally, organize them back into a structure expected by the 79 # monitoring channels of the model 80 nested_ipt = mapping.nest(ipt) 81 82 self.monitor.add_dataset(dataset=self.monitoring_dataset, 83 mode="sequential", 84 batch_size=self.batch_size, 85 num_batches=self.monitoring_batches) 86 87 channels = model.get_monitoring_channels(nested_ipt) 88 if not isinstance(channels, dict): 89 raise TypeError("model.get_monitoring_channels must return a " 90 "dictionary, but it returned " + str(channels)) 91 for name in channels: 92 J = channels[name] 93 if isinstance(J, tuple): 94 assert len(J) == 2 95 J, prereqs = J 96 else: 97 prereqs = None 98 99 self.monitor.add_channel(name=name, 100 ipt=nested_ipt, 101 val=J, 102 prereqs=prereqs, 103 data_specs=(space, source)) 104 self.first = True 105 self.bSetup = True 106 107 def train(self, dataset): 108 """ 109 .. todo:: 110 111 WRITEME 112 """ 113 assert self.bSetup 114 model = self.model 115 if self.batch_size is None: 116 batch_size = model.force_batch_size 117 else: 118 batch_size = self.batch_size 119 if hasattr(model, 'force_batch_size'): 120 assert (model.force_batch_size <= 0 or batch_size == 121 model.force_batch_size) 122 123 for i in xrange(self.batches_per_iter): 124 # model.train_batch and self.train both return False when training 125 # should terminate. 126 learn_more = model.train_batch(dataset, batch_size) 127 model.monitor.report_batch(batch_size) 128 if not learn_more: 129 break 130 131 # Make sure we didn't exit training loop because Model.learn 132 # hasn't been updated to new interface yet. 133 if learn_more not in [True,False]: 134 msg = ('The learn method of model %s did not return a boolean ' + 135 'value. Please update your model accordingly.') 136 raise ValueError(msg % str(model)) 137 self.learn_more = learn_more 138 139 def continue_learning(self, model): 140 """ 141 .. todo:: 142 143 WRITEME 144 """ 145 if self.learn_more: 146 if self.termination_criterion is not None: 147 return self.termination_criterion.continue_learning(model) 148 return True 149 return False 150 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py --- a/pylearn2/training_algorithms/default.py +++ b/pylearn2/training_algorithms/default.py @@ -29,7 +29,8 @@ WRITEME monitoring_batches : int WRITEME - monitoring_dataset : WRITEME + monitoring_dataset: Dataset or dict + A Dataset or a dictionary mapping string dataset names to Datasets termination_criterion : WRITEME If specified, can cause the algorithm to terminate before \ `model.learn_batch` says to @@ -37,7 +38,8 @@ self.batch_size, self.batches_per_iter = batch_size, batches_per_iter if monitoring_dataset is None: assert monitoring_batches == -1 - self.monitoring_dataset = monitoring_dataset + + self._set_monitoring_dataset(monitoring_dataset) self.monitoring_batches = monitoring_batches self.bSetup = False self.termination_criterion = termination_criterion @@ -74,33 +76,42 @@ source_tuple = mapping.flatten(source, return_tuple=True) # Then, build a flat tuple of these Theano variables ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src) - for (sp, src) in safe_zip(space_tuple, source_tuple)) + for (sp, src) in safe_zip(space_tuple, source_tuple)) # Finally, organize them back into a structure expected by the # monitoring channels of the model nested_ipt = mapping.nest(ipt) - self.monitor.add_dataset(dataset=self.monitoring_dataset, - mode="sequential", - batch_size=self.batch_size, - num_batches=self.monitoring_batches) - channels = model.get_monitoring_channels(nested_ipt) if not isinstance(channels, dict): raise TypeError("model.get_monitoring_channels must return a " "dictionary, but it returned " + str(channels)) - for name in channels: - J = channels[name] - if isinstance(J, tuple): - assert len(J) == 2 - J, prereqs = J + + for dataset_name in self.monitoring_dataset: + if dataset_name == '': + prefix = '' else: - prereqs = None + prefix = dataset_name + '_' + monitoring_dataset = self.monitoring_dataset[dataset_name] + + self.monitor.add_dataset(dataset=monitoring_dataset, + mode="sequential", + batch_size=self.batch_size) + + for name in channels: + J = channels[name] + if isinstance(J, tuple): + assert len(J) == 2 + J, prereqs = J + else: + prereqs = None + + self.monitor.add_channel(name=prefix + name, + ipt=nested_ipt, + val=J, + dataset=monitoring_dataset, + prereqs=prereqs, + data_specs=(space, source)) - self.monitor.add_channel(name=name, - ipt=nested_ipt, - val=J, - prereqs=prereqs, - data_specs=(space, source)) self.first = True self.bSetup = True @@ -130,7 +141,7 @@ # Make sure we didn't exit training loop because Model.learn # hasn't been updated to new interface yet. - if learn_more not in [True,False]: + if learn_more not in [True, False]: msg = ('The learn method of model %s did not return a boolean ' + 'value. Please update your model accordingly.') raise ValueError(msg % str(model))
{"golden_diff": "diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py\n--- a/pylearn2/training_algorithms/default.py\n+++ b/pylearn2/training_algorithms/default.py\n@@ -29,7 +29,8 @@\n WRITEME\n monitoring_batches : int\n WRITEME\n- monitoring_dataset : WRITEME\n+ monitoring_dataset: Dataset or dict\n+ A Dataset or a dictionary mapping string dataset names to Datasets\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n@@ -37,7 +38,8 @@\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n- self.monitoring_dataset = monitoring_dataset\n+\n+ self._set_monitoring_dataset(monitoring_dataset)\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n@@ -74,33 +76,42 @@\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n- for (sp, src) in safe_zip(space_tuple, source_tuple))\n+ for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n \n- self.monitor.add_dataset(dataset=self.monitoring_dataset,\n- mode=\"sequential\",\n- batch_size=self.batch_size,\n- num_batches=self.monitoring_batches)\n-\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n- for name in channels:\n- J = channels[name]\n- if isinstance(J, tuple):\n- assert len(J) == 2\n- J, prereqs = J\n+\n+ for dataset_name in self.monitoring_dataset:\n+ if dataset_name == '':\n+ prefix = ''\n else:\n- prereqs = None\n+ prefix = dataset_name + '_'\n+ monitoring_dataset = self.monitoring_dataset[dataset_name]\n+\n+ self.monitor.add_dataset(dataset=monitoring_dataset,\n+ mode=\"sequential\",\n+ batch_size=self.batch_size)\n+\n+ for name in channels:\n+ J = channels[name]\n+ if isinstance(J, tuple):\n+ assert len(J) == 2\n+ J, prereqs = J\n+ else:\n+ prereqs = None\n+\n+ self.monitor.add_channel(name=prefix + name,\n+ ipt=nested_ipt,\n+ val=J,\n+ dataset=monitoring_dataset,\n+ prereqs=prereqs,\n+ data_specs=(space, source))\n \n- self.monitor.add_channel(name=name,\n- ipt=nested_ipt,\n- val=J,\n- prereqs=prereqs,\n- data_specs=(space, source))\n self.first = True\n self.bSetup = True\n \n@@ -130,7 +141,7 @@\n \n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n- if learn_more not in [True,False]:\n+ if learn_more not in [True, False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n", "issue": "Update default training algorithm to support multiple monitoring datasets\nJust follow SGD or BGD for an example\n\n", "before_files": [{"content": "\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nfrom pylearn2.monitor import Monitor\nfrom pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm\nfrom pylearn2.utils import safe_zip\nfrom pylearn2.utils.data_specs import DataSpecsMapping\nimport theano.tensor as T\n\n\nclass DefaultTrainingAlgorithm(TrainingAlgorithm):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n def __init__(self, batch_size=None, batches_per_iter=1000,\n monitoring_batches=-1, monitoring_dataset=None,\n termination_criterion=None):\n \"\"\"\n Parameters\n ----------\n batch_size : int\n If batch_size is None, reverts to the `force_batch_size` field of \\\n the model\n batches_per_iter : int\n WRITEME\n monitoring_batches : int\n WRITEME\n monitoring_dataset : WRITEME\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n \"\"\"\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n self.monitoring_dataset = monitoring_dataset\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n\n def setup(self, model, dataset):\n \"\"\"\n Allows the training algorithm to do some preliminary configuration\n *before* we actually start training the model. The dataset is provided\n in case other derived training algorithms need to modify model based on\n the dataset.\n\n Parameters\n ----------\n model : object\n Python object representing the model to train loosely \\\n implementing the interface of models.model.Model.\n\n dataset : pylearn2.datasets.dataset.Dataset\n Dataset object used to draw training data\n \"\"\"\n self.model = model\n\n self.monitor = Monitor.get_monitor(model)\n\n if self.monitoring_dataset is not None:\n # Get the data specifications needed by the model\n space, source = model.get_monitoring_data_specs()\n\n # Create Theano variables for each of the individual components\n # of that data. Usually, it will be X for inputs and Y for targets.\n # First, we need to find these components, and put them in a tuple\n mapping = DataSpecsMapping((space, source))\n space_tuple = mapping.flatten(space, return_tuple=True)\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n\n self.monitor.add_dataset(dataset=self.monitoring_dataset,\n mode=\"sequential\",\n batch_size=self.batch_size,\n num_batches=self.monitoring_batches)\n\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n for name in channels:\n J = channels[name]\n if isinstance(J, tuple):\n assert len(J) == 2\n J, prereqs = J\n else:\n prereqs = None\n\n self.monitor.add_channel(name=name,\n ipt=nested_ipt,\n val=J,\n prereqs=prereqs,\n data_specs=(space, source))\n self.first = True\n self.bSetup = True\n\n def train(self, dataset):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n assert self.bSetup\n model = self.model\n if self.batch_size is None:\n batch_size = model.force_batch_size\n else:\n batch_size = self.batch_size\n if hasattr(model, 'force_batch_size'):\n assert (model.force_batch_size <= 0 or batch_size ==\n model.force_batch_size)\n\n for i in xrange(self.batches_per_iter):\n # model.train_batch and self.train both return False when training\n # should terminate.\n learn_more = model.train_batch(dataset, batch_size)\n model.monitor.report_batch(batch_size)\n if not learn_more:\n break\n\n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n if learn_more not in [True,False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n self.learn_more = learn_more\n\n def continue_learning(self, model):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n if self.learn_more:\n if self.termination_criterion is not None:\n return self.termination_criterion.continue_learning(model)\n return True\n return False\n", "path": "pylearn2/training_algorithms/default.py"}], "after_files": [{"content": "\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nfrom pylearn2.monitor import Monitor\nfrom pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm\nfrom pylearn2.utils import safe_zip\nfrom pylearn2.utils.data_specs import DataSpecsMapping\nimport theano.tensor as T\n\n\nclass DefaultTrainingAlgorithm(TrainingAlgorithm):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n def __init__(self, batch_size=None, batches_per_iter=1000,\n monitoring_batches=-1, monitoring_dataset=None,\n termination_criterion=None):\n \"\"\"\n Parameters\n ----------\n batch_size : int\n If batch_size is None, reverts to the `force_batch_size` field of \\\n the model\n batches_per_iter : int\n WRITEME\n monitoring_batches : int\n WRITEME\n monitoring_dataset: Dataset or dict\n A Dataset or a dictionary mapping string dataset names to Datasets\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n \"\"\"\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n\n self._set_monitoring_dataset(monitoring_dataset)\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n\n def setup(self, model, dataset):\n \"\"\"\n Allows the training algorithm to do some preliminary configuration\n *before* we actually start training the model. The dataset is provided\n in case other derived training algorithms need to modify model based on\n the dataset.\n\n Parameters\n ----------\n model : object\n Python object representing the model to train loosely \\\n implementing the interface of models.model.Model.\n\n dataset : pylearn2.datasets.dataset.Dataset\n Dataset object used to draw training data\n \"\"\"\n self.model = model\n\n self.monitor = Monitor.get_monitor(model)\n\n if self.monitoring_dataset is not None:\n # Get the data specifications needed by the model\n space, source = model.get_monitoring_data_specs()\n\n # Create Theano variables for each of the individual components\n # of that data. 
Usually, it will be X for inputs and Y for targets.\n # First, we need to find these components, and put them in a tuple\n mapping = DataSpecsMapping((space, source))\n space_tuple = mapping.flatten(space, return_tuple=True)\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n\n for dataset_name in self.monitoring_dataset:\n if dataset_name == '':\n prefix = ''\n else:\n prefix = dataset_name + '_'\n monitoring_dataset = self.monitoring_dataset[dataset_name]\n\n self.monitor.add_dataset(dataset=monitoring_dataset,\n mode=\"sequential\",\n batch_size=self.batch_size)\n\n for name in channels:\n J = channels[name]\n if isinstance(J, tuple):\n assert len(J) == 2\n J, prereqs = J\n else:\n prereqs = None\n\n self.monitor.add_channel(name=prefix + name,\n ipt=nested_ipt,\n val=J,\n dataset=monitoring_dataset,\n prereqs=prereqs,\n data_specs=(space, source))\n\n self.first = True\n self.bSetup = True\n\n def train(self, dataset):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n assert self.bSetup\n model = self.model\n if self.batch_size is None:\n batch_size = model.force_batch_size\n else:\n batch_size = self.batch_size\n if hasattr(model, 'force_batch_size'):\n assert (model.force_batch_size <= 0 or batch_size ==\n model.force_batch_size)\n\n for i in xrange(self.batches_per_iter):\n # model.train_batch and self.train both return False when training\n # should terminate.\n learn_more = model.train_batch(dataset, batch_size)\n model.monitor.report_batch(batch_size)\n if not learn_more:\n break\n\n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n if learn_more not in [True, False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n self.learn_more = learn_more\n\n def continue_learning(self, model):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n if self.learn_more:\n if self.termination_criterion is not None:\n return self.termination_criterion.continue_learning(model)\n return True\n return False\n", "path": "pylearn2/training_algorithms/default.py"}]}
1,714
833
gh_patches_debug_34098
rasdani/github-patches
git_diff
airctic__icevision-646
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow passing a Config object to the MMDetection models ## 🚀 Feature **Is your feature request related to a problem? Please describe.** In the current version, to update the loss_weight for example, We have to duplicate an existing config .py file and make the changes in the new file, and pass it the model method. The latter expect a config file_path **Describe the solution you'd like** I think it would be better to pass the MMDetection config object instead to the model method. By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this: ``` cfg = Config.fromfile(cfg_filepath) cfg.model.bbox_head.loss_cls.loss_weight = 0.8 cfg.model.bbox_head.loss_bbox.loss_weight = 2 ``` **Additional context** There is a discussion in our Discord forum around this issue: https://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `icevision/models/mmdet/common/bbox/two_stage/model.py` Content: ``` 1 __all__ = ["model"] 2 3 from icevision.imports import * 4 from mmcv import Config 5 from mmdet.models import build_detector 6 from mmcv.runner import load_checkpoint 7 8 9 def model( 10 cfg_path: Union[str, Path], 11 num_classes: int, 12 weights_path: Optional[Union[str, Path]] = None, 13 ) -> nn.Module: 14 cfg = Config.fromfile(str(cfg_path)) 15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1 16 if weights_path is not None: 17 cfg.model.pretrained = None 18 19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg")) 20 21 if weights_path is not None: 22 load_checkpoint(_model, str(weights_path)) 23 24 return _model 25 ``` Path: `icevision/models/mmdet/common/bbox/single_stage/model.py` Content: ``` 1 __all__ = ["model"] 2 3 from icevision.imports import * 4 from mmcv import Config 5 from mmdet.models import build_detector 6 from mmcv.runner import load_checkpoint 7 8 9 def model( 10 cfg_path: Union[str, Path], 11 num_classes: int, 12 weights_path: Optional[Union[str, Path]] = None, 13 ) -> nn.Module: 14 cfg = Config.fromfile(str(cfg_path)) 15 cfg.model.bbox_head.num_classes = num_classes - 1 16 if weights_path is not None: 17 cfg.model.pretrained = None 18 19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg")) 20 21 if weights_path is not None: 22 load_checkpoint(_model, str(weights_path)) 23 24 return _model 25 ``` Path: `icevision/models/mmdet/common/mask/two_stage/model.py` Content: ``` 1 __all__ = ["model"] 2 3 from icevision.imports import * 4 from mmcv import Config 5 from mmdet.models import build_detector 6 from mmcv.runner import load_checkpoint 7 8 9 def model( 10 cfg_path: Union[str, Path], 11 num_classes: int, 12 weights_path: Optional[Union[str, Path]] = None, 13 ) -> nn.Module: 14 cfg = Config.fromfile(str(cfg_path)) 15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1 16 cfg.model.roi_head.mask_head.num_classes = num_classes - 1 17 if weights_path is not None: 18 cfg.model.pretrained = None 19 20 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg")) 21 22 if weights_path is not None: 23 load_checkpoint(_model, str(weights_path)) 24 25 return _model 26 ``` --- END FILES --- Please first localize the bug based on the issue 
statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py --- a/icevision/models/mmdet/common/bbox/single_stage/model.py +++ b/icevision/models/mmdet/common/bbox/single_stage/model.py @@ -7,11 +7,16 @@ def model( - cfg_path: Union[str, Path], + cfg: Union[str, Path, Config], num_classes: int, weights_path: Optional[Union[str, Path]] = None, ) -> nn.Module: - cfg = Config.fromfile(str(cfg_path)) + + # if `cfg` argument is a path (str, Path) create an Config object from the file + # otherwise cfg should be already an Config object + if isinstance(cfg, (str, Path)): + cfg = Config.fromfile(str(cfg)) + cfg.model.bbox_head.num_classes = num_classes - 1 if weights_path is not None: cfg.model.pretrained = None diff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py --- a/icevision/models/mmdet/common/bbox/two_stage/model.py +++ b/icevision/models/mmdet/common/bbox/two_stage/model.py @@ -7,11 +7,16 @@ def model( - cfg_path: Union[str, Path], + cfg: Union[str, Path, Config], num_classes: int, weights_path: Optional[Union[str, Path]] = None, ) -> nn.Module: - cfg = Config.fromfile(str(cfg_path)) + + # if `cfg` argument is a path (str, Path) create an Config object from the file + # otherwise cfg should be already an Config object + if isinstance(cfg, (str, Path)): + cfg = Config.fromfile(str(cfg)) + cfg.model.roi_head.bbox_head.num_classes = num_classes - 1 if weights_path is not None: cfg.model.pretrained = None diff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py --- a/icevision/models/mmdet/common/mask/two_stage/model.py +++ b/icevision/models/mmdet/common/mask/two_stage/model.py @@ -7,11 +7,16 @@ def model( - cfg_path: Union[str, Path], + cfg: Union[str, Path, Config], num_classes: int, weights_path: Optional[Union[str, Path]] = None, ) -> nn.Module: - cfg = Config.fromfile(str(cfg_path)) + + # if `cfg` argument is a path (str, Path) create an Config object from the file + # otherwise cfg should be already an Config object + if isinstance(cfg, (str, Path)): + cfg = Config.fromfile(str(cfg)) + cfg.model.roi_head.bbox_head.num_classes = num_classes - 1 cfg.model.roi_head.mask_head.num_classes = num_classes - 1 if weights_path is not None:
{"golden_diff": "diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/single_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/two_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py\n--- a/icevision/models/mmdet/common/mask/two_stage/model.py\n+++ b/icevision/models/mmdet/common/mask/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n", "issue": "Allow passing a Config object to the MMDetection models\n## \ud83d\ude80 Feature\r\n**Is your feature request related to a problem? Please describe.**\r\nIn the current version, to update the loss_weight for example, We have to duplicate an existing config .py file and make the changes in the new file, and pass it the model method. The latter expect a config file_path\r\n\r\n\r\n**Describe the solution you'd like**\r\n I think it would be better to pass the MMDetection config object instead to the model method. 
By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this: \r\n\r\n```\r\ncfg = Config.fromfile(cfg_filepath)\r\ncfg.model.bbox_head.loss_cls.loss_weight = 0.8\r\ncfg.model.bbox_head.loss_bbox.loss_weight = 2\r\n```\r\n\r\n\r\n**Additional context**\r\nThere is a discussion in our Discord forum around this issue:\r\nhttps://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644\r\n\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/two_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/single_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/mask/two_stage/model.py"}], "after_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n\n # if `cfg` argument is a path (str, Path) create an Config object from the file\n # otherwise cfg should be already an Config object\n if isinstance(cfg, (str, Path)):\n cfg = Config.fromfile(str(cfg))\n\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n 
load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/two_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n\n # if `cfg` argument is a path (str, Path) create an Config object from the file\n # otherwise cfg should be already an Config object\n if isinstance(cfg, (str, Path)):\n cfg = Config.fromfile(str(cfg))\n\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/single_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n\n # if `cfg` argument is a path (str, Path) create an Config object from the file\n # otherwise cfg should be already an Config object\n if isinstance(cfg, (str, Path)):\n cfg = Config.fromfile(str(cfg))\n\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/mask/two_stage/model.py"}]}
1,222
716
gh_patches_debug_43066
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1053
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide detailed error info for POST to records API when in violation of a not-null constraint ## Current behavior 1. Set up a table which contains a `NOT NULL` column. 1. POST an empty request (`{}`) to the `records` API (e.g. by clicking the `+` icon to add a new row to the table). 1. Observe the following response: ```json [ { "code": 4999, "message": "(psycopg2.errors.NotNullViolation) null value in column \"common_name\" of relation \"trees\" violates not-null constraint\nDETAIL: Failing row contains (11, null, null, null, null, null, null).\n\n[SQL: INSERT INTO public.trees DEFAULT VALUES RETURNING public.trees.id]\n(Background on this error at: http://sqlalche.me/e/14/gkpj)", "field": null, "detail": null } ] ``` 1. Within the response, observe no easy way to identify _which column_ (or columns) was responsible for the error. ## Expected behavior - For #775, we need enough detail for the front end to be able to display error messages specific to certain cells. For example, if a POST request has two columns with invalid data (perhaps even invalid in _different ways_), we need to be able to match specific error messages to each of those columns. - @pavish and I are flexible on the exact response schema from the API here, as long as it satisfies the above goal. - We should also keep #896 in-mind with this change. ## Status - blocked by #896 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mathesar/api/exceptions/database_exceptions/exceptions.py` Content: ``` 1 from rest_framework import status 2 3 from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException 4 from mathesar.api.exceptions.error_codes import ErrorCodes 5 from mathesar.api.exceptions.generic_exceptions.base_exceptions import ( 6 MathesarAPIException, 7 get_default_exception_detail, 8 ) 9 10 11 class UniqueViolationAPIException(MathesarAPIException): 12 error_code = ErrorCodes.UniqueViolation.value 13 14 def __init__( 15 self, 16 exception, 17 message="This column has non-unique values so a unique constraint cannot be set", 18 field=None, 19 details=None, 20 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 21 ): 22 exception_detail = get_default_exception_detail(exception, self.error_code, message, field, details)._asdict() 23 self.detail = [exception_detail] 24 self.status_code = status_code 25 26 27 class DuplicateTableAPIException(ProgrammingAPIException): 28 # Default message is not needed as the exception string provides enough details 29 error_code = ErrorCodes.DuplicateTableError.value 30 31 def __init__( 32 self, 33 exception, 34 message=None, 35 field=None, 36 details=None, 37 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 38 ): 39 super().__init__(exception, self.error_code, message, field, details, status_code) 40 41 42 class DuplicateColumnAPIException(ProgrammingAPIException): 43 # Default message is not needed as the exception string provides enough details 44 error_code = ErrorCodes.DuplicateColumnError.value 45 46 def __init__( 47 self, 48 exception, 49 message=None, 50 field=None, 51 details=None, 52 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 53 ): 54 super().__init__(exception, self.error_code, message, field, details, status_code) 55 56 57 class InvalidDefaultAPIException(MathesarAPIException): 58 # Default message is not needed as the exception string 
provides enough details 59 error_code = ErrorCodes.InvalidDefault.value 60 61 def __init__( 62 self, 63 exception, 64 message=None, 65 field=None, 66 details=None, 67 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 68 ): 69 super().__init__(exception, self.error_code, message, field, details, status_code) 70 71 72 class InvalidTypeOptionAPIException(MathesarAPIException): 73 # Default message is not needed as the exception string provides enough details 74 error_code = ErrorCodes.InvalidTypeOption.value 75 76 def __init__( 77 self, 78 exception, 79 message=None, 80 field=None, 81 details=None, 82 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 83 ): 84 super().__init__(exception, self.error_code, message, field, details, status_code) 85 86 87 class InvalidTypeCastAPIException(MathesarAPIException): 88 # Default message is not needed as the exception string provides enough details 89 error_code = ErrorCodes.InvalidTypeCast.value 90 91 def __init__( 92 self, 93 exception, 94 message="Invalid type cast requested.", 95 field=None, 96 details=None, 97 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 98 ): 99 super().__init__(exception, self.error_code, message, field, details, status_code) 100 101 102 class DynamicDefaultAPIException(MathesarAPIException): 103 # Default message is not needed as the exception string provides enough details 104 error_code = ErrorCodes.UndefinedFunction.value 105 106 def __init__( 107 self, 108 exception, 109 message=None, 110 field=None, 111 details=None, 112 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 113 ): 114 super().__init__(exception, self.error_code, message, field, details, status_code) 115 116 117 class UnsupportedTypeAPIException(MathesarAPIException): 118 # Default message is not needed as the exception string provides enough details 119 error_code = ErrorCodes.UnsupportedType.value 120 121 def __init__( 122 self, 123 exception, 124 message=None, 125 field=None, 126 details=None, 127 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 128 ): 129 super().__init__(exception, self.error_code, message, field, details, status_code) 130 131 132 class BadFilterAPIException(MathesarAPIException): 133 error_code = ErrorCodes.UnsupportedType.value 134 135 def __init__( 136 self, 137 exception, 138 message="Filter arguments are not correct", 139 field=None, 140 details=None, 141 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 142 ): 143 super().__init__(exception, self.error_code, message, field, details, status_code) 144 145 146 class BadSortAPIException(MathesarAPIException): 147 # Default message is not needed as the exception string provides enough details 148 error_code = ErrorCodes.UnsupportedType.value 149 150 def __init__( 151 self, 152 exception, 153 message=None, 154 field=None, 155 details=None, 156 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 157 ): 158 super().__init__(exception, self.error_code, message, field, details, status_code) 159 160 161 class BadGroupAPIException(MathesarAPIException): 162 # Default message is not needed as the exception string provides enough details 163 error_code = ErrorCodes.UnsupportedType.value 164 165 def __init__( 166 self, 167 exception, 168 message=None, 169 field=None, 170 details=None, 171 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 172 ): 173 super().__init__(exception, self.error_code, message, field, details, status_code) 174 175 176 class RaiseExceptionAPIException(MathesarAPIException): 177 """ 178 Exception raised inside a postgres function 179 """ 180 error_code = 
ErrorCodes.RaiseException.value 181 182 def __init__( 183 self, 184 exception, 185 message=None, 186 field=None, 187 details=None, 188 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 189 ): 190 super().__init__(exception, self.error_code, message, field, details, status_code) 191 192 193 class UndefinedFunctionAPIException(MathesarAPIException): 194 # Default message is not needed as the exception string provides enough details 195 error_code = ErrorCodes.UndefinedFunction.value 196 197 def __init__( 198 self, 199 exception, 200 message=None, 201 field=None, 202 details=None, 203 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 204 ): 205 super().__init__(exception, self.error_code, message, field, details, status_code) 206 207 208 class NotNullViolationAPIException(MathesarAPIException): 209 """ 210 Exception raised when trying to add not null constraint to column with null value 211 or when trying to add non-null value to a column with not null constraint 212 """ 213 error_code = ErrorCodes.NotNullViolation.value 214 215 def __init__( 216 self, exception, 217 message=None, 218 field=None, 219 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR 220 ): 221 message_str, row_detail = exception.orig.args[0].split("DETAIL") 222 message_str = message if message is not None else message_str 223 details = {'row_parameters': exception.params, 'row_detail': row_detail} 224 super().__init__(exception, self.error_code, message_str, field, details, status_code) 225 ``` Path: `mathesar/api/db/viewsets/records.py` Content: ``` 1 from psycopg2.errors import NotNullViolation 2 3 from rest_framework import status, viewsets 4 from rest_framework.exceptions import NotFound 5 from rest_framework.response import Response 6 from rest_framework.renderers import BrowsableAPIRenderer 7 from sqlalchemy.exc import IntegrityError 8 from sqlalchemy_filters.exceptions import BadFilterFormat, BadSortFormat, FilterFieldNotFound, SortFieldNotFound 9 10 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions 11 from db.records.exceptions import BadGroupFormat, GroupFieldNotFound, InvalidGroupType 12 from mathesar.api.pagination import TableLimitOffsetGroupPagination 13 from mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer 14 from mathesar.api.utils import get_table_or_404 15 from mathesar.models import Table 16 from mathesar.utils.json import MathesarJSONRenderer 17 18 19 class RecordViewSet(viewsets.ViewSet): 20 # There is no 'update' method. 21 # We're not supporting PUT requests because there aren't a lot of use cases 22 # where the entire record needs to be replaced, PATCH suffices for updates. 
23 def get_queryset(self): 24 return Table.objects.all().order_by('-created_at') 25 26 renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer] 27 28 # For filter parameter formatting, see: 29 # https://github.com/centerofci/sqlalchemy-filters#filters-format 30 # For sorting parameter formatting, see: 31 # https://github.com/centerofci/sqlalchemy-filters#sort-format 32 def list(self, request, table_pk=None): 33 paginator = TableLimitOffsetGroupPagination() 34 35 serializer = RecordListParameterSerializer(data=request.GET) 36 serializer.is_valid(raise_exception=True) 37 38 try: 39 records = paginator.paginate_queryset( 40 self.get_queryset(), request, table_pk, 41 filters=serializer.validated_data['filters'], 42 order_by=serializer.validated_data['order_by'], 43 grouping=serializer.validated_data['grouping'], 44 ) 45 except (BadFilterFormat, FilterFieldNotFound) as e: 46 raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST) 47 except (BadSortFormat, SortFieldNotFound) as e: 48 raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST) 49 except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e: 50 raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST) 51 52 serializer = RecordSerializer(records, many=True) 53 return paginator.get_paginated_response(serializer.data) 54 55 def retrieve(self, request, pk=None, table_pk=None): 56 table = get_table_or_404(table_pk) 57 record = table.get_record(pk) 58 if not record: 59 raise NotFound 60 serializer = RecordSerializer(record) 61 return Response(serializer.data) 62 63 def create(self, request, table_pk=None): 64 table = get_table_or_404(table_pk) 65 # We only support adding a single record through the API. 66 assert isinstance((request.data), dict) 67 try: 68 record = table.create_record_or_records(request.data) 69 except IntegrityError as e: 70 if e.orig == NotNullViolation: 71 raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST) 72 else: 73 raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST) 74 serializer = RecordSerializer(record) 75 return Response(serializer.data, status=status.HTTP_201_CREATED) 76 77 def partial_update(self, request, pk=None, table_pk=None): 78 table = get_table_or_404(table_pk) 79 record = table.update_record(pk, request.data) 80 serializer = RecordSerializer(record) 81 return Response(serializer.data) 82 83 def destroy(self, request, pk=None, table_pk=None): 84 table = get_table_or_404(table_pk) 85 table.delete_record(pk) 86 return Response(status=status.HTTP_204_NO_CONTENT) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py --- a/mathesar/api/db/viewsets/records.py +++ b/mathesar/api/db/viewsets/records.py @@ -43,11 +43,23 @@ grouping=serializer.validated_data['grouping'], ) except (BadFilterFormat, FilterFieldNotFound) as e: - raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST) + raise database_api_exceptions.BadFilterAPIException( + e, + field='filters', + status_code=status.HTTP_400_BAD_REQUEST + ) except (BadSortFormat, SortFieldNotFound) as e: - raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST) + raise database_api_exceptions.BadSortAPIException( + e, + field='order_by', + status_code=status.HTTP_400_BAD_REQUEST + ) except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e: - raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST) + raise database_api_exceptions.BadGroupAPIException( + e, + field='grouping', + status_code=status.HTTP_400_BAD_REQUEST + ) serializer = RecordSerializer(records, many=True) return paginator.get_paginated_response(serializer.data) @@ -67,8 +79,12 @@ try: record = table.create_record_or_records(request.data) except IntegrityError as e: - if e.orig == NotNullViolation: - raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST) + if type(e.orig) == NotNullViolation: + raise database_api_exceptions.NotNullViolationAPIException( + e, + status_code=status.HTTP_400_BAD_REQUEST, + table=table + ) else: raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST) serializer = RecordSerializer(record) diff --git a/mathesar/api/exceptions/database_exceptions/exceptions.py b/mathesar/api/exceptions/database_exceptions/exceptions.py --- a/mathesar/api/exceptions/database_exceptions/exceptions.py +++ b/mathesar/api/exceptions/database_exceptions/exceptions.py @@ -1,11 +1,13 @@ from rest_framework import status +from db.columns.operations.select import get_columns_attnum_from_names from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException from mathesar.api.exceptions.error_codes import ErrorCodes from mathesar.api.exceptions.generic_exceptions.base_exceptions import ( MathesarAPIException, get_default_exception_detail, ) +from mathesar.models import Column class UniqueViolationAPIException(MathesarAPIException): @@ -216,9 +218,19 @@ self, exception, message=None, field=None, - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + table=None ): - message_str, row_detail = exception.orig.args[0].split("DETAIL") - message_str = message if message is not None else message_str - details = {'row_parameters': exception.params, 'row_detail': row_detail} + exception_diagnostics = exception.orig.diag + message_str = message if message is not None else exception_diagnostics.message_primary + column_attnum = get_columns_attnum_from_names( + table.oid, + [exception.orig.diag.column_name], + table.schema._sa_engine + )[0][0] + column = Column.objects.get(attnum=column_attnum) + details = { + 'record_detail': exception_diagnostics.message_detail, + 'column_id': column.id + } super().__init__(exception, self.error_code, message_str, field, details, status_code)
{"golden_diff": "diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py\n--- a/mathesar/api/db/viewsets/records.py\n+++ b/mathesar/api/db/viewsets/records.py\n@@ -43,11 +43,23 @@\n grouping=serializer.validated_data['grouping'],\n )\n except (BadFilterFormat, FilterFieldNotFound) as e:\n- raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST)\n+ raise database_api_exceptions.BadFilterAPIException(\n+ e,\n+ field='filters',\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n except (BadSortFormat, SortFieldNotFound) as e:\n- raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST)\n+ raise database_api_exceptions.BadSortAPIException(\n+ e,\n+ field='order_by',\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n- raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST)\n+ raise database_api_exceptions.BadGroupAPIException(\n+ e,\n+ field='grouping',\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n \n serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n@@ -67,8 +79,12 @@\n try:\n record = table.create_record_or_records(request.data)\n except IntegrityError as e:\n- if e.orig == NotNullViolation:\n- raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n+ if type(e.orig) == NotNullViolation:\n+ raise database_api_exceptions.NotNullViolationAPIException(\n+ e,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ table=table\n+ )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n serializer = RecordSerializer(record)\ndiff --git a/mathesar/api/exceptions/database_exceptions/exceptions.py b/mathesar/api/exceptions/database_exceptions/exceptions.py\n--- a/mathesar/api/exceptions/database_exceptions/exceptions.py\n+++ b/mathesar/api/exceptions/database_exceptions/exceptions.py\n@@ -1,11 +1,13 @@\n from rest_framework import status\n \n+from db.columns.operations.select import get_columns_attnum_from_names\n from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\n from mathesar.api.exceptions.error_codes import ErrorCodes\n from mathesar.api.exceptions.generic_exceptions.base_exceptions import (\n MathesarAPIException,\n get_default_exception_detail,\n )\n+from mathesar.models import Column\n \n \n class UniqueViolationAPIException(MathesarAPIException):\n@@ -216,9 +218,19 @@\n self, exception,\n message=None,\n field=None,\n- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n+ table=None\n ):\n- message_str, row_detail = exception.orig.args[0].split(\"DETAIL\")\n- message_str = message if message is not None else message_str\n- details = {'row_parameters': exception.params, 'row_detail': row_detail}\n+ exception_diagnostics = exception.orig.diag\n+ message_str = message if message is not None else exception_diagnostics.message_primary\n+ column_attnum = get_columns_attnum_from_names(\n+ table.oid,\n+ [exception.orig.diag.column_name],\n+ table.schema._sa_engine\n+ )[0][0]\n+ column = Column.objects.get(attnum=column_attnum)\n+ details = {\n+ 'record_detail': exception_diagnostics.message_detail,\n+ 'column_id': column.id\n+ }\n super().__init__(exception, self.error_code, message_str, 
field, details, status_code)\n", "issue": "Provide detailed error info for POST to records API when in violation of a not-null constraint\n## Current behavior\r\n\r\n1. Set up a table which contains a `NOT NULL` column.\r\n1. POST an empty request (`{}`) to the `records` API (e.g. by clicking the `+` icon to add a new row to the table).\r\n1. Observe the following response:\r\n\r\n ```json\r\n [\r\n {\r\n \"code\": 4999,\r\n \"message\": \"(psycopg2.errors.NotNullViolation) null value in column \\\"common_name\\\" of relation \\\"trees\\\" violates not-null constraint\\nDETAIL: Failing row contains (11, null, null, null, null, null, null).\\n\\n[SQL: INSERT INTO public.trees DEFAULT VALUES RETURNING public.trees.id]\\n(Background on this error at: http://sqlalche.me/e/14/gkpj)\",\r\n \"field\": null,\r\n \"detail\": null\r\n }\r\n ]\r\n ```\r\n\r\n1. Within the response, observe no easy way to identify _which column_ (or columns) was responsible for the error.\r\n\r\n## Expected behavior\r\n\r\n- For #775, we need enough detail for the front end to be able to display error messages specific to certain cells. For example, if a POST request has two columns with invalid data (perhaps even invalid in _different ways_), we need to be able to match specific error messages to each of those columns.\r\n\r\n- @pavish and I are flexible on the exact response schema from the API here, as long as it satisfies the above goal.\r\n\r\n- We should also keep #896 in-mind with this change.\r\n\r\n## Status\r\n\r\n- blocked by #896\r\n\n", "before_files": [{"content": "from rest_framework import status\n\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import (\n MathesarAPIException,\n get_default_exception_detail,\n)\n\n\nclass UniqueViolationAPIException(MathesarAPIException):\n error_code = ErrorCodes.UniqueViolation.value\n\n def __init__(\n self,\n exception,\n message=\"This column has non-unique values so a unique constraint cannot be set\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n exception_detail = get_default_exception_detail(exception, self.error_code, message, field, details)._asdict()\n self.detail = [exception_detail]\n self.status_code = status_code\n\n\nclass DuplicateTableAPIException(ProgrammingAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.DuplicateTableError.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass DuplicateColumnAPIException(ProgrammingAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.DuplicateColumnError.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidDefaultAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidDefault.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n 
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidTypeOptionAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidTypeOption.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidTypeCastAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidTypeCast.value\n\n def __init__(\n self,\n exception,\n message=\"Invalid type cast requested.\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass DynamicDefaultAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UndefinedFunction.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass UnsupportedTypeAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadFilterAPIException(MathesarAPIException):\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=\"Filter arguments are not correct\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadSortAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadGroupAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass RaiseExceptionAPIException(MathesarAPIException):\n \"\"\"\n Exception raised inside a postgres function\n \"\"\"\n error_code = ErrorCodes.RaiseException.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass UndefinedFunctionAPIException(MathesarAPIException):\n # Default message is not needed as the exception 
string provides enough details\n error_code = ErrorCodes.UndefinedFunction.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass NotNullViolationAPIException(MathesarAPIException):\n \"\"\"\n Exception raised when trying to add not null constraint to column with null value\n or when trying to add non-null value to a column with not null constraint\n \"\"\"\n error_code = ErrorCodes.NotNullViolation.value\n\n def __init__(\n self, exception,\n message=None,\n field=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n message_str, row_detail = exception.orig.args[0].split(\"DETAIL\")\n message_str = message if message is not None else message_str\n details = {'row_parameters': exception.params, 'row_detail': row_detail}\n super().__init__(exception, self.error_code, message_str, field, details, status_code)\n", "path": "mathesar/api/exceptions/database_exceptions/exceptions.py"}, {"content": "from psycopg2.errors import NotNullViolation\n\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom rest_framework.renderers import BrowsableAPIRenderer\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy_filters.exceptions import BadFilterFormat, BadSortFormat, FilterFieldNotFound, SortFieldNotFound\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom db.records.exceptions import BadGroupFormat, GroupFieldNotFound, InvalidGroupType\nfrom mathesar.api.pagination import TableLimitOffsetGroupPagination\nfrom mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.models import Table\nfrom mathesar.utils.json import MathesarJSONRenderer\n\n\nclass RecordViewSet(viewsets.ViewSet):\n # There is no 'update' method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]\n\n # For filter parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#filters-format\n # For sorting parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#sort-format\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetGroupPagination()\n\n serializer = RecordListParameterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n\n try:\n records = paginator.paginate_queryset(\n self.get_queryset(), request, table_pk,\n filters=serializer.validated_data['filters'],\n order_by=serializer.validated_data['order_by'],\n grouping=serializer.validated_data['grouping'],\n )\n except (BadFilterFormat, FilterFieldNotFound) as e:\n raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST)\n except (BadSortFormat, SortFieldNotFound) as e:\n raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST)\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST)\n\n 
serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n record = table.get_record(pk)\n if not record:\n raise NotFound\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n # We only support adding a single record through the API.\n assert isinstance((request.data), dict)\n try:\n record = table.create_record_or_records(request.data)\n except IntegrityError as e:\n if e.orig == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n serializer = RecordSerializer(record)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n record = table.update_record(pk, request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n table.delete_record(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "path": "mathesar/api/db/viewsets/records.py"}], "after_files": [{"content": "from rest_framework import status\n\nfrom db.columns.operations.select import get_columns_attnum_from_names\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import (\n MathesarAPIException,\n get_default_exception_detail,\n)\nfrom mathesar.models import Column\n\n\nclass UniqueViolationAPIException(MathesarAPIException):\n error_code = ErrorCodes.UniqueViolation.value\n\n def __init__(\n self,\n exception,\n message=\"This column has non-unique values so a unique constraint cannot be set\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n exception_detail = get_default_exception_detail(exception, self.error_code, message, field, details)._asdict()\n self.detail = [exception_detail]\n self.status_code = status_code\n\n\nclass DuplicateTableAPIException(ProgrammingAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.DuplicateTableError.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass DuplicateColumnAPIException(ProgrammingAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.DuplicateColumnError.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidDefaultAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidDefault.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n 
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidTypeOptionAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidTypeOption.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidTypeCastAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidTypeCast.value\n\n def __init__(\n self,\n exception,\n message=\"Invalid type cast requested.\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass DynamicDefaultAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UndefinedFunction.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass UnsupportedTypeAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadFilterAPIException(MathesarAPIException):\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=\"Filter arguments are not correct\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadSortAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadGroupAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass RaiseExceptionAPIException(MathesarAPIException):\n \"\"\"\n Exception raised inside a postgres function\n \"\"\"\n error_code = ErrorCodes.RaiseException.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass UndefinedFunctionAPIException(MathesarAPIException):\n # Default message is not needed as the exception 
string provides enough details\n error_code = ErrorCodes.UndefinedFunction.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass NotNullViolationAPIException(MathesarAPIException):\n \"\"\"\n Exception raised when trying to add not null constraint to column with null value\n or when trying to add non-null value to a column with not null constraint\n \"\"\"\n error_code = ErrorCodes.NotNullViolation.value\n\n def __init__(\n self, exception,\n message=None,\n field=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n table=None\n ):\n exception_diagnostics = exception.orig.diag\n message_str = message if message is not None else exception_diagnostics.message_primary\n column_attnum = get_columns_attnum_from_names(\n table.oid,\n [exception.orig.diag.column_name],\n table.schema._sa_engine\n )[0][0]\n column = Column.objects.get(attnum=column_attnum)\n details = {\n 'record_detail': exception_diagnostics.message_detail,\n 'column_id': column.id\n }\n super().__init__(exception, self.error_code, message_str, field, details, status_code)\n", "path": "mathesar/api/exceptions/database_exceptions/exceptions.py"}, {"content": "from psycopg2.errors import NotNullViolation\n\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom rest_framework.renderers import BrowsableAPIRenderer\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy_filters.exceptions import BadFilterFormat, BadSortFormat, FilterFieldNotFound, SortFieldNotFound\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom db.records.exceptions import BadGroupFormat, GroupFieldNotFound, InvalidGroupType\nfrom mathesar.api.pagination import TableLimitOffsetGroupPagination\nfrom mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.models import Table\nfrom mathesar.utils.json import MathesarJSONRenderer\n\n\nclass RecordViewSet(viewsets.ViewSet):\n # There is no 'update' method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]\n\n # For filter parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#filters-format\n # For sorting parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#sort-format\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetGroupPagination()\n\n serializer = RecordListParameterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n\n try:\n records = paginator.paginate_queryset(\n self.get_queryset(), request, table_pk,\n filters=serializer.validated_data['filters'],\n order_by=serializer.validated_data['order_by'],\n grouping=serializer.validated_data['grouping'],\n )\n except (BadFilterFormat, FilterFieldNotFound) as e:\n raise database_api_exceptions.BadFilterAPIException(\n e,\n field='filters',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadSortFormat, SortFieldNotFound) as e:\n raise database_api_exceptions.BadSortAPIException(\n e,\n 
field='order_by',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n raise database_api_exceptions.BadGroupAPIException(\n e,\n field='grouping',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n\n serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n record = table.get_record(pk)\n if not record:\n raise NotFound\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n # We only support adding a single record through the API.\n assert isinstance((request.data), dict)\n try:\n record = table.create_record_or_records(request.data)\n except IntegrityError as e:\n if type(e.orig) == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n table=table\n )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n serializer = RecordSerializer(record)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n record = table.update_record(pk, request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n table.delete_record(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "path": "mathesar/api/db/viewsets/records.py"}]}
3,723
892
gh_patches_debug_17331
rasdani/github-patches
git_diff
wemake-services__wemake-python-styleguide-914
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow `super()` as overused expression We need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114 It is obviously allowed. Then we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py That's it. It is beginner friendly. Allow `super()` as overused expression We need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114 It is obviously allowed. Then we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py That's it. It is beginner friendly. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wemake_python_styleguide/visitors/ast/complexity/overuses.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import ast 4 from collections import defaultdict 5 from typing import ClassVar, DefaultDict, List, Union 6 7 from typing_extensions import final 8 9 from wemake_python_styleguide.compat.aliases import FunctionNodes 10 from wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST 11 from wemake_python_styleguide.logic import nodes, source, walk 12 from wemake_python_styleguide.types import AnyNodes 13 from wemake_python_styleguide.violations import complexity 14 from wemake_python_styleguide.visitors import base 15 16 _Expressions = DefaultDict[str, List[ast.AST]] 17 _FunctionExpressions = DefaultDict[ast.AST, _Expressions] 18 _Annotated = Union[ast.arg, ast.AnnAssign] 19 20 _AnnNodes = (ast.AnnAssign, ast.arg) 21 22 23 @final 24 class StringOveruseVisitor(base.BaseNodeVisitor): 25 """Restricts several string usages.""" 26 27 def __init__(self, *args, **kwargs) -> None: 28 """Inits the counter for constants.""" 29 super().__init__(*args, **kwargs) 30 self._string_constants: DefaultDict[str, int] = defaultdict(int) 31 32 def visit_Str(self, node: ast.Str) -> None: 33 """ 34 Restricts to over-use string constants. 
35 36 Raises: 37 OverusedStringViolation 38 39 """ 40 self._check_string_constant(node) 41 self.generic_visit(node) 42 43 def _check_string_constant(self, node: ast.Str) -> None: 44 parent = nodes.get_parent(node) 45 if isinstance(parent, _AnnNodes) and parent.annotation == node: 46 return # it is argument or variable annotation 47 48 if isinstance(parent, FunctionNodes) and parent.returns == node: 49 return # it is return annotation 50 51 self._string_constants[node.s] += 1 52 53 def _post_visit(self) -> None: 54 for string, usage_count in self._string_constants.items(): 55 if usage_count > self.options.max_string_usages: 56 self.add_violation( 57 complexity.OverusedStringViolation(text=string or "''"), 58 ) 59 60 61 @final 62 class ExpressionOveruseVisitor(base.BaseNodeVisitor): 63 """Finds overused expressions.""" 64 65 _expressions: ClassVar[AnyNodes] = ( 66 # We do not treat `ast.Attribute`s as expressions 67 # because they are too widely used. That's a compromise. 68 ast.Assert, 69 ast.BoolOp, 70 ast.BinOp, 71 ast.Call, 72 ast.Compare, 73 ast.Starred, 74 ast.Subscript, 75 ast.UnaryOp, 76 ast.Lambda, 77 78 ast.DictComp, 79 ast.Dict, 80 ast.List, 81 ast.ListComp, 82 ast.Tuple, 83 ast.GeneratorExp, 84 ast.Set, 85 ast.SetComp, 86 ) 87 88 _msg: ClassVar[str] = '{0}; used {1} times' 89 90 def __init__(self, *args, **kwargs) -> None: 91 """We need to track expression usage in functions and modules.""" 92 super().__init__(*args, **kwargs) 93 self._module_expressions: _Expressions = defaultdict(list) 94 self._function_expressions: _FunctionExpressions = defaultdict( 95 lambda: defaultdict(list), 96 ) 97 98 def visit(self, node: ast.AST) -> None: 99 """ 100 Visits all nodes in a module to find overused values. 101 102 Raises: 103 OverusedExpressionViolation 104 105 """ 106 if isinstance(node, self._expressions): 107 self._add_expression(node) 108 self.generic_visit(node) 109 110 def _add_expression(self, node: ast.AST) -> None: 111 ignore_predicates = [ 112 self._is_decorator, 113 self._is_self_method, 114 self._is_annotation, 115 116 # We use this predicate because classes have quite complex 117 # DSL to be created: like django-orm, attrs, and dataclasses. 118 # And these DSLs are built using attributes and calls. 
119 _is_class_context, 120 ] 121 if any(ignore(node) for ignore in ignore_predicates): 122 return 123 124 source_code = source.node_to_string(node) 125 self._module_expressions[source_code].append(node) 126 127 maybe_function = walk.get_closest_parent(node, FunctionNodes) 128 if maybe_function is not None: 129 self._function_expressions[maybe_function][source_code].append( 130 node, 131 ) 132 133 def _is_decorator( 134 self, 135 node: ast.AST, 136 ) -> bool: 137 parent = walk.get_closest_parent(node, FunctionNodes) 138 if isinstance(parent, FunctionNodes) and parent.decorator_list: 139 return any( 140 node == decorator or walk.is_contained_by(node, decorator) 141 for decorator in parent.decorator_list 142 ) 143 return False 144 145 def _is_self_method(self, node: ast.AST) -> bool: 146 if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute): 147 if isinstance(node.func.value, ast.Name): 148 if node.func.value.id in SPECIAL_ARGUMENT_NAMES_WHITELIST: 149 return True 150 return False 151 152 def _is_annotation(self, node: ast.AST) -> bool: 153 typed_assign = walk.get_closest_parent( 154 node, 155 (ast.AnnAssign, ast.arg), 156 ) 157 158 if isinstance(typed_assign, _AnnNodes) and typed_assign.annotation: 159 is_same_node = node == typed_assign.annotation 160 is_child_annotation = walk.is_contained_by( 161 node, typed_assign.annotation, 162 ) 163 return is_same_node or is_child_annotation 164 return False 165 166 def _post_visit(self) -> None: 167 for mod_source, module_nodes in self._module_expressions.items(): 168 if len(module_nodes) > self.options.max_module_expressions: 169 self.add_violation( 170 complexity.OverusedExpressionViolation( 171 module_nodes[0], 172 text=self._msg.format(mod_source, len(module_nodes)), 173 ), 174 ) 175 176 for function_contexts in self._function_expressions.values(): 177 for src, function_nodes in function_contexts.items(): 178 if len(function_nodes) > self.options.max_function_expressions: 179 self.add_violation( 180 complexity.OverusedExpressionViolation( 181 function_nodes[0], 182 text=self._msg.format(src, len(function_nodes)), 183 ), 184 ) 185 186 187 def _is_class_context(node: ast.AST) -> bool: 188 return isinstance(nodes.get_context(node), ast.ClassDef) 189 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py --- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py +++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py @@ -117,6 +117,7 @@ # DSL to be created: like django-orm, attrs, and dataclasses. # And these DSLs are built using attributes and calls. _is_class_context, + _is_super_call, ] if any(ignore(node) for ignore in ignore_predicates): return @@ -186,3 +187,9 @@ def _is_class_context(node: ast.AST) -> bool: return isinstance(nodes.get_context(node), ast.ClassDef) + + +def _is_super_call(node: ast.AST) -> bool: + if isinstance(node, ast.Call) and isinstance(node.func, ast.Name): + return node.func.id == 'super' + return False
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n--- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n+++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n@@ -117,6 +117,7 @@\n # DSL to be created: like django-orm, attrs, and dataclasses.\n # And these DSLs are built using attributes and calls.\n _is_class_context,\n+ _is_super_call,\n ]\n if any(ignore(node) for ignore in ignore_predicates):\n return\n@@ -186,3 +187,9 @@\n \n def _is_class_context(node: ast.AST) -> bool:\n return isinstance(nodes.get_context(node), ast.ClassDef)\n+\n+\n+def _is_super_call(node: ast.AST) -> bool:\n+ if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):\n+ return node.func.id == 'super'\n+ return False\n", "issue": "Allow `super()` as overused expression\nWe need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114\r\n\r\nIt is obviously allowed.\r\n\r\nThen we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py\r\n\r\nThat's it. It is beginner friendly.\nAllow `super()` as overused expression\nWe need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114\r\n\r\nIt is obviously allowed.\r\n\r\nThen we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py\r\n\r\nThat's it. 
It is beginner friendly.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, List, Union\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST\nfrom wemake_python_styleguide.logic import nodes, source, walk\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base\n\n_Expressions = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions = DefaultDict[ast.AST, _Expressions]\n_Annotated = Union[ast.arg, ast.AnnAssign]\n\n_AnnNodes = (ast.AnnAssign, ast.arg)\n\n\n@final\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Restricts several string usages.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[str, int] = defaultdict(int)\n\n def visit_Str(self, node: ast.Str) -> None:\n \"\"\"\n Restricts to over-use string constants.\n\n Raises:\n OverusedStringViolation\n\n \"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: ast.Str) -> None:\n parent = nodes.get_parent(node)\n if isinstance(parent, _AnnNodes) and parent.annotation == node:\n return # it is argument or variable annotation\n\n if isinstance(parent, FunctionNodes) and parent.returns == node:\n return # it is return annotation\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(text=string or \"''\"),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. 
That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Starred,\n ast.Subscript,\n ast.UnaryOp,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _msg: ClassVar[str] = '{0}; used {1} times'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes in a module to find overused values.\n\n Raises:\n OverusedExpressionViolation\n\n \"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n ignore_predicates = [\n self._is_decorator,\n self._is_self_method,\n self._is_annotation,\n\n # We use this predicate because classes have quite complex\n # DSL to be created: like django-orm, attrs, and dataclasses.\n # And these DSLs are built using attributes and calls.\n _is_class_context,\n ]\n if any(ignore(node) for ignore in ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _is_decorator(\n self,\n node: ast.AST,\n ) -> bool:\n parent = walk.get_closest_parent(node, FunctionNodes)\n if isinstance(parent, FunctionNodes) and parent.decorator_list:\n return any(\n node == decorator or walk.is_contained_by(node, decorator)\n for decorator in parent.decorator_list\n )\n return False\n\n def _is_self_method(self, node: ast.AST) -> bool:\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):\n if isinstance(node.func.value, ast.Name):\n if node.func.value.id in SPECIAL_ARGUMENT_NAMES_WHITELIST:\n return True\n return False\n\n def _is_annotation(self, node: ast.AST) -> bool:\n typed_assign = walk.get_closest_parent(\n node,\n (ast.AnnAssign, ast.arg),\n )\n\n if isinstance(typed_assign, _AnnNodes) and typed_assign.annotation:\n is_same_node = node == typed_assign.annotation\n is_child_annotation = walk.is_contained_by(\n node, typed_assign.annotation,\n )\n return is_same_node or is_child_annotation\n return False\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n ),\n )\n\n\ndef _is_class_context(node: ast.AST) -> bool:\n return isinstance(nodes.get_context(node), ast.ClassDef)\n", "path": "wemake_python_styleguide/visitors/ast/complexity/overuses.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, 
List, Union\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST\nfrom wemake_python_styleguide.logic import nodes, source, walk\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base\n\n_Expressions = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions = DefaultDict[ast.AST, _Expressions]\n_Annotated = Union[ast.arg, ast.AnnAssign]\n\n_AnnNodes = (ast.AnnAssign, ast.arg)\n\n\n@final\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Restricts several string usages.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[str, int] = defaultdict(int)\n\n def visit_Str(self, node: ast.Str) -> None:\n \"\"\"\n Restricts to over-use string constants.\n\n Raises:\n OverusedStringViolation\n\n \"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: ast.Str) -> None:\n parent = nodes.get_parent(node)\n if isinstance(parent, _AnnNodes) and parent.annotation == node:\n return # it is argument or variable annotation\n\n if isinstance(parent, FunctionNodes) and parent.returns == node:\n return # it is return annotation\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(text=string or \"''\"),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. 
That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Starred,\n ast.Subscript,\n ast.UnaryOp,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _msg: ClassVar[str] = '{0}; used {1} times'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes in a module to find overused values.\n\n Raises:\n OverusedExpressionViolation\n\n \"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n ignore_predicates = [\n self._is_decorator,\n self._is_self_method,\n self._is_annotation,\n\n # We use this predicate because classes have quite complex\n # DSL to be created: like django-orm, attrs, and dataclasses.\n # And these DSLs are built using attributes and calls.\n _is_class_context,\n _is_super_call,\n ]\n if any(ignore(node) for ignore in ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _is_decorator(\n self,\n node: ast.AST,\n ) -> bool:\n parent = walk.get_closest_parent(node, FunctionNodes)\n if isinstance(parent, FunctionNodes) and parent.decorator_list:\n return any(\n node == decorator or walk.is_contained_by(node, decorator)\n for decorator in parent.decorator_list\n )\n return False\n\n def _is_self_method(self, node: ast.AST) -> bool:\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):\n if isinstance(node.func.value, ast.Name):\n if node.func.value.id in SPECIAL_ARGUMENT_NAMES_WHITELIST:\n return True\n return False\n\n def _is_annotation(self, node: ast.AST) -> bool:\n typed_assign = walk.get_closest_parent(\n node,\n (ast.AnnAssign, ast.arg),\n )\n\n if isinstance(typed_assign, _AnnNodes) and typed_assign.annotation:\n is_same_node = node == typed_assign.annotation\n is_child_annotation = walk.is_contained_by(\n node, typed_assign.annotation,\n )\n return is_same_node or is_child_annotation\n return False\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n ),\n )\n\n\ndef _is_class_context(node: ast.AST) -> bool:\n return isinstance(nodes.get_context(node), ast.ClassDef)\n\n\ndef _is_super_call(node: ast.AST) -> bool:\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):\n return node.func.id == 'super'\n return False\n", "path": 
"wemake_python_styleguide/visitors/ast/complexity/overuses.py"}]}
num_tokens: 2,498
num_tokens_diff: 250

problem_id: gh_patches_debug_31083
source: rasdani/github-patches
task_type: git_diff
in_source_id: sunpy__sunpy-6855
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Suggest the use of `assume_spherical_screen()` in the autoalign/reproject examples These two examples: * Autoaligning: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/autoalign_aia_hmi.html * Reprojecting: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/reprojection_align_aia_hmi.html are silent about the fact that off-disk HMI data is being discarded. The examples should note that, and additionally suggest that the user can use `Helioprojective.assume_spherical_screen()` to retain the off-disk HMI data. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/map_transformations/reprojection_align_aia_hmi.py` Content: ``` 1 """ 2 ======================================== 3 Aligning AIA and HMI Data with Reproject 4 ======================================== 5 6 This example shows how to reproject one image to be aligned with another image. 7 8 Here we use the `reproject <https://reproject.readthedocs.io/en/stable/>`__ 9 package to transform one image to the reference frame of another image. The 10 reference frames are defined by the respective World Coordinate System (WCS) 11 information. This is a very generic way of aligning data, and can be very 12 accurate. 13 14 You will need ``reproject`` v0.6 or higher installed. 15 16 See :ref:`sphx_glr_generated_gallery_map_transformations_autoalign_aia_hmi.py` 17 for an alternate approach to image alignment, where the images are aligned 18 as they are plotted, with no modified map created. 19 """ 20 import matplotlib.pyplot as plt 21 22 import astropy.units as u 23 24 import sunpy.data.sample 25 import sunpy.map 26 27 ###################################################################### 28 # We use the AIA image and HMI image from the sample data. For the 29 # HMI map, we use the special HMI color map, which expects the plotted 30 # range to be -1500 to 1500. 31 32 map_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) 33 map_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE) 34 map_hmi.plot_settings['cmap'] = "hmimag" 35 map_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500) 36 37 ###################################################################### 38 # Plot both images side by side. 39 40 fig = plt.figure(figsize=(12, 5)) 41 ax1 = fig.add_subplot(121, projection=map_aia) 42 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) 43 ax2 = fig.add_subplot(122, projection=map_hmi) 44 map_hmi.plot(axes=ax2) 45 46 ###################################################################### 47 # We can now reproject the HMI image to the WCS of the AIA image. We are using 48 # the fast `~reproject.reproject_interp`, however the slower but most accurate 49 # `~reproject.reproject_exact` would also work well here. The 50 # `~reproject.reproject_exact` function only works when reprojecting between 51 # two WCSes with the same observer, which makes it well suited to aligning 52 # data. 53 54 out_hmi = map_hmi.reproject_to(map_aia.wcs) 55 56 ###################################################################### 57 # Plot the images side by side. 
58 59 fig = plt.figure(figsize=(12, 5)) 60 ax1 = fig.add_subplot(121, projection=map_aia) 61 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) 62 ax2 = fig.add_subplot(122, projection=out_hmi) 63 out_hmi.plot(axes=ax2, title='Reprojected HMI image') 64 65 ###################################################################### 66 # As both of these images are now on the same pixel grid we can directly plot 67 # them over one another, by setting the transparency of the HMI plot. 68 69 fig = plt.figure() 70 ax1 = fig.add_subplot(projection=map_aia) 71 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) 72 out_hmi.plot(axes=ax1, alpha=0.5) 73 plt.title('HMI overlaid on AIA') 74 75 plt.show() 76 77 # sphinx_gallery_thumbnail_number = 2 78 ``` Path: `examples/map_transformations/autoalign_aia_hmi.py` Content: ``` 1 """ 2 ============================================== 3 Auto-Aligning AIA and HMI Data During Plotting 4 ============================================== 5 6 This example shows how to auto-align two images with different reference frames 7 during plotting. 8 9 Here we use the optional keyword ``autoalign`` when calling Map's 10 :meth:`~sunpy.map.GenericMap.plot` method. The reference frames are defined by 11 the respective World Coordinate System (WCS) information. 12 13 See :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_align_aia_hmi.py` 14 for an alternate approach to image alignment, where one of the maps is modified 15 prior to plotting, and thus is available for purposes other than plotting. 16 """ 17 import matplotlib.pyplot as plt 18 19 import astropy.units as u 20 21 import sunpy.data.sample 22 import sunpy.map 23 24 ###################################################################### 25 # We use the AIA image and HMI image from the sample data. For the 26 # HMI map, we use the special HMI color map, which expects the plotted 27 # range to be -1500 to 1500. 28 29 map_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) 30 map_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE) 31 map_hmi.plot_settings['cmap'] = "hmimag" 32 map_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500) 33 34 ###################################################################### 35 # Plot both images side by side. Note that the HMI image is oriented 36 # "upside down" relative to the AIA image. 37 38 fig = plt.figure(figsize=(12, 5)) 39 ax1 = fig.add_subplot(121, projection=map_aia) 40 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) 41 ax2 = fig.add_subplot(122, projection=map_hmi) 42 map_hmi.plot(axes=ax2) 43 44 ###################################################################### 45 # Setting ``autoalign=True`` allows plotting the HMI image onto axes 46 # defined by the AIA reference frame. In contrast to the above code 47 # block, we intentionally set the ``projection`` for the axes to be 48 # the AIA map # instead of the HMI map. We also need to manually set 49 # the plot limits because Matplotlib gets confused by the off-disk 50 # parts of the image. Note that the HMI image now has the same 51 # orientation as the AIA image. 
52 53 fig = plt.figure(figsize=(12, 5)) 54 ax1 = fig.add_subplot(121, projection=map_aia) 55 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) 56 ax2 = fig.add_subplot(122, projection=map_aia) 57 map_hmi.plot(axes=ax2, autoalign=True, title='HMI image in AIA reference frame') 58 ax2.axis(ax1.axis()) 59 60 ###################################################################### 61 # We can directly plot them over one another, by setting the 62 # transparency of the HMI plot. 63 64 fig = plt.figure() 65 ax1 = fig.add_subplot(projection=map_aia) 66 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) 67 map_hmi.plot(axes=ax1, autoalign=True, alpha=0.5) 68 ax1.set_title('HMI overlaid on AIA') 69 70 plt.show() 71 72 # sphinx_gallery_thumbnail_number = 2 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/map_transformations/autoalign_aia_hmi.py b/examples/map_transformations/autoalign_aia_hmi.py --- a/examples/map_transformations/autoalign_aia_hmi.py +++ b/examples/map_transformations/autoalign_aia_hmi.py @@ -45,10 +45,18 @@ # Setting ``autoalign=True`` allows plotting the HMI image onto axes # defined by the AIA reference frame. In contrast to the above code # block, we intentionally set the ``projection`` for the axes to be -# the AIA map # instead of the HMI map. We also need to manually set +# the AIA map instead of the HMI map. We also need to manually set # the plot limits because Matplotlib gets confused by the off-disk -# parts of the image. Note that the HMI image now has the same +# parts of the image. The HMI image now has the same # orientation as the AIA image. +# +# Note that off-disk HMI data are not retained by default because an +# additional assumption is required to define the location of the HMI +# emission in 3D space. We can use +# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to +# retain the off-disk HMI data. See +# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py` +# for more reference. fig = plt.figure(figsize=(12, 5)) ax1 = fig.add_subplot(121, projection=map_aia) diff --git a/examples/map_transformations/reprojection_align_aia_hmi.py b/examples/map_transformations/reprojection_align_aia_hmi.py --- a/examples/map_transformations/reprojection_align_aia_hmi.py +++ b/examples/map_transformations/reprojection_align_aia_hmi.py @@ -55,6 +55,14 @@ ###################################################################### # Plot the images side by side. +# +# Note that off-disk HMI data are not retained by default because an +# additional assumption is required to define the location of the HMI +# emission in 3D space. We can use +# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to +# retain the off-disk HMI data. See +# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py` +# for more reference. fig = plt.figure(figsize=(12, 5)) ax1 = fig.add_subplot(121, projection=map_aia)
{"golden_diff": "diff --git a/examples/map_transformations/autoalign_aia_hmi.py b/examples/map_transformations/autoalign_aia_hmi.py\n--- a/examples/map_transformations/autoalign_aia_hmi.py\n+++ b/examples/map_transformations/autoalign_aia_hmi.py\n@@ -45,10 +45,18 @@\n # Setting ``autoalign=True`` allows plotting the HMI image onto axes\n # defined by the AIA reference frame. In contrast to the above code\n # block, we intentionally set the ``projection`` for the axes to be\n-# the AIA map # instead of the HMI map. We also need to manually set\n+# the AIA map instead of the HMI map. We also need to manually set\n # the plot limits because Matplotlib gets confused by the off-disk\n-# parts of the image. Note that the HMI image now has the same\n+# parts of the image. The HMI image now has the same\n # orientation as the AIA image.\n+#\n+# Note that off-disk HMI data are not retained by default because an\n+# additional assumption is required to define the location of the HMI\n+# emission in 3D space. We can use\n+# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to\n+# retain the off-disk HMI data. See\n+# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`\n+# for more reference.\n \n fig = plt.figure(figsize=(12, 5))\n ax1 = fig.add_subplot(121, projection=map_aia)\ndiff --git a/examples/map_transformations/reprojection_align_aia_hmi.py b/examples/map_transformations/reprojection_align_aia_hmi.py\n--- a/examples/map_transformations/reprojection_align_aia_hmi.py\n+++ b/examples/map_transformations/reprojection_align_aia_hmi.py\n@@ -55,6 +55,14 @@\n \n ######################################################################\n # Plot the images side by side.\n+#\n+# Note that off-disk HMI data are not retained by default because an\n+# additional assumption is required to define the location of the HMI\n+# emission in 3D space. We can use\n+# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to\n+# retain the off-disk HMI data. See\n+# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`\n+# for more reference.\n \n fig = plt.figure(figsize=(12, 5))\n ax1 = fig.add_subplot(121, projection=map_aia)\n", "issue": "Suggest the use of `assume_spherical_screen()` in the autoalign/reproject examples\nThese two examples:\r\n* Autoaligning: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/autoalign_aia_hmi.html\r\n* Reprojecting: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/reprojection_align_aia_hmi.html\r\n\r\nare silent about the fact that off-disk HMI data is being discarded. The examples should note that, and additionally suggest that the user can use `Helioprojective.assume_spherical_screen()` to retain the off-disk HMI data.\n", "before_files": [{"content": "\"\"\"\n========================================\nAligning AIA and HMI Data with Reproject\n========================================\n\nThis example shows how to reproject one image to be aligned with another image.\n\nHere we use the `reproject <https://reproject.readthedocs.io/en/stable/>`__\npackage to transform one image to the reference frame of another image. The\nreference frames are defined by the respective World Coordinate System (WCS)\ninformation. 
This is a very generic way of aligning data, and can be very\naccurate.\n\nYou will need ``reproject`` v0.6 or higher installed.\n\nSee :ref:`sphx_glr_generated_gallery_map_transformations_autoalign_aia_hmi.py`\nfor an alternate approach to image alignment, where the images are aligned\nas they are plotted, with no modified map created.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n######################################################################\n# We use the AIA image and HMI image from the sample data. For the\n# HMI map, we use the special HMI color map, which expects the plotted\n# range to be -1500 to 1500.\n\nmap_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\nmap_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)\nmap_hmi.plot_settings['cmap'] = \"hmimag\"\nmap_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)\n\n######################################################################\n# Plot both images side by side.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_hmi)\nmap_hmi.plot(axes=ax2)\n\n######################################################################\n# We can now reproject the HMI image to the WCS of the AIA image. We are using\n# the fast `~reproject.reproject_interp`, however the slower but most accurate\n# `~reproject.reproject_exact` would also work well here. The\n# `~reproject.reproject_exact` function only works when reprojecting between\n# two WCSes with the same observer, which makes it well suited to aligning\n# data.\n\nout_hmi = map_hmi.reproject_to(map_aia.wcs)\n\n######################################################################\n# Plot the images side by side.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=out_hmi)\nout_hmi.plot(axes=ax2, title='Reprojected HMI image')\n\n######################################################################\n# As both of these images are now on the same pixel grid we can directly plot\n# them over one another, by setting the transparency of the HMI plot.\n\nfig = plt.figure()\nax1 = fig.add_subplot(projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nout_hmi.plot(axes=ax1, alpha=0.5)\nplt.title('HMI overlaid on AIA')\n\nplt.show()\n\n# sphinx_gallery_thumbnail_number = 2\n", "path": "examples/map_transformations/reprojection_align_aia_hmi.py"}, {"content": "\"\"\"\n==============================================\nAuto-Aligning AIA and HMI Data During Plotting\n==============================================\n\nThis example shows how to auto-align two images with different reference frames\nduring plotting.\n\nHere we use the optional keyword ``autoalign`` when calling Map's\n:meth:`~sunpy.map.GenericMap.plot` method. 
The reference frames are defined by\nthe respective World Coordinate System (WCS) information.\n\nSee :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_align_aia_hmi.py`\nfor an alternate approach to image alignment, where one of the maps is modified\nprior to plotting, and thus is available for purposes other than plotting.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n######################################################################\n# We use the AIA image and HMI image from the sample data. For the\n# HMI map, we use the special HMI color map, which expects the plotted\n# range to be -1500 to 1500.\n\nmap_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\nmap_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)\nmap_hmi.plot_settings['cmap'] = \"hmimag\"\nmap_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)\n\n######################################################################\n# Plot both images side by side. Note that the HMI image is oriented\n# \"upside down\" relative to the AIA image.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_hmi)\nmap_hmi.plot(axes=ax2)\n\n######################################################################\n# Setting ``autoalign=True`` allows plotting the HMI image onto axes\n# defined by the AIA reference frame. In contrast to the above code\n# block, we intentionally set the ``projection`` for the axes to be\n# the AIA map # instead of the HMI map. We also need to manually set\n# the plot limits because Matplotlib gets confused by the off-disk\n# parts of the image. Note that the HMI image now has the same\n# orientation as the AIA image.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_aia)\nmap_hmi.plot(axes=ax2, autoalign=True, title='HMI image in AIA reference frame')\nax2.axis(ax1.axis())\n\n######################################################################\n# We can directly plot them over one another, by setting the\n# transparency of the HMI plot.\n\nfig = plt.figure()\nax1 = fig.add_subplot(projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nmap_hmi.plot(axes=ax1, autoalign=True, alpha=0.5)\nax1.set_title('HMI overlaid on AIA')\n\nplt.show()\n\n# sphinx_gallery_thumbnail_number = 2\n", "path": "examples/map_transformations/autoalign_aia_hmi.py"}], "after_files": [{"content": "\"\"\"\n========================================\nAligning AIA and HMI Data with Reproject\n========================================\n\nThis example shows how to reproject one image to be aligned with another image.\n\nHere we use the `reproject <https://reproject.readthedocs.io/en/stable/>`__\npackage to transform one image to the reference frame of another image. The\nreference frames are defined by the respective World Coordinate System (WCS)\ninformation. 
This is a very generic way of aligning data, and can be very\naccurate.\n\nYou will need ``reproject`` v0.6 or higher installed.\n\nSee :ref:`sphx_glr_generated_gallery_map_transformations_autoalign_aia_hmi.py`\nfor an alternate approach to image alignment, where the images are aligned\nas they are plotted, with no modified map created.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n######################################################################\n# We use the AIA image and HMI image from the sample data. For the\n# HMI map, we use the special HMI color map, which expects the plotted\n# range to be -1500 to 1500.\n\nmap_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\nmap_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)\nmap_hmi.plot_settings['cmap'] = \"hmimag\"\nmap_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)\n\n######################################################################\n# Plot both images side by side.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_hmi)\nmap_hmi.plot(axes=ax2)\n\n######################################################################\n# We can now reproject the HMI image to the WCS of the AIA image. We are using\n# the fast `~reproject.reproject_interp`, however the slower but most accurate\n# `~reproject.reproject_exact` would also work well here. The\n# `~reproject.reproject_exact` function only works when reprojecting between\n# two WCSes with the same observer, which makes it well suited to aligning\n# data.\n\nout_hmi = map_hmi.reproject_to(map_aia.wcs)\n\n######################################################################\n# Plot the images side by side.\n#\n# Note that off-disk HMI data are not retained by default because an\n# additional assumption is required to define the location of the HMI\n# emission in 3D space. We can use\n# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to\n# retain the off-disk HMI data. See\n# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`\n# for more reference.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=out_hmi)\nout_hmi.plot(axes=ax2, title='Reprojected HMI image')\n\n######################################################################\n# As both of these images are now on the same pixel grid we can directly plot\n# them over one another, by setting the transparency of the HMI plot.\n\nfig = plt.figure()\nax1 = fig.add_subplot(projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nout_hmi.plot(axes=ax1, alpha=0.5)\nplt.title('HMI overlaid on AIA')\n\nplt.show()\n\n# sphinx_gallery_thumbnail_number = 2\n", "path": "examples/map_transformations/reprojection_align_aia_hmi.py"}, {"content": "\"\"\"\n==============================================\nAuto-Aligning AIA and HMI Data During Plotting\n==============================================\n\nThis example shows how to auto-align two images with different reference frames\nduring plotting.\n\nHere we use the optional keyword ``autoalign`` when calling Map's\n:meth:`~sunpy.map.GenericMap.plot` method. 
The reference frames are defined by\nthe respective World Coordinate System (WCS) information.\n\nSee :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_align_aia_hmi.py`\nfor an alternate approach to image alignment, where one of the maps is modified\nprior to plotting, and thus is available for purposes other than plotting.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n######################################################################\n# We use the AIA image and HMI image from the sample data. For the\n# HMI map, we use the special HMI color map, which expects the plotted\n# range to be -1500 to 1500.\n\nmap_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\nmap_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)\nmap_hmi.plot_settings['cmap'] = \"hmimag\"\nmap_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)\n\n######################################################################\n# Plot both images side by side. Note that the HMI image is oriented\n# \"upside down\" relative to the AIA image.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_hmi)\nmap_hmi.plot(axes=ax2)\n\n######################################################################\n# Setting ``autoalign=True`` allows plotting the HMI image onto axes\n# defined by the AIA reference frame. In contrast to the above code\n# block, we intentionally set the ``projection`` for the axes to be\n# the AIA map instead of the HMI map. We also need to manually set\n# the plot limits because Matplotlib gets confused by the off-disk\n# parts of the image. The HMI image now has the same\n# orientation as the AIA image.\n#\n# Note that off-disk HMI data are not retained by default because an\n# additional assumption is required to define the location of the HMI\n# emission in 3D space. We can use\n# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to\n# retain the off-disk HMI data. See\n# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`\n# for more reference.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_aia)\nmap_hmi.plot(axes=ax2, autoalign=True, title='HMI image in AIA reference frame')\nax2.axis(ax1.axis())\n\n######################################################################\n# We can directly plot them over one another, by setting the\n# transparency of the HMI plot.\n\nfig = plt.figure()\nax1 = fig.add_subplot(projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nmap_hmi.plot(axes=ax1, autoalign=True, alpha=0.5)\nax1.set_title('HMI overlaid on AIA')\n\nplt.show()\n\n# sphinx_gallery_thumbnail_number = 2\n", "path": "examples/map_transformations/autoalign_aia_hmi.py"}]}
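The note added throughout this record points at `Helioprojective.assume_spherical_screen()` as the way to retain the off-disk HMI pixels. The pattern can be sketched on its own as below; it assumes the same sample maps used in the gallery scripts and a sunpy install with `reproject` available, and it is an illustrative sketch rather than a verbatim excerpt from the examples.

```python
import sunpy.data.sample
import sunpy.map
from sunpy.coordinates import Helioprojective

map_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
map_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)

# Default behaviour: off-disk HMI pixels have no defined 3D location,
# so they are dropped when reprojecting into the AIA frame.
hmi_disk_only = map_hmi.reproject_to(map_aia.wcs)

# Assuming the emission lies on a spherical screen around the Sun lets
# the off-disk pixels be carried through the reprojection as well.
with Helioprojective.assume_spherical_screen(map_aia.observer_coordinate):
    hmi_with_limb = map_hmi.reproject_to(map_aia.wcs)
```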
num_tokens: 2,215
num_tokens_diff: 565

problem_id: gh_patches_debug_21978
source: rasdani/github-patches
task_type: git_diff
in_source_id: pymedusa__Medusa-4366
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [APP SUBMITTED]: error: [Errno 99] Address not available ### INFO **Python Version**: `2.7.14 (default, Dec 14 2017, 15:51:29) [GCC 6.4.0]` **Operating System**: `Linux-4.2.8-x86_64-with` **Locale**: `None` **Branch**: [master](../tree/master) **Database**: `44.11` **Commit**: pymedusa/Medusa@c65ca2607fae74405dcceaaf8c02f3d29d0313ea **Link to Log**: https://gist.github.com/0d062813555bacfdadcc7bbfcf7c7e28 ### ERROR <pre> 2018-06-11 14:32:43 ERROR SEARCHQUEUE-DAILY-SEARCH :: [c65ca26] DailySearchQueueItem Exception, error: [Errno 99] Address not available Traceback (most recent call last): File "/app/medusa/medusa/search/queue.py", line 289, in run self.success = snatch_episode(result) File "/app/medusa/medusa/search/core.py", line 147, in snatch_episode result_downloaded = **********.sendNZB(result, is_proper) File "/app/medusa/medusa/clients/nzb/**********.py", line 92, in sendNZB if not NZBConnection(url): File "/app/medusa/medusa/clients/nzb/**********.py", line 29, in NZBConnection if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'): File "/usr/lib/python2.7/xmlrpclib.py", line 1243, in __call__ return self.__send(self.__name, args) File "/usr/lib/python2.7/xmlrpclib.py", line 1602, in __request verbose=self.__verbose File "/usr/lib/python2.7/xmlrpclib.py", line 1283, in request return self.single_request(host, handler, request_body, verbose) File "/usr/lib/python2.7/xmlrpclib.py", line 1311, in single_request self.send_content(h, request_body) File "/usr/lib/python2.7/xmlrpclib.py", line 1459, in send_content connection.endheaders(request_body) File "/usr/lib/python2.7/httplib.py", line 1038, in endheaders self._send_output(message_body) File "/usr/lib/python2.7/httplib.py", line 882, in _send_output self.send(msg) File "/usr/lib/python2.7/httplib.py", line 844, in send self.connect() File "/usr/lib/python2.7/httplib.py", line 821, in connect self.timeout, self.source_address) File "/usr/lib/python2.7/socket.py", line 575, in create_connection raise err error: [Errno 99] Address not available </pre> --- _STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `medusa/clients/nzb/nzbget.py` Content: ``` 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 5 import datetime 6 import logging 7 from base64 import standard_b64encode 8 from xmlrpc.client import Error, ProtocolError, ServerProxy 9 10 from medusa import app 11 from medusa.common import Quality 12 from medusa.helper.common import try_int 13 from medusa.logger.adapters.style import BraceAdapter 14 15 from six import text_type 16 17 log = BraceAdapter(logging.getLogger(__name__)) 18 log.logger.addHandler(logging.NullHandler()) 19 20 21 def NZBConnection(url): 22 """Method to connect to NZBget client. 
23 24 :param url: nzb url to connect 25 :return: True if connected, else False 26 """ 27 nzbGetRPC = ServerProxy(url) 28 try: 29 if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'): 30 log.debug('Successfully connected to NZBget') 31 else: 32 log.warning('Successfully connected to NZBget but unable to' 33 ' send a message') 34 return True 35 36 except ProtocolError as error: 37 if error.errmsg == 'Unauthorized': 38 log.warning('NZBget username or password is incorrect.') 39 else: 40 log.error('Protocol Error: {msg}', {'msg': error.errmsg}) 41 return False 42 43 except Error as error: 44 log.warning('Please check your NZBget host and port (if it is' 45 ' running). NZBget is not responding to this combination' 46 ' Error: {msg}', {'msg': error.errmsg}) 47 return False 48 49 50 def testNZB(host, username, password, use_https): 51 """Test NZBget client connection. 52 53 :param host: nzb host to connect 54 :param username: nzb username 55 :param password: nzb password 56 :param use_https: If we should use https or not 57 58 :return True if connected. Else False 59 """ 60 url = 'http{}://{}:{}@{}/xmlrpc'.format( 61 's' if use_https else '', 62 username, 63 password, 64 host) 65 return NZBConnection(url) 66 67 68 def sendNZB(nzb, proper=False): 69 """ 70 Sends NZB to NZBGet client 71 72 :param nzb: nzb object 73 :param proper: True if a Proper download, False if not. 74 """ 75 if app.NZBGET_HOST is None: 76 log.warning('No NZBget host found in configuration.' 77 ' Please configure it.') 78 return False 79 80 addToTop = False 81 nzbgetprio = 0 82 category = app.NZBGET_CATEGORY 83 if nzb.series.is_anime: 84 category = app.NZBGET_CATEGORY_ANIME 85 86 url = 'http{}://{}:{}@{}/xmlrpc'.format( 87 's' if app.NZBGET_USE_HTTPS else '', 88 app.NZBGET_USERNAME, 89 app.NZBGET_PASSWORD, 90 app.NZBGET_HOST) 91 92 if not NZBConnection(url): 93 return False 94 95 nzbGetRPC = ServerProxy(url) 96 97 dupekey = '' 98 dupescore = 0 99 # if it aired recently make it high priority and generate DupeKey/Score 100 for cur_ep in nzb.episodes: 101 if dupekey == '': 102 if cur_ep.series.indexer == 1: 103 dupekey = 'Medusa-' + text_type(cur_ep.series.indexerid) 104 elif cur_ep.series.indexer == 2: 105 dupekey = 'Medusa-tvr' + text_type(cur_ep.series.indexerid) 106 dupekey += '-' + text_type(cur_ep.season) + '.' 
+ text_type(cur_ep.episode) 107 if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(days=7): 108 addToTop = True 109 nzbgetprio = app.NZBGET_PRIORITY 110 else: 111 category = app.NZBGET_CATEGORY_BACKLOG 112 if nzb.series.is_anime: 113 category = app.NZBGET_CATEGORY_ANIME_BACKLOG 114 115 if nzb.quality != Quality.UNKNOWN: 116 dupescore = nzb.quality * 100 117 if proper: 118 dupescore += 10 119 120 nzbcontent64 = None 121 if nzb.result_type == 'nzbdata': 122 data = nzb.extra_info[0] 123 nzbcontent64 = standard_b64encode(data) 124 125 log.info('Sending NZB to NZBget') 126 log.debug('URL: {}', url) 127 128 try: 129 # Find out if nzbget supports priority (Version 9.0+), 130 # old versions beginning with a 0.x will use the old command 131 nzbget_version_str = nzbGetRPC.version() 132 nzbget_version = try_int( 133 nzbget_version_str[:nzbget_version_str.find('.')] 134 ) 135 if nzbget_version == 0: 136 if nzbcontent64: 137 nzbget_result = nzbGetRPC.append( 138 nzb.name + '.nzb', 139 category, 140 addToTop, 141 nzbcontent64 142 ) 143 else: 144 if nzb.result_type == 'nzb': 145 if not nzb.provider.login(): 146 return False 147 148 # TODO: Check if this needs exception handling 149 data = nzb.provider.session(nzb.url).content 150 if data is None: 151 return False 152 153 nzbcontent64 = standard_b64encode(data) 154 155 nzbget_result = nzbGetRPC.append( 156 nzb.name + '.nzb', 157 category, 158 addToTop, 159 nzbcontent64 160 ) 161 elif nzbget_version == 12: 162 if nzbcontent64 is not None: 163 nzbget_result = nzbGetRPC.append( 164 nzb.name + '.nzb', category, nzbgetprio, False, 165 nzbcontent64, False, dupekey, dupescore, 'score' 166 ) 167 else: 168 nzbget_result = nzbGetRPC.appendurl( 169 nzb.name + '.nzb', category, nzbgetprio, False, nzb.url, 170 False, dupekey, dupescore, 'score' 171 ) 172 # v13+ has a new combined append method that accepts both (url and 173 # content) also the return value has changed from boolean to integer 174 # (Positive number representing NZBID of the queue item. 0 and negative 175 # numbers represent error codes.) 176 elif nzbget_version >= 13: 177 nzbget_result = nzbGetRPC.append( 178 nzb.name + '.nzb', 179 nzbcontent64 if nzbcontent64 is not None else nzb.url, 180 category, nzbgetprio, False, False, dupekey, dupescore, 181 'score' 182 ) > 0 183 else: 184 if nzbcontent64 is not None: 185 nzbget_result = nzbGetRPC.append( 186 nzb.name + '.nzb', category, nzbgetprio, False, 187 nzbcontent64 188 ) 189 else: 190 nzbget_result = nzbGetRPC.appendurl( 191 nzb.name + '.nzb', category, nzbgetprio, False, nzb.url 192 ) 193 194 if nzbget_result: 195 log.debug('NZB sent to NZBget successfully') 196 return True 197 else: 198 log.warning('NZBget could not add {name}.nzb to the queue', 199 {'name': nzb.name}) 200 return False 201 except Exception: 202 log.warning('Connect Error to NZBget: could not add {file}.nzb to the' 203 ' queue', {'name': nzb.name}) 204 return False 205 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/medusa/clients/nzb/nzbget.py b/medusa/clients/nzb/nzbget.py --- a/medusa/clients/nzb/nzbget.py +++ b/medusa/clients/nzb/nzbget.py @@ -4,6 +4,7 @@ import datetime import logging +import socket from base64 import standard_b64encode from xmlrpc.client import Error, ProtocolError, ServerProxy @@ -41,11 +42,17 @@ return False except Error as error: - log.warning('Please check your NZBget host and port (if it is' - ' running). NZBget is not responding to this combination' + log.warning('Please check your NZBget host and port (if it is running).' + ' NZBget is not responding to this combination.' ' Error: {msg}', {'msg': error.errmsg}) return False + except socket.error as error: + log.warning('Please check your NZBget host and port (if it is running).' + ' NZBget is not responding to this combination.' + ' Socket Error: {msg}', {'msg': error}) + return False + def testNZB(host, username, password, use_https): """Test NZBget client connection.
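The essence of the patch above is that a plain `socket.error` (such as the reported `[Errno 99] Address not available`) is now caught next to the XML-RPC errors instead of propagating out of the search queue. A stripped-down sketch of that pattern, outside of Medusa and with a hypothetical URL, might look like this:

```python
import socket
from xmlrpc.client import Error, ProtocolError, ServerProxy


def can_reach_nzbget(url):
    """Return True when the NZBget XML-RPC endpoint answers."""
    proxy = ServerProxy(url)
    try:
        proxy.writelog('INFO', 'connection test')
        return True
    except ProtocolError as error:
        print('Protocol error: {0}'.format(error.errmsg))
    except Error as error:
        print('XML-RPC error: {0}'.format(error))
    except socket.error as error:
        # Covers unreachable hosts, e.g. "[Errno 99] Address not available".
        print('Socket error: {0}'.format(error))
    return False


# Hypothetical host and credentials, for illustration only.
can_reach_nzbget('http://user:password@127.0.0.1:6789/xmlrpc')
```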
{"golden_diff": "diff --git a/medusa/clients/nzb/nzbget.py b/medusa/clients/nzb/nzbget.py\n--- a/medusa/clients/nzb/nzbget.py\n+++ b/medusa/clients/nzb/nzbget.py\n@@ -4,6 +4,7 @@\n \n import datetime\n import logging\n+import socket\n from base64 import standard_b64encode\n from xmlrpc.client import Error, ProtocolError, ServerProxy\n \n@@ -41,11 +42,17 @@\n return False\n \n except Error as error:\n- log.warning('Please check your NZBget host and port (if it is'\n- ' running). NZBget is not responding to this combination'\n+ log.warning('Please check your NZBget host and port (if it is running).'\n+ ' NZBget is not responding to this combination.'\n ' Error: {msg}', {'msg': error.errmsg})\n return False\n \n+ except socket.error as error:\n+ log.warning('Please check your NZBget host and port (if it is running).'\n+ ' NZBget is not responding to this combination.'\n+ ' Socket Error: {msg}', {'msg': error})\n+ return False\n+\n \n def testNZB(host, username, password, use_https):\n \"\"\"Test NZBget client connection.\n", "issue": "[APP SUBMITTED]: error: [Errno 99] Address not available\n\n### INFO\n**Python Version**: `2.7.14 (default, Dec 14 2017, 15:51:29) [GCC 6.4.0]`\n**Operating System**: `Linux-4.2.8-x86_64-with`\n**Locale**: `None`\n**Branch**: [master](../tree/master)\n**Database**: `44.11`\n**Commit**: pymedusa/Medusa@c65ca2607fae74405dcceaaf8c02f3d29d0313ea\n**Link to Log**: https://gist.github.com/0d062813555bacfdadcc7bbfcf7c7e28\n### ERROR\n<pre>\n2018-06-11 14:32:43 ERROR SEARCHQUEUE-DAILY-SEARCH :: [c65ca26] DailySearchQueueItem Exception, error: [Errno 99] Address not available\nTraceback (most recent call last):\n File \"/app/medusa/medusa/search/queue.py\", line 289, in run\n self.success = snatch_episode(result)\n File \"/app/medusa/medusa/search/core.py\", line 147, in snatch_episode\n result_downloaded = **********.sendNZB(result, is_proper)\n File \"/app/medusa/medusa/clients/nzb/**********.py\", line 92, in sendNZB\n if not NZBConnection(url):\n File \"/app/medusa/medusa/clients/nzb/**********.py\", line 29, in NZBConnection\n if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'):\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1243, in __call__\n return self.__send(self.__name, args)\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1602, in __request\n verbose=self.__verbose\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1283, in request\n return self.single_request(host, handler, request_body, verbose)\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1311, in single_request\n self.send_content(h, request_body)\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1459, in send_content\n connection.endheaders(request_body)\n File \"/usr/lib/python2.7/httplib.py\", line 1038, in endheaders\n self._send_output(message_body)\n File \"/usr/lib/python2.7/httplib.py\", line 882, in _send_output\n self.send(msg)\n File \"/usr/lib/python2.7/httplib.py\", line 844, in send\n self.connect()\n File \"/usr/lib/python2.7/httplib.py\", line 821, in connect\n self.timeout, self.source_address)\n File \"/usr/lib/python2.7/socket.py\", line 575, in create_connection\n raise err\nerror: [Errno 99] Address not available\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport datetime\nimport logging\nfrom base64 import standard_b64encode\nfrom xmlrpc.client import Error, ProtocolError, ServerProxy\n\nfrom medusa import app\nfrom medusa.common import 
Quality\nfrom medusa.helper.common import try_int\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\ndef NZBConnection(url):\n \"\"\"Method to connect to NZBget client.\n\n :param url: nzb url to connect\n :return: True if connected, else False\n \"\"\"\n nzbGetRPC = ServerProxy(url)\n try:\n if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'):\n log.debug('Successfully connected to NZBget')\n else:\n log.warning('Successfully connected to NZBget but unable to'\n ' send a message')\n return True\n\n except ProtocolError as error:\n if error.errmsg == 'Unauthorized':\n log.warning('NZBget username or password is incorrect.')\n else:\n log.error('Protocol Error: {msg}', {'msg': error.errmsg})\n return False\n\n except Error as error:\n log.warning('Please check your NZBget host and port (if it is'\n ' running). NZBget is not responding to this combination'\n ' Error: {msg}', {'msg': error.errmsg})\n return False\n\n\ndef testNZB(host, username, password, use_https):\n \"\"\"Test NZBget client connection.\n\n :param host: nzb host to connect\n :param username: nzb username\n :param password: nzb password\n :param use_https: If we should use https or not\n\n :return True if connected. Else False\n \"\"\"\n url = 'http{}://{}:{}@{}/xmlrpc'.format(\n 's' if use_https else '',\n username,\n password,\n host)\n return NZBConnection(url)\n\n\ndef sendNZB(nzb, proper=False):\n \"\"\"\n Sends NZB to NZBGet client\n\n :param nzb: nzb object\n :param proper: True if a Proper download, False if not.\n \"\"\"\n if app.NZBGET_HOST is None:\n log.warning('No NZBget host found in configuration.'\n ' Please configure it.')\n return False\n\n addToTop = False\n nzbgetprio = 0\n category = app.NZBGET_CATEGORY\n if nzb.series.is_anime:\n category = app.NZBGET_CATEGORY_ANIME\n\n url = 'http{}://{}:{}@{}/xmlrpc'.format(\n 's' if app.NZBGET_USE_HTTPS else '',\n app.NZBGET_USERNAME,\n app.NZBGET_PASSWORD,\n app.NZBGET_HOST)\n\n if not NZBConnection(url):\n return False\n\n nzbGetRPC = ServerProxy(url)\n\n dupekey = ''\n dupescore = 0\n # if it aired recently make it high priority and generate DupeKey/Score\n for cur_ep in nzb.episodes:\n if dupekey == '':\n if cur_ep.series.indexer == 1:\n dupekey = 'Medusa-' + text_type(cur_ep.series.indexerid)\n elif cur_ep.series.indexer == 2:\n dupekey = 'Medusa-tvr' + text_type(cur_ep.series.indexerid)\n dupekey += '-' + text_type(cur_ep.season) + '.' 
+ text_type(cur_ep.episode)\n if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(days=7):\n addToTop = True\n nzbgetprio = app.NZBGET_PRIORITY\n else:\n category = app.NZBGET_CATEGORY_BACKLOG\n if nzb.series.is_anime:\n category = app.NZBGET_CATEGORY_ANIME_BACKLOG\n\n if nzb.quality != Quality.UNKNOWN:\n dupescore = nzb.quality * 100\n if proper:\n dupescore += 10\n\n nzbcontent64 = None\n if nzb.result_type == 'nzbdata':\n data = nzb.extra_info[0]\n nzbcontent64 = standard_b64encode(data)\n\n log.info('Sending NZB to NZBget')\n log.debug('URL: {}', url)\n\n try:\n # Find out if nzbget supports priority (Version 9.0+),\n # old versions beginning with a 0.x will use the old command\n nzbget_version_str = nzbGetRPC.version()\n nzbget_version = try_int(\n nzbget_version_str[:nzbget_version_str.find('.')]\n )\n if nzbget_version == 0:\n if nzbcontent64:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n category,\n addToTop,\n nzbcontent64\n )\n else:\n if nzb.result_type == 'nzb':\n if not nzb.provider.login():\n return False\n\n # TODO: Check if this needs exception handling\n data = nzb.provider.session(nzb.url).content\n if data is None:\n return False\n\n nzbcontent64 = standard_b64encode(data)\n\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n category,\n addToTop,\n nzbcontent64\n )\n elif nzbget_version == 12:\n if nzbcontent64 is not None:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb', category, nzbgetprio, False,\n nzbcontent64, False, dupekey, dupescore, 'score'\n )\n else:\n nzbget_result = nzbGetRPC.appendurl(\n nzb.name + '.nzb', category, nzbgetprio, False, nzb.url,\n False, dupekey, dupescore, 'score'\n )\n # v13+ has a new combined append method that accepts both (url and\n # content) also the return value has changed from boolean to integer\n # (Positive number representing NZBID of the queue item. 
0 and negative\n # numbers represent error codes.)\n elif nzbget_version >= 13:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n nzbcontent64 if nzbcontent64 is not None else nzb.url,\n category, nzbgetprio, False, False, dupekey, dupescore,\n 'score'\n ) > 0\n else:\n if nzbcontent64 is not None:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb', category, nzbgetprio, False,\n nzbcontent64\n )\n else:\n nzbget_result = nzbGetRPC.appendurl(\n nzb.name + '.nzb', category, nzbgetprio, False, nzb.url\n )\n\n if nzbget_result:\n log.debug('NZB sent to NZBget successfully')\n return True\n else:\n log.warning('NZBget could not add {name}.nzb to the queue',\n {'name': nzb.name})\n return False\n except Exception:\n log.warning('Connect Error to NZBget: could not add {file}.nzb to the'\n ' queue', {'name': nzb.name})\n return False\n", "path": "medusa/clients/nzb/nzbget.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport datetime\nimport logging\nimport socket\nfrom base64 import standard_b64encode\nfrom xmlrpc.client import Error, ProtocolError, ServerProxy\n\nfrom medusa import app\nfrom medusa.common import Quality\nfrom medusa.helper.common import try_int\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\ndef NZBConnection(url):\n \"\"\"Method to connect to NZBget client.\n\n :param url: nzb url to connect\n :return: True if connected, else False\n \"\"\"\n nzbGetRPC = ServerProxy(url)\n try:\n if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'):\n log.debug('Successfully connected to NZBget')\n else:\n log.warning('Successfully connected to NZBget but unable to'\n ' send a message')\n return True\n\n except ProtocolError as error:\n if error.errmsg == 'Unauthorized':\n log.warning('NZBget username or password is incorrect.')\n else:\n log.error('Protocol Error: {msg}', {'msg': error.errmsg})\n return False\n\n except Error as error:\n log.warning('Please check your NZBget host and port (if it is running).'\n ' NZBget is not responding to this combination.'\n ' Error: {msg}', {'msg': error.errmsg})\n return False\n\n except socket.error as error:\n log.warning('Please check your NZBget host and port (if it is running).'\n ' NZBget is not responding to this combination.'\n ' Socket Error: {msg}', {'msg': error})\n return False\n\n\ndef testNZB(host, username, password, use_https):\n \"\"\"Test NZBget client connection.\n\n :param host: nzb host to connect\n :param username: nzb username\n :param password: nzb password\n :param use_https: If we should use https or not\n\n :return True if connected. 
Else False\n \"\"\"\n url = 'http{}://{}:{}@{}/xmlrpc'.format(\n 's' if use_https else '',\n username,\n password,\n host)\n return NZBConnection(url)\n\n\ndef sendNZB(nzb, proper=False):\n \"\"\"\n Sends NZB to NZBGet client\n\n :param nzb: nzb object\n :param proper: True if a Proper download, False if not.\n \"\"\"\n if app.NZBGET_HOST is None:\n log.warning('No NZBget host found in configuration.'\n ' Please configure it.')\n return False\n\n addToTop = False\n nzbgetprio = 0\n category = app.NZBGET_CATEGORY\n if nzb.series.is_anime:\n category = app.NZBGET_CATEGORY_ANIME\n\n url = 'http{}://{}:{}@{}/xmlrpc'.format(\n 's' if app.NZBGET_USE_HTTPS else '',\n app.NZBGET_USERNAME,\n app.NZBGET_PASSWORD,\n app.NZBGET_HOST)\n\n if not NZBConnection(url):\n return False\n\n nzbGetRPC = ServerProxy(url)\n\n dupekey = ''\n dupescore = 0\n # if it aired recently make it high priority and generate DupeKey/Score\n for cur_ep in nzb.episodes:\n if dupekey == '':\n if cur_ep.series.indexer == 1:\n dupekey = 'Medusa-' + text_type(cur_ep.series.indexerid)\n elif cur_ep.series.indexer == 2:\n dupekey = 'Medusa-tvr' + text_type(cur_ep.series.indexerid)\n dupekey += '-' + text_type(cur_ep.season) + '.' + text_type(cur_ep.episode)\n if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(days=7):\n addToTop = True\n nzbgetprio = app.NZBGET_PRIORITY\n else:\n category = app.NZBGET_CATEGORY_BACKLOG\n if nzb.series.is_anime:\n category = app.NZBGET_CATEGORY_ANIME_BACKLOG\n\n if nzb.quality != Quality.UNKNOWN:\n dupescore = nzb.quality * 100\n if proper:\n dupescore += 10\n\n nzbcontent64 = None\n if nzb.result_type == 'nzbdata':\n data = nzb.extra_info[0]\n nzbcontent64 = standard_b64encode(data)\n\n log.info('Sending NZB to NZBget')\n log.debug('URL: {}', url)\n\n try:\n # Find out if nzbget supports priority (Version 9.0+),\n # old versions beginning with a 0.x will use the old command\n nzbget_version_str = nzbGetRPC.version()\n nzbget_version = try_int(\n nzbget_version_str[:nzbget_version_str.find('.')]\n )\n if nzbget_version == 0:\n if nzbcontent64:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n category,\n addToTop,\n nzbcontent64\n )\n else:\n if nzb.result_type == 'nzb':\n if not nzb.provider.login():\n return False\n\n # TODO: Check if this needs exception handling\n data = nzb.provider.session(nzb.url).content\n if data is None:\n return False\n\n nzbcontent64 = standard_b64encode(data)\n\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n category,\n addToTop,\n nzbcontent64\n )\n elif nzbget_version == 12:\n if nzbcontent64 is not None:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb', category, nzbgetprio, False,\n nzbcontent64, False, dupekey, dupescore, 'score'\n )\n else:\n nzbget_result = nzbGetRPC.appendurl(\n nzb.name + '.nzb', category, nzbgetprio, False, nzb.url,\n False, dupekey, dupescore, 'score'\n )\n # v13+ has a new combined append method that accepts both (url and\n # content) also the return value has changed from boolean to integer\n # (Positive number representing NZBID of the queue item. 
0 and negative\n # numbers represent error codes.)\n elif nzbget_version >= 13:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n nzbcontent64 if nzbcontent64 is not None else nzb.url,\n category, nzbgetprio, False, False, dupekey, dupescore,\n 'score'\n ) > 0\n else:\n if nzbcontent64 is not None:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb', category, nzbgetprio, False,\n nzbcontent64\n )\n else:\n nzbget_result = nzbGetRPC.appendurl(\n nzb.name + '.nzb', category, nzbgetprio, False, nzb.url\n )\n\n if nzbget_result:\n log.debug('NZB sent to NZBget successfully')\n return True\n else:\n log.warning('NZBget could not add {name}.nzb to the queue',\n {'name': nzb.name})\n return False\n except Exception:\n log.warning('Connect Error to NZBget: could not add {file}.nzb to the'\n ' queue', {'name': nzb.name})\n return False\n", "path": "medusa/clients/nzb/nzbget.py"}]}
3,249
296
gh_patches_debug_4636
rasdani/github-patches
git_diff
enthought__chaco-885
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ImportError: cannot import name 'marker_trait' from 'chaco.api' **Problem Description** ImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run a 2008 demo **Reproduction Steps:** ```python python chaco/examples/tutorials/scipy2008/traits_example.py ``` **Expected behavior:** test past **OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11] --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/tutorials/scipy2008/traits_example.py` Content: ``` 1 from numpy import linspace, sin 2 3 from enable.api import ColorTrait 4 from chaco.api import ArrayPlotData, Plot, marker_trait 5 from enable.api import ComponentEditor 6 from traits.api import HasTraits, Instance, Int 7 from traitsui.api import Group, Item, View 8 9 10 class ScatterPlotTraits(HasTraits): 11 12 plot = Instance(Plot) 13 color = ColorTrait("blue") 14 marker = marker_trait 15 marker_size = Int(4) 16 17 traits_view = View( 18 Group( 19 Item("color", label="Color", style="custom"), 20 Item("marker", label="Marker"), 21 Item("marker_size", label="Size"), 22 Item("plot", editor=ComponentEditor(), show_label=False), 23 orientation="vertical", 24 ), 25 width=800, 26 height=600, 27 resizable=True, 28 title="Chaco Plot", 29 ) 30 31 def __init__(self): 32 # Create the data and the PlotData object 33 x = linspace(-14, 14, 100) 34 y = sin(x) * x ** 3 35 plotdata = ArrayPlotData(x=x, y=y) 36 # Create a Plot and associate it with the PlotData 37 plot = Plot(plotdata) 38 # Create a line plot in the Plot 39 self.renderer = plot.plot(("x", "y"), type="scatter", color="blue")[0] 40 self.plot = plot 41 42 def _color_changed(self): 43 self.renderer.color = self.color 44 45 def _marker_changed(self): 46 self.renderer.marker = self.marker 47 48 def _marker_size_changed(self): 49 self.renderer.marker_size = self.marker_size 50 51 52 # =============================================================================== 53 # demo object that is used by the demo.py application. 54 # =============================================================================== 55 demo = ScatterPlotTraits() 56 57 if __name__ == "__main__": 58 demo.configure_traits() 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py --- a/examples/tutorials/scipy2008/traits_example.py +++ b/examples/tutorials/scipy2008/traits_example.py @@ -1,7 +1,7 @@ from numpy import linspace, sin -from enable.api import ColorTrait -from chaco.api import ArrayPlotData, Plot, marker_trait +from enable.api import ColorTrait, marker_trait +from chaco.api import ArrayPlotData, Plot from enable.api import ComponentEditor from traits.api import HasTraits, Instance, Int from traitsui.api import Group, Item, View
{"golden_diff": "diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py\n--- a/examples/tutorials/scipy2008/traits_example.py\n+++ b/examples/tutorials/scipy2008/traits_example.py\n@@ -1,7 +1,7 @@\n from numpy import linspace, sin\n \n-from enable.api import ColorTrait\n-from chaco.api import ArrayPlotData, Plot, marker_trait\n+from enable.api import ColorTrait, marker_trait\n+from chaco.api import ArrayPlotData, Plot\n from enable.api import ComponentEditor\n from traits.api import HasTraits, Instance, Int\n from traitsui.api import Group, Item, View\n", "issue": "ImportError: cannot import name 'marker_trait' from 'chaco.api'\n**Problem Description**\nImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run a 2008 demo\n\n**Reproduction Steps:**\n\n```python\npython chaco/examples/tutorials/scipy2008/traits_example.py\n```\n\n**Expected behavior:**\ntest past\n\n**OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11]\n\n", "before_files": [{"content": "from numpy import linspace, sin\n\nfrom enable.api import ColorTrait\nfrom chaco.api import ArrayPlotData, Plot, marker_trait\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance, Int\nfrom traitsui.api import Group, Item, View\n\n\nclass ScatterPlotTraits(HasTraits):\n\n plot = Instance(Plot)\n color = ColorTrait(\"blue\")\n marker = marker_trait\n marker_size = Int(4)\n\n traits_view = View(\n Group(\n Item(\"color\", label=\"Color\", style=\"custom\"),\n Item(\"marker\", label=\"Marker\"),\n Item(\"marker_size\", label=\"Size\"),\n Item(\"plot\", editor=ComponentEditor(), show_label=False),\n orientation=\"vertical\",\n ),\n width=800,\n height=600,\n resizable=True,\n title=\"Chaco Plot\",\n )\n\n def __init__(self):\n # Create the data and the PlotData object\n x = linspace(-14, 14, 100)\n y = sin(x) * x ** 3\n plotdata = ArrayPlotData(x=x, y=y)\n # Create a Plot and associate it with the PlotData\n plot = Plot(plotdata)\n # Create a line plot in the Plot\n self.renderer = plot.plot((\"x\", \"y\"), type=\"scatter\", color=\"blue\")[0]\n self.plot = plot\n\n def _color_changed(self):\n self.renderer.color = self.color\n\n def _marker_changed(self):\n self.renderer.marker = self.marker\n\n def _marker_size_changed(self):\n self.renderer.marker_size = self.marker_size\n\n\n# ===============================================================================\n# demo object that is used by the demo.py application.\n# ===============================================================================\ndemo = ScatterPlotTraits()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/tutorials/scipy2008/traits_example.py"}], "after_files": [{"content": "from numpy import linspace, sin\n\nfrom enable.api import ColorTrait, marker_trait\nfrom chaco.api import ArrayPlotData, Plot\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance, Int\nfrom traitsui.api import Group, Item, View\n\n\nclass ScatterPlotTraits(HasTraits):\n\n plot = Instance(Plot)\n color = ColorTrait(\"blue\")\n marker = marker_trait\n marker_size = Int(4)\n\n traits_view = View(\n Group(\n Item(\"color\", label=\"Color\", style=\"custom\"),\n Item(\"marker\", label=\"Marker\"),\n Item(\"marker_size\", label=\"Size\"),\n Item(\"plot\", editor=ComponentEditor(), show_label=False),\n orientation=\"vertical\",\n ),\n width=800,\n height=600,\n resizable=True,\n title=\"Chaco Plot\",\n )\n\n def 
__init__(self):\n # Create the data and the PlotData object\n x = linspace(-14, 14, 100)\n y = sin(x) * x ** 3\n plotdata = ArrayPlotData(x=x, y=y)\n # Create a Plot and associate it with the PlotData\n plot = Plot(plotdata)\n # Create a line plot in the Plot\n self.renderer = plot.plot((\"x\", \"y\"), type=\"scatter\", color=\"blue\")[0]\n self.plot = plot\n\n def _color_changed(self):\n self.renderer.color = self.color\n\n def _marker_changed(self):\n self.renderer.marker = self.marker\n\n def _marker_size_changed(self):\n self.renderer.marker_size = self.marker_size\n\n\n# ===============================================================================\n# demo object that is used by the demo.py application.\n# ===============================================================================\ndemo = ScatterPlotTraits()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/tutorials/scipy2008/traits_example.py"}]}
885
154
gh_patches_debug_4639
rasdani/github-patches
git_diff
ManimCommunity__manim-2962
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Documentation Website Has Broken Links: I Can't Submit Pull Requests! On each page of Docs.Manim.Community there is a Pencil icon if editing needs be for that page. But now if I click the Pencil for any page it gives a 404! For example: If I go on this link: https://docs.manim.community/en/stable/reference/manim.mobject.geometry.arc.Circle.html And click the Pencil on the top-right I get a 404! Fix it! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/conf.py` Content: ``` 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 from __future__ import annotations 8 9 import os 10 import sys 11 from pathlib import Path 12 13 import manim 14 15 # -- Path setup -------------------------------------------------------------- 16 # If extensions (or modules to document with autodoc) are in another directory, 17 # add these directories to sys.path here. If the directory is relative to the 18 # documentation root, use os.path.abspath to make it absolute, like shown here. 19 20 21 sys.path.insert(0, os.path.abspath(".")) 22 23 24 # -- Project information ----------------------------------------------------- 25 26 project = "Manim" 27 copyright = "2020-2022, The Manim Community Dev Team" 28 author = "The Manim Community Dev Team" 29 30 31 # -- General configuration --------------------------------------------------- 32 33 # Add any Sphinx extension module names here, as strings. They can be 34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 35 # ones. 36 extensions = [ 37 "sphinx.ext.autodoc", 38 "sphinx_copybutton", 39 "sphinx.ext.napoleon", 40 "sphinx.ext.autosummary", 41 "sphinx.ext.doctest", 42 "sphinx.ext.extlinks", 43 "sphinx.ext.viewcode", 44 "sphinxext.opengraph", 45 "manim.utils.docbuild.manim_directive", 46 "sphinx.ext.graphviz", 47 "sphinx.ext.inheritance_diagram", 48 "sphinxcontrib.programoutput", 49 "myst_parser", 50 ] 51 52 # Automatically generate stub pages when using the .. autosummary directive 53 autosummary_generate = True 54 55 # generate documentation from type hints 56 autodoc_typehints = "description" 57 autoclass_content = "both" 58 59 # controls whether functions documented by the autofunction directive 60 # appear with their full module names 61 add_module_names = False 62 63 # Add any paths that contain templates here, relative to this directory. 64 templates_path = ["_templates"] 65 66 # Custom section headings in our documentation 67 napoleon_custom_sections = ["Tests", ("Test", "Tests")] 68 69 # List of patterns, relative to source directory, that match files and 70 # directories to ignore when looking for source files. 71 # This pattern also affects html_static_path and html_extra_path. 72 html_extra_path = ["robots.txt"] 73 74 exclude_patterns: list[str] = [] 75 76 # -- Options for internationalization ---------------------------------------- 77 # Set the destination directory of the localized po files 78 locale_dirs = ["../i18n/"] 79 80 # Splits the text in more pot files. 81 gettext_compact = False 82 83 # Remove useless metadata from po files. 
84 gettext_last_translator = "" 85 gettext_language_team = "" 86 87 # -- Options for HTML output ------------------------------------------------- 88 89 # The theme to use for HTML and HTML Help pages. See the documentation for 90 # a list of builtin themes. 91 # 92 93 html_theme = "furo" 94 html_favicon = str(Path("_static/favicon.ico")) 95 96 # Add any paths that contain custom static files (such as style sheets) here, 97 # relative to this directory. They are copied after the builtin static files, 98 # so a file named "default.css" will overwrite the builtin "default.css". 99 html_static_path = ["_static"] 100 101 html_theme_options = { 102 "light_logo": "manim-logo-sidebar.svg", 103 "dark_logo": "manim-logo-sidebar-dark.svg", 104 "light_css_variables": { 105 "color-content-foreground": "#000000", 106 "color-background-primary": "#ffffff", 107 "color-background-border": "#ffffff", 108 "color-sidebar-background": "#f8f9fb", 109 "color-brand-content": "#1c00e3", 110 "color-brand-primary": "#192bd0", 111 "color-link": "#c93434", 112 "color-link--hover": "#5b0000", 113 "color-inline-code-background": "#f6f6f6;", 114 "color-foreground-secondary": "#000", 115 }, 116 "dark_css_variables": { 117 "color-content-foreground": "#ffffffd9", 118 "color-background-primary": "#131416", 119 "color-background-border": "#303335", 120 "color-sidebar-background": "#1a1c1e", 121 "color-brand-content": "#2196f3", 122 "color-brand-primary": "#007fff", 123 "color-link": "#51ba86", 124 "color-link--hover": "#9cefc6", 125 "color-inline-code-background": "#262626", 126 "color-foreground-secondary": "#ffffffd9", 127 }, 128 } 129 html_title = f"Manim Community v{manim.__version__}" 130 131 # This specifies any additional css files that will override the theme's 132 html_css_files = ["custom.css"] 133 134 135 # external links 136 extlinks = { 137 "issue": ("https://github.com/ManimCommunity/manim/issues/%s", "#"), 138 "pr": ("https://github.com/ManimCommunity/manim/pull/%s", "#"), 139 } 140 141 # opengraph settings 142 ogp_image = "https://www.manim.community/logo.png" 143 ogp_site_name = "Manim Community | Documentation" 144 ogp_site_url = "https://docs.manim.community/" 145 146 147 # inheritance_graph settings 148 inheritance_graph_attrs = { 149 "concentrate": True, 150 "size": '""', 151 "splines": "ortho", 152 "nodesep": 0.1, 153 "ranksep": 0.2, 154 } 155 156 inheritance_node_attrs = { 157 "penwidth": 0, 158 "shape": "box", 159 "width": 0.05, 160 "height": 0.05, 161 "margin": 0.05, 162 } 163 164 inheritance_edge_attrs = { 165 "penwidth": 1, 166 } 167 168 html_js_files = [ 169 "responsiveSvg.js", 170 ] 171 172 graphviz_output_format = "svg" 173 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -99,6 +99,10 @@ html_static_path = ["_static"] html_theme_options = { + "source_repository": "https://github.com/ManimCommunity/manim/", + "source_branch": "main", + "source_directory": "docs/source/", + "top_of_page_button": None, "light_logo": "manim-logo-sidebar.svg", "dark_logo": "manim-logo-sidebar-dark.svg", "light_css_variables": {
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -99,6 +99,10 @@\n html_static_path = [\"_static\"]\n \n html_theme_options = {\n+ \"source_repository\": \"https://github.com/ManimCommunity/manim/\",\n+ \"source_branch\": \"main\",\n+ \"source_directory\": \"docs/source/\",\n+ \"top_of_page_button\": None,\n \"light_logo\": \"manim-logo-sidebar.svg\",\n \"dark_logo\": \"manim-logo-sidebar-dark.svg\",\n \"light_css_variables\": {\n", "issue": "Documentation Website Has Broken Links: I Can't Submit Pull Requests!\nOn each page of Docs.Manim.Community there is a Pencil icon if editing needs be for that page.\r\nBut now if I click the Pencil for any page it gives a 404!\r\n\r\nFor example:\r\nIf I go on this link:\r\nhttps://docs.manim.community/en/stable/reference/manim.mobject.geometry.arc.Circle.html\r\n\r\nAnd click the Pencil on the top-right I get a 404!\r\n\r\nFix it!\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport manim\n\n# -- Path setup --------------------------------------------------------------\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020-2022, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.viewcode\",\n \"sphinxext.opengraph\",\n \"manim.utils.docbuild.manim_directive\",\n \"sphinx.ext.graphviz\",\n \"sphinx.ext.inheritance_diagram\",\n \"sphinxcontrib.programoutput\",\n \"myst_parser\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# generate documentation from type hints\nautodoc_typehints = \"description\"\nautoclass_content = \"both\"\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# Custom section headings in our documentation\nnapoleon_custom_sections = [\"Tests\", (\"Test\", \"Tests\")]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nhtml_extra_path = [\"robots.txt\"]\n\nexclude_patterns: list[str] = []\n\n# -- Options for internationalization ----------------------------------------\n# Set the destination directory of the localized po files\nlocale_dirs = [\"../i18n/\"]\n\n# Splits the text in more pot files.\ngettext_compact = False\n\n# Remove useless metadata from po files.\ngettext_last_translator = \"\"\ngettext_language_team = \"\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\nhtml_theme = \"furo\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_theme_options = {\n \"light_logo\": \"manim-logo-sidebar.svg\",\n \"dark_logo\": \"manim-logo-sidebar-dark.svg\",\n \"light_css_variables\": {\n \"color-content-foreground\": \"#000000\",\n \"color-background-primary\": \"#ffffff\",\n \"color-background-border\": \"#ffffff\",\n \"color-sidebar-background\": \"#f8f9fb\",\n \"color-brand-content\": \"#1c00e3\",\n \"color-brand-primary\": \"#192bd0\",\n \"color-link\": \"#c93434\",\n \"color-link--hover\": \"#5b0000\",\n \"color-inline-code-background\": \"#f6f6f6;\",\n \"color-foreground-secondary\": \"#000\",\n },\n \"dark_css_variables\": {\n \"color-content-foreground\": \"#ffffffd9\",\n \"color-background-primary\": \"#131416\",\n \"color-background-border\": \"#303335\",\n \"color-sidebar-background\": \"#1a1c1e\",\n \"color-brand-content\": \"#2196f3\",\n \"color-brand-primary\": \"#007fff\",\n \"color-link\": \"#51ba86\",\n \"color-link--hover\": \"#9cefc6\",\n \"color-inline-code-background\": \"#262626\",\n \"color-foreground-secondary\": \"#ffffffd9\",\n },\n}\nhtml_title = f\"Manim Community v{manim.__version__}\"\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n\n\n# external links\nextlinks = {\n \"issue\": (\"https://github.com/ManimCommunity/manim/issues/%s\", \"#\"),\n \"pr\": (\"https://github.com/ManimCommunity/manim/pull/%s\", \"#\"),\n}\n\n# opengraph settings\nogp_image = \"https://www.manim.community/logo.png\"\nogp_site_name = \"Manim Community | Documentation\"\nogp_site_url = \"https://docs.manim.community/\"\n\n\n# inheritance_graph settings\ninheritance_graph_attrs = {\n \"concentrate\": True,\n \"size\": '\"\"',\n \"splines\": \"ortho\",\n \"nodesep\": 0.1,\n \"ranksep\": 0.2,\n}\n\ninheritance_node_attrs = {\n \"penwidth\": 0,\n \"shape\": \"box\",\n \"width\": 0.05,\n \"height\": 0.05,\n \"margin\": 
0.05,\n}\n\ninheritance_edge_attrs = {\n \"penwidth\": 1,\n}\n\nhtml_js_files = [\n \"responsiveSvg.js\",\n]\n\ngraphviz_output_format = \"svg\"\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport manim\n\n# -- Path setup --------------------------------------------------------------\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020-2022, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.viewcode\",\n \"sphinxext.opengraph\",\n \"manim.utils.docbuild.manim_directive\",\n \"sphinx.ext.graphviz\",\n \"sphinx.ext.inheritance_diagram\",\n \"sphinxcontrib.programoutput\",\n \"myst_parser\",\n]\n\n# Automatically generate stub pages when using the .. autosummary directive\nautosummary_generate = True\n\n# generate documentation from type hints\nautodoc_typehints = \"description\"\nautoclass_content = \"both\"\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# Custom section headings in our documentation\nnapoleon_custom_sections = [\"Tests\", (\"Test\", \"Tests\")]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nhtml_extra_path = [\"robots.txt\"]\n\nexclude_patterns: list[str] = []\n\n# -- Options for internationalization ----------------------------------------\n# Set the destination directory of the localized po files\nlocale_dirs = [\"../i18n/\"]\n\n# Splits the text in more pot files.\ngettext_compact = False\n\n# Remove useless metadata from po files.\ngettext_last_translator = \"\"\ngettext_language_team = \"\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\nhtml_theme = \"furo\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_theme_options = {\n \"source_repository\": \"https://github.com/ManimCommunity/manim/\",\n \"source_branch\": \"main\",\n \"source_directory\": \"docs/source/\",\n \"top_of_page_button\": None,\n \"light_logo\": \"manim-logo-sidebar.svg\",\n \"dark_logo\": \"manim-logo-sidebar-dark.svg\",\n \"light_css_variables\": {\n \"color-content-foreground\": \"#000000\",\n \"color-background-primary\": \"#ffffff\",\n \"color-background-border\": \"#ffffff\",\n \"color-sidebar-background\": \"#f8f9fb\",\n \"color-brand-content\": \"#1c00e3\",\n \"color-brand-primary\": \"#192bd0\",\n \"color-link\": \"#c93434\",\n \"color-link--hover\": \"#5b0000\",\n \"color-inline-code-background\": \"#f6f6f6;\",\n \"color-foreground-secondary\": \"#000\",\n },\n \"dark_css_variables\": {\n \"color-content-foreground\": \"#ffffffd9\",\n \"color-background-primary\": \"#131416\",\n \"color-background-border\": \"#303335\",\n \"color-sidebar-background\": \"#1a1c1e\",\n \"color-brand-content\": \"#2196f3\",\n \"color-brand-primary\": \"#007fff\",\n \"color-link\": \"#51ba86\",\n \"color-link--hover\": \"#9cefc6\",\n \"color-inline-code-background\": \"#262626\",\n \"color-foreground-secondary\": \"#ffffffd9\",\n },\n}\nhtml_title = f\"Manim Community v{manim.__version__}\"\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n\n\n# external links\nextlinks = {\n \"issue\": (\"https://github.com/ManimCommunity/manim/issues/%s\", \"#\"),\n \"pr\": (\"https://github.com/ManimCommunity/manim/pull/%s\", \"#\"),\n}\n\n# opengraph settings\nogp_image = \"https://www.manim.community/logo.png\"\nogp_site_name = \"Manim Community | Documentation\"\nogp_site_url = \"https://docs.manim.community/\"\n\n\n# inheritance_graph settings\ninheritance_graph_attrs = {\n \"concentrate\": True,\n \"size\": '\"\"',\n \"splines\": \"ortho\",\n \"nodesep\": 0.1,\n \"ranksep\": 0.2,\n}\n\ninheritance_node_attrs = {\n \"penwidth\": 0,\n \"shape\": \"box\",\n \"width\": 0.05,\n \"height\": 0.05,\n \"margin\": 0.05,\n}\n\ninheritance_edge_attrs = {\n \"penwidth\": 1,\n}\n\nhtml_js_files = [\n \"responsiveSvg.js\",\n]\n\ngraphviz_output_format = \"svg\"\n", "path": "docs/source/conf.py"}]}
2,073
134
gh_patches_debug_21115
rasdani/github-patches
git_diff
opsdroid__opsdroid-1233
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed in Slack Connector <!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. --> # Description I am trying to build a Slack bot using Opsdroid (master branch). When running `opsdroid start`, I get an error where the Opsdroid bot fails to connect with the Slack Workspace. ## Steps to Reproduce 1. Install opsdroid ``` pip install git+https://github.com/opsdroid/opsdroid.git ``` 2. Create `configuration.yaml` with following content ``` welcome-message: true connectors: - name: slack api-token: "<Bot OAuth Token>" skills: - name: hello ``` 3. Start opsdroid ``` opsdroid start ``` ## Expected Functionality The expected functionality is for the Opsdroid Bot to get connected with Slack Workspace and interact with the Slack user as per the configured skill. ## Experienced Functionality The Opsdroid Bot failed to connect with the Slack Workspace with the following error: ``` INFO opsdroid.connector.slack: Connecting to Slack Traceback (most recent call last): File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 936, in _wrap_create_connection return await self._loop.create_connection(*args, **kwargs) # type: ignore # noqa File "/usr/lib/python3.6/asyncio/base_events.py", line 820, in create_connection sock, protocol_factory, ssl, server_hostname) File "/usr/lib/python3.6/asyncio/base_events.py", line 846, in _create_connection_transport yield from waiter File "/usr/lib/python3.6/asyncio/sslproto.py", line 505, in data_received ssldata, appdata = self._sslpipe.feed_ssldata(data) File "/usr/lib/python3.6/asyncio/sslproto.py", line 201, in feed_ssldata self._sslobj.do_handshake() File "/usr/lib/python3.6/ssl.py", line 689, in do_handshake self._sslobj.do_handshake() ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/opsdroid/connector/slack/__init__.py", line 55, in connect self.auth_info = (await self.slack.api_call("auth.test")).data File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py", line 229, in _send http_verb=http_verb, api_url=api_url, req_args=req_args File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py", line 259, in _request async with session.request(http_verb, api_url, **req_args) as res: File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py", line 1012, in __aenter__ self._resp = await self._coro File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py", line 483, in _request timeout=real_timeout File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 523, in connect proto = await self._create_connection(req, traces, timeout) File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 859, in _create_connection req, traces, timeout) File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 
1004, in _create_direct_connection raise last_exc File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 986, in _create_direct_connection req=req, client_error=client_error) File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 941, in _wrap_create_connection raise ClientConnectorSSLError(req.connection_key, exc) from exc aiohttp.client_exceptions.ClientConnectorSSLError: Cannot connect to host www.slack.com:443 ssl:default [[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)] ``` ## Versions - **Opsdroid version:** master branch in git - **Python version:** 3.6.8 - **OS/Docker version:** Ubuntu 18.04 LTS ## Configuration File Please include your version of the configuration file below. ```yaml welcome-message: true connectors: - name: slack api-token: "<Bot OAuth Token>" skills: - name: hello ``` ## Additional Details Any other details you wish to include such as screenshots, console messages, etc. <!-- Love opsdroid? Please consider supporting our collective: +👉 https://opencollective.com/opsdroid/donate --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/connector/slack/__init__.py` Content: ``` 1 """A connector for Slack.""" 2 import logging 3 import re 4 5 import slack 6 from emoji import demojize 7 8 from opsdroid.connector import Connector, register_event 9 from opsdroid.events import Message, Reaction 10 from opsdroid.connector.slack.events import Blocks 11 12 13 _LOGGER = logging.getLogger(__name__) 14 15 16 class ConnectorSlack(Connector): 17 """A connector for Slack.""" 18 19 def __init__(self, config, opsdroid=None): 20 """Create the connector.""" 21 super().__init__(config, opsdroid=opsdroid) 22 _LOGGER.debug(_("Starting Slack connector")) 23 self.name = "slack" 24 self.default_target = config.get("default-room", "#general") 25 self.icon_emoji = config.get("icon-emoji", ":robot_face:") 26 self.token = config["api-token"] 27 self.timeout = config.get("connect-timeout", 10) 28 self.slack = slack.WebClient(token=self.token, run_async=True) 29 self.slack_rtm = slack.RTMClient(token=self.token, run_async=True) 30 self.websocket = None 31 self.bot_name = config.get("bot-name", "opsdroid") 32 self.auth_info = None 33 self.user_info = None 34 self.bot_id = None 35 self.known_users = {} 36 self.keepalive = None 37 self.reconnecting = False 38 self.listening = True 39 self._message_id = 0 40 41 # Register callbacks 42 slack.RTMClient.on(event="message", callback=self.process_message) 43 44 async def connect(self): 45 """Connect to the chat service.""" 46 _LOGGER.info(_("Connecting to Slack")) 47 48 try: 49 # The slack library recommends you call `self.slack_rtm.start()`` here but it 50 # seems to mess with the event loop's signal handlers which breaks opsdroid. 51 # Therefore we need to directly call the private `_connect_and_read` method 52 # instead. This method also blocks so we need to dispatch it to the loop as a task. 
53 self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read()) 54 55 self.auth_info = (await self.slack.api_call("auth.test")).data 56 self.user_info = ( 57 await self.slack.api_call( 58 "users.info", 59 http_verb="GET", 60 params={"user": self.auth_info["user_id"]}, 61 ) 62 ).data 63 self.bot_id = self.user_info["user"]["profile"]["bot_id"] 64 65 _LOGGER.debug(_("Connected as %s"), self.bot_name) 66 _LOGGER.debug(_("Using icon %s"), self.icon_emoji) 67 _LOGGER.debug(_("Default room is %s"), self.default_target) 68 _LOGGER.info(_("Connected successfully")) 69 except slack.errors.SlackApiError as error: 70 _LOGGER.error( 71 _( 72 "Unable to connect to Slack due to %s - " 73 "The Slack Connector will not be available." 74 ), 75 error, 76 ) 77 except Exception: 78 await self.disconnect() 79 raise 80 81 async def disconnect(self): 82 """Disconnect from Slack.""" 83 await self.slack_rtm.stop() 84 self.listening = False 85 86 async def listen(self): 87 """Listen for and parse new messages.""" 88 89 async def process_message(self, **payload): 90 """Process a raw message and pass it to the parser.""" 91 message = payload["data"] 92 93 # Ignore message edits 94 if "subtype" in message and message["subtype"] == "message_changed": 95 return 96 97 # Ignore own messages 98 if ( 99 "subtype" in message 100 and message["subtype"] == "bot_message" 101 and message["bot_id"] == self.bot_id 102 ): 103 return 104 105 # Lookup username 106 _LOGGER.debug(_("Looking up sender username")) 107 try: 108 user_info = await self.lookup_username(message["user"]) 109 except ValueError: 110 return 111 112 # Replace usernames in the message 113 _LOGGER.debug(_("Replacing userids in message with usernames")) 114 message["text"] = await self.replace_usernames(message["text"]) 115 116 await self.opsdroid.parse( 117 Message( 118 message["text"], 119 user_info["name"], 120 message["channel"], 121 self, 122 raw_event=message, 123 ) 124 ) 125 126 @register_event(Message) 127 async def send_message(self, message): 128 """Respond with a message.""" 129 _LOGGER.debug( 130 _("Responding with: '%s' in room %s"), message.text, message.target 131 ) 132 await self.slack.api_call( 133 "chat.postMessage", 134 data={ 135 "channel": message.target, 136 "text": message.text, 137 "as_user": False, 138 "username": self.bot_name, 139 "icon_emoji": self.icon_emoji, 140 }, 141 ) 142 143 @register_event(Blocks) 144 async def send_blocks(self, blocks): 145 """Respond with structured blocks.""" 146 _LOGGER.debug( 147 _("Responding with interactive blocks in room %s"), blocks.target 148 ) 149 await self.slack.api_call( 150 "chat.postMessage", 151 data={ 152 "channel": blocks.target, 153 "username": self.bot_name, 154 "blocks": blocks.blocks, 155 "icon_emoji": self.icon_emoji, 156 }, 157 ) 158 159 @register_event(Reaction) 160 async def send_reaction(self, reaction): 161 """React to a message.""" 162 emoji = demojize(reaction.emoji).replace(":", "") 163 _LOGGER.debug(_("Reacting with: %s"), emoji) 164 try: 165 await self.slack.api_call( 166 "reactions.add", 167 data={ 168 "name": emoji, 169 "channel": reaction.target, 170 "timestamp": reaction.linked_event.raw_event["ts"], 171 }, 172 ) 173 except slack.errors.SlackApiError as error: 174 if "invalid_name" in str(error): 175 _LOGGER.warning(_("Slack does not support the emoji %s"), emoji) 176 else: 177 raise 178 179 async def lookup_username(self, userid): 180 """Lookup a username and cache it.""" 181 if userid in self.known_users: 182 user_info = self.known_users[userid] 183 else: 184 
response = await self.slack.users_info(user=userid) 185 user_info = response.data["user"] 186 if isinstance(user_info, dict): 187 self.known_users[userid] = user_info 188 else: 189 raise ValueError("Returned user is not a dict.") 190 return user_info 191 192 async def replace_usernames(self, message): 193 """Replace User ID with username in message text.""" 194 userids = re.findall(r"\<\@([A-Z0-9]+)(?:\|.+)?\>", message) 195 for userid in userids: 196 user_info = await self.lookup_username(userid) 197 message = message.replace( 198 "<@{userid}>".format(userid=userid), user_info["name"] 199 ) 200 return message 201 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py --- a/opsdroid/connector/slack/__init__.py +++ b/opsdroid/connector/slack/__init__.py @@ -1,6 +1,8 @@ """A connector for Slack.""" import logging import re +import ssl +import certifi import slack from emoji import demojize @@ -25,8 +27,13 @@ self.icon_emoji = config.get("icon-emoji", ":robot_face:") self.token = config["api-token"] self.timeout = config.get("connect-timeout", 10) - self.slack = slack.WebClient(token=self.token, run_async=True) - self.slack_rtm = slack.RTMClient(token=self.token, run_async=True) + self.ssl_context = ssl.create_default_context(cafile=certifi.where()) + self.slack = slack.WebClient( + token=self.token, run_async=True, ssl=self.ssl_context + ) + self.slack_rtm = slack.RTMClient( + token=self.token, run_async=True, ssl=self.ssl_context + ) self.websocket = None self.bot_name = config.get("bot-name", "opsdroid") self.auth_info = None
{"golden_diff": "diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py\n--- a/opsdroid/connector/slack/__init__.py\n+++ b/opsdroid/connector/slack/__init__.py\n@@ -1,6 +1,8 @@\n \"\"\"A connector for Slack.\"\"\"\n import logging\n import re\n+import ssl\n+import certifi\n \n import slack\n from emoji import demojize\n@@ -25,8 +27,13 @@\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n- self.slack = slack.WebClient(token=self.token, run_async=True)\n- self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)\n+ self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n+ self.slack = slack.WebClient(\n+ token=self.token, run_async=True, ssl=self.ssl_context\n+ )\n+ self.slack_rtm = slack.RTMClient(\n+ token=self.token, run_async=True, ssl=self.ssl_context\n+ )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n", "issue": "ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed in Slack Connector\n<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->\r\n# Description\r\nI am trying to build a Slack bot using Opsdroid (master branch). When running `opsdroid start`, I get an error where the Opsdroid bot fails to connect with the Slack Workspace.\r\n\r\n## Steps to Reproduce\r\n1. Install opsdroid\r\n```\r\npip install git+https://github.com/opsdroid/opsdroid.git\r\n```\r\n\r\n2. Create `configuration.yaml` with following content\r\n```\r\nwelcome-message: true\r\n\r\nconnectors:\r\n - name: slack\r\n api-token: \"<Bot OAuth Token>\"\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\r\n3. 
Start opsdroid\r\n\r\n```\r\nopsdroid start\r\n```\r\n\r\n## Expected Functionality\r\nThe expected functionality is for the Opsdroid Bot to get connected with Slack Workspace and interact with the Slack user as per the configured skill.\r\n\r\n\r\n## Experienced Functionality\r\nThe Opsdroid Bot failed to connect with the Slack Workspace with the following error:\r\n\r\n```\r\nINFO opsdroid.connector.slack: Connecting to Slack\r\nTraceback (most recent call last):\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 936, in _wrap_create_connection\r\n return await self._loop.create_connection(*args, **kwargs) # type: ignore # noqa\r\n File \"/usr/lib/python3.6/asyncio/base_events.py\", line 820, in create_connection\r\n sock, protocol_factory, ssl, server_hostname)\r\n File \"/usr/lib/python3.6/asyncio/base_events.py\", line 846, in _create_connection_transport\r\n yield from waiter\r\n File \"/usr/lib/python3.6/asyncio/sslproto.py\", line 505, in data_received\r\n ssldata, appdata = self._sslpipe.feed_ssldata(data)\r\n File \"/usr/lib/python3.6/asyncio/sslproto.py\", line 201, in feed_ssldata\r\n self._sslobj.do_handshake()\r\n File \"/usr/lib/python3.6/ssl.py\", line 689, in do_handshake\r\n self._sslobj.do_handshake()\r\nssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/opsdroid/connector/slack/__init__.py\", line 55, in connect\r\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py\", line 229, in _send\r\n http_verb=http_verb, api_url=api_url, req_args=req_args\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py\", line 259, in _request\r\n async with session.request(http_verb, api_url, **req_args) as res:\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py\", line 1012, in __aenter__\r\n self._resp = await self._coro\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py\", line 483, in _request\r\n timeout=real_timeout\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 523, in connect\r\n proto = await self._create_connection(req, traces, timeout)\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 859, in _create_connection\r\n req, traces, timeout)\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 1004, in _create_direct_connection\r\n raise last_exc\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 986, in _create_direct_connection\r\n req=req, client_error=client_error)\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 941, in _wrap_create_connection\r\n raise ClientConnectorSSLError(req.connection_key, exc) from exc\r\naiohttp.client_exceptions.ClientConnectorSSLError: Cannot connect to host www.slack.com:443 ssl:default [[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)]\r\n```\r\n\r\n## Versions\r\n- 
**Opsdroid version:** master branch in git\r\n- **Python version:** 3.6.8\r\n- **OS/Docker version:** Ubuntu 18.04 LTS\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\nwelcome-message: true\r\n\r\nconnectors:\r\n - name: slack\r\n api-token: \"<Bot OAuth Token>\"\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\r\n## Additional Details\r\nAny other details you wish to include such as screenshots, console messages, etc.\r\n\r\n\r\n<!-- Love opsdroid? Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\n\nimport slack\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import Blocks\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.slack = slack.WebClient(token=self.token, run_async=True)\n self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n _LOGGER.debug(_(\"Connected as %s\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n message[\"text\"],\n user_info[\"name\"],\n message[\"channel\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": False,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.raw_event[\"ts\"],\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n 
response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n", "path": "opsdroid/connector/slack/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\nimport ssl\nimport certifi\n\nimport slack\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import Blocks\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n self.slack = slack.WebClient(\n token=self.token, run_async=True, ssl=self.ssl_context\n )\n self.slack_rtm = slack.RTMClient(\n token=self.token, run_async=True, ssl=self.ssl_context\n )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n _LOGGER.debug(_(\"Connected as %s\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n message[\"text\"],\n user_info[\"name\"],\n message[\"channel\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": False,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.raw_event[\"ts\"],\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n 
response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n", "path": "opsdroid/connector/slack/__init__.py"}]}
3,557
297
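The corrected connector in the record above fixes certificate verification by handing both Slack clients an SSL context built from certifi's CA bundle. A minimal standalone sketch of that pattern (the token is a placeholder; it assumes the same `slack` 2.x client API the connector itself uses):

```python
import ssl

import certifi
import slack  # slackclient 2.x, the same package the connector imports

# Build an SSL context that trusts certifi's bundled CA certificates,
# mirroring what the fixed ConnectorSlack.__init__ does.
ssl_context = ssl.create_default_context(cafile=certifi.where())

# Hand the context to both clients so the Web API and RTM connections
# validate Slack's certificate against certifi rather than the system store.
web_client = slack.WebClient(token="xoxb-placeholder", run_async=True, ssl=ssl_context)
rtm_client = slack.RTMClient(token="xoxb-placeholder", run_async=True, ssl=ssl_context)
```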
gh_patches_debug_22193
rasdani/github-patches
git_diff
DataBiosphere__toil-3590
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add type hints to conversions.py Add type hints to src/toil/lib/encryption/conversions so it can be checked under mypy during linting. Refers to #3568. ┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-888) ┆Issue Number: TOIL-888 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/toil/lib/conversions.py` Content: ``` 1 """Conversion utilities for mapping memory, disk, core declarations from strings to numbers and vice versa.""" 2 import math 3 from typing import Optional, SupportsInt, Tuple 4 5 # See https://en.wikipedia.org/wiki/Binary_prefix 6 BINARY_PREFIXES = ['ki', 'mi', 'gi', 'ti', 'pi', 'ei', 'kib', 'mib', 'gib', 'tib', 'pib', 'eib'] 7 DECIMAL_PREFIXES = ['b', 'k', 'm', 'g', 't', 'p', 'e', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb'] 8 VALID_PREFIXES = BINARY_PREFIXES + DECIMAL_PREFIXES 9 10 11 def bytes_in_unit(unit: str = 'B') -> int: 12 num_bytes = 1 13 if unit.lower() in ['ki', 'kib']: 14 num_bytes = 1 << 10 15 if unit.lower() in ['mi', 'mib']: 16 num_bytes = 1 << 20 17 if unit.lower() in ['gi', 'gib']: 18 num_bytes = 1 << 30 19 if unit.lower() in ['ti', 'tib']: 20 num_bytes = 1 << 40 21 if unit.lower() in ['pi', 'pib']: 22 num_bytes = 1 << 50 23 if unit.lower() in ['ei', 'eib']: 24 num_bytes = 1 << 60 25 26 if unit.lower() in ['k', 'kb']: 27 num_bytes = 1000 28 if unit.lower() in ['m', 'mb']: 29 num_bytes = 1000 ** 2 30 if unit.lower() in ['g', 'gb']: 31 num_bytes = 1000 ** 3 32 if unit.lower() in ['t', 'tb']: 33 num_bytes = 1000 ** 4 34 if unit.lower() in ['p', 'pb']: 35 num_bytes = 1000 ** 5 36 if unit.lower() in ['e', 'eb']: 37 num_bytes = 1000 ** 6 38 return num_bytes 39 40 41 def convert_units(num: float, 42 src_unit: str, 43 dst_unit: Optional[str] = 'B') -> float: 44 """Returns a float representing the converted input in dst_units.""" 45 assert src_unit.lower() in VALID_PREFIXES, f"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}." 46 assert dst_unit.lower() in VALID_PREFIXES, f"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}." 47 return (num * bytes_in_unit(src_unit)) / bytes_in_unit(dst_unit) 48 49 50 def parse_memory_string(string: str) -> Tuple[float, str]: 51 """ 52 Given a string representation of some memory (i.e. '1024 Mib'), return the 53 number and unit. 54 """ 55 for i, character in enumerate(string): 56 # find the first character of the unit 57 if character not in '0123456789.-_ ': 58 units = string[i:].strip() 59 assert units.lower() in VALID_PREFIXES, f"{units} not a valid unit, valid units are {VALID_PREFIXES}." 60 return float(string[:i]), units 61 return float(string), 'b' 62 63 64 def human2bytes(string: str) -> int: 65 """ 66 Given a string representation of some memory (i.e. '1024 Mib'), return the 67 integer number of bytes. 
68 """ 69 value, unit = parse_memory_string(string) 70 return int(convert_units(value, src_unit=unit, dst_unit='b')) 71 72 73 def bytes2human(n: SupportsInt) -> str: 74 """Return a binary value as a human readable string with units.""" 75 n = int(n) 76 if n < 0: 77 raise ValueError("n < 0") 78 elif n < 1: 79 return '0 b' 80 81 power_level = math.floor(math.log(n, 1024)) 82 units = ('b', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei') 83 84 unit = units[power_level if power_level < len(units) else -1] 85 value = convert_units(n, "b", unit) 86 return f'{value:.1f} {unit}' 87 ``` Path: `contrib/admin/mypy-with-ignore.py` Content: ``` 1 #!/usr/bin/env python3 2 """ 3 Runs mypy and ignores files that do not yet have passing type hints. 4 5 Does not type check test files (any path including "src/toil/test"). 6 """ 7 import os 8 import subprocess 9 import sys 10 11 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa 12 sys.path.insert(0, pkg_root) # noqa 13 14 from src.toil.lib.resources import glob # type: ignore 15 16 17 def main(): 18 all_files_to_check = [] 19 for d in ['dashboard', 'docker', 'docs', 'src']: 20 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d)) 21 22 # TODO: Remove these paths as typing is added and mypy conflicts are addressed 23 ignore_paths = [os.path.abspath(f) for f in [ 24 'docker/Dockerfile.py', 25 'docs/conf.py', 26 'docs/vendor/sphinxcontrib/fulltoc.py', 27 'docs/vendor/sphinxcontrib/__init__.py', 28 'src/toil/job.py', 29 'src/toil/leader.py', 30 'src/toil/statsAndLogging.py', 31 'src/toil/common.py', 32 'src/toil/realtimeLogger.py', 33 'src/toil/worker.py', 34 'src/toil/serviceManager.py', 35 'src/toil/toilState.py', 36 'src/toil/__init__.py', 37 'src/toil/resource.py', 38 'src/toil/deferred.py', 39 'src/toil/version.py', 40 'src/toil/wdl/utils.py', 41 'src/toil/wdl/wdl_types.py', 42 'src/toil/wdl/wdl_synthesis.py', 43 'src/toil/wdl/wdl_analysis.py', 44 'src/toil/wdl/wdl_functions.py', 45 'src/toil/wdl/toilwdl.py', 46 'src/toil/wdl/versions/draft2.py', 47 'src/toil/wdl/versions/v1.py', 48 'src/toil/wdl/versions/dev.py', 49 'src/toil/provisioners/clusterScaler.py', 50 'src/toil/provisioners/abstractProvisioner.py', 51 'src/toil/provisioners/gceProvisioner.py', 52 'src/toil/provisioners/__init__.py', 53 'src/toil/provisioners/node.py', 54 'src/toil/provisioners/aws/boto2Context.py', 55 'src/toil/provisioners/aws/awsProvisioner.py', 56 'src/toil/provisioners/aws/__init__.py', 57 'src/toil/batchSystems/slurm.py', 58 'src/toil/batchSystems/gridengine.py', 59 'src/toil/batchSystems/singleMachine.py', 60 'src/toil/batchSystems/abstractBatchSystem.py', 61 'src/toil/batchSystems/parasol.py', 62 'src/toil/batchSystems/kubernetes.py', 63 'src/toil/batchSystems/torque.py', 64 'src/toil/batchSystems/options.py', 65 'src/toil/batchSystems/registry.py', 66 'src/toil/batchSystems/lsf.py', 67 'src/toil/batchSystems/__init__.py', 68 'src/toil/batchSystems/abstractGridEngineBatchSystem.py', 69 'src/toil/batchSystems/lsfHelper.py', 70 'src/toil/batchSystems/htcondor.py', 71 'src/toil/batchSystems/mesos/batchSystem.py', 72 'src/toil/batchSystems/mesos/executor.py', 73 'src/toil/batchSystems/mesos/conftest.py', 74 'src/toil/batchSystems/mesos/__init__.py', 75 'src/toil/batchSystems/mesos/test/__init__.py', 76 'src/toil/cwl/conftest.py', 77 'src/toil/cwl/__init__.py', 78 'src/toil/cwl/cwltoil.py', 79 'src/toil/fileStores/cachingFileStore.py', 80 'src/toil/fileStores/abstractFileStore.py', 81 
'src/toil/fileStores/nonCachingFileStore.py', 82 'src/toil/fileStores/__init__.py', 83 'src/toil/jobStores/utils.py', 84 'src/toil/jobStores/abstractJobStore.py', 85 'src/toil/jobStores/conftest.py', 86 'src/toil/jobStores/fileJobStore.py', 87 'src/toil/jobStores/__init__.py', 88 'src/toil/jobStores/googleJobStore.py', 89 'src/toil/jobStores/aws/utils.py', 90 'src/toil/jobStores/aws/jobStore.py', 91 'src/toil/jobStores/aws/__init__.py', 92 'src/toil/utils/toilDebugFile.py', 93 'src/toil/utils/toilUpdateEC2Instances.py', 94 'src/toil/utils/toilStatus.py', 95 'src/toil/utils/toilStats.py', 96 'src/toil/utils/toilSshCluster.py', 97 'src/toil/utils/toilMain.py', 98 'src/toil/utils/toilKill.py', 99 'src/toil/utils/__init__.py', 100 'src/toil/utils/toilDestroyCluster.py', 101 'src/toil/utils/toilDebugJob.py', 102 'src/toil/utils/toilRsyncCluster.py', 103 'src/toil/utils/toilClean.py', 104 'src/toil/utils/toilLaunchCluster.py', 105 'src/toil/lib/memoize.py', 106 'src/toil/lib/throttle.py', 107 'src/toil/lib/humanize.py', 108 'src/toil/lib/compatibility.py', 109 'src/toil/lib/iterables.py', 110 'src/toil/lib/bioio.py', 111 'src/toil/lib/ec2.py', 112 'src/toil/lib/conversions.py', 113 'src/toil/lib/ec2nodes.py', 114 'src/toil/lib/misc.py', 115 'src/toil/lib/expando.py', 116 'src/toil/lib/threading.py', 117 'src/toil/lib/exceptions.py', 118 'src/toil/lib/__init__.py', 119 'src/toil/lib/generatedEC2Lists.py', 120 'src/toil/lib/retry.py', 121 'src/toil/lib/objects.py', 122 'src/toil/lib/io.py', 123 'src/toil/lib/docker.py', 124 'src/toil/lib/encryption/_nacl.py', 125 'src/toil/lib/encryption/_dummy.py', 126 'src/toil/lib/encryption/conftest.py', 127 'src/toil/lib/encryption/__init__.py', 128 'src/toil/lib/aws/utils.py', 129 'src/toil/lib/aws/__init__.py' 130 ]] 131 132 filtered_files_to_check = [] 133 for file_path in all_files_to_check: 134 if file_path not in ignore_paths and 'src/toil/test' not in file_path: 135 filtered_files_to_check.append(file_path) 136 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default? 137 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check 138 p = subprocess.run(args=args, stdout=subprocess.PIPE) 139 result = p.stdout.decode() 140 print(result) 141 if 'Success: no issues found' not in result: 142 exit(1) 143 144 145 if __name__ == '__main__': 146 main() 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
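The `mypy-with-ignore.py` helper listed above ultimately shells out to mypy with `--follow-imports=skip` over an explicit file list and fails when mypy reports problems. A stripped-down sketch of that invocation (the file list here is illustrative; the real script assembles it with `glob` minus its ignore list):

```python
import subprocess

# Illustrative file list; mypy-with-ignore.py builds this from glob()
# and then filters out ignore_paths and test files.
files = ["src/toil/lib/conversions.py"]

# --follow-imports=skip keeps mypy from type-checking third-party imports.
proc = subprocess.run(["mypy", "--follow-imports=skip", *files], stdout=subprocess.PIPE)
output = proc.stdout.decode()
print(output)
if "Success: no issues found" not in output:
    raise SystemExit(1)
```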
diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py --- a/contrib/admin/mypy-with-ignore.py +++ b/contrib/admin/mypy-with-ignore.py @@ -109,7 +109,7 @@ 'src/toil/lib/iterables.py', 'src/toil/lib/bioio.py', 'src/toil/lib/ec2.py', - 'src/toil/lib/conversions.py', + # 'src/toil/lib/conversions.py', 'src/toil/lib/ec2nodes.py', 'src/toil/lib/misc.py', 'src/toil/lib/expando.py', diff --git a/src/toil/lib/conversions.py b/src/toil/lib/conversions.py --- a/src/toil/lib/conversions.py +++ b/src/toil/lib/conversions.py @@ -40,7 +40,7 @@ def convert_units(num: float, src_unit: str, - dst_unit: Optional[str] = 'B') -> float: + dst_unit: str = 'B') -> float: """Returns a float representing the converted input in dst_units.""" assert src_unit.lower() in VALID_PREFIXES, f"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}." assert dst_unit.lower() in VALID_PREFIXES, f"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}."
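For context on why the diff above narrows `dst_unit` from `Optional[str]` to `str`: the default is a plain string and callers never pass `None`, so the `Optional` annotation only forces mypy (whose strict-optional checking is on by default) to demand a `None` guard before every use. A small illustrative sketch with hypothetical helper names, not code from the repository:

```python
from typing import Optional


def scaled_optional(value: float, unit: Optional[str] = 'B') -> float:
    # With Optional[str], mypy flags a bare unit.lower() with
    # 'Item "None" of "Optional[str]" has no attribute "lower"',
    # so a guard like this is needed even though None is never passed.
    if unit is None:
        unit = 'B'
    return value if unit.lower() == 'b' else value


def scaled_plain(value: float, unit: str = 'B') -> float:
    # A plain str annotation matches how convert_units is actually called,
    # and unit.lower() type-checks without any guard.
    return value if unit.lower() == 'b' else value
```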
{"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -109,7 +109,7 @@\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n- 'src/toil/lib/conversions.py',\n+ # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\ndiff --git a/src/toil/lib/conversions.py b/src/toil/lib/conversions.py\n--- a/src/toil/lib/conversions.py\n+++ b/src/toil/lib/conversions.py\n@@ -40,7 +40,7 @@\n \n def convert_units(num: float,\n src_unit: str,\n- dst_unit: Optional[str] = 'B') -> float:\n+ dst_unit: str = 'B') -> float:\n \"\"\"Returns a float representing the converted input in dst_units.\"\"\"\n assert src_unit.lower() in VALID_PREFIXES, f\"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n assert dst_unit.lower() in VALID_PREFIXES, f\"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n", "issue": "Add type hints to conversions.py\nAdd type hints to src/toil/lib/encryption/conversions so it can be checked under mypy during linting.\n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-888)\n\u2506Issue Number: TOIL-888\n\n", "before_files": [{"content": "\"\"\"Conversion utilities for mapping memory, disk, core declarations from strings to numbers and vice versa.\"\"\"\nimport math\nfrom typing import Optional, SupportsInt, Tuple\n\n# See https://en.wikipedia.org/wiki/Binary_prefix\nBINARY_PREFIXES = ['ki', 'mi', 'gi', 'ti', 'pi', 'ei', 'kib', 'mib', 'gib', 'tib', 'pib', 'eib']\nDECIMAL_PREFIXES = ['b', 'k', 'm', 'g', 't', 'p', 'e', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb']\nVALID_PREFIXES = BINARY_PREFIXES + DECIMAL_PREFIXES\n\n\ndef bytes_in_unit(unit: str = 'B') -> int:\n num_bytes = 1\n if unit.lower() in ['ki', 'kib']:\n num_bytes = 1 << 10\n if unit.lower() in ['mi', 'mib']:\n num_bytes = 1 << 20\n if unit.lower() in ['gi', 'gib']:\n num_bytes = 1 << 30\n if unit.lower() in ['ti', 'tib']:\n num_bytes = 1 << 40\n if unit.lower() in ['pi', 'pib']:\n num_bytes = 1 << 50\n if unit.lower() in ['ei', 'eib']:\n num_bytes = 1 << 60\n\n if unit.lower() in ['k', 'kb']:\n num_bytes = 1000\n if unit.lower() in ['m', 'mb']:\n num_bytes = 1000 ** 2\n if unit.lower() in ['g', 'gb']:\n num_bytes = 1000 ** 3\n if unit.lower() in ['t', 'tb']:\n num_bytes = 1000 ** 4\n if unit.lower() in ['p', 'pb']:\n num_bytes = 1000 ** 5\n if unit.lower() in ['e', 'eb']:\n num_bytes = 1000 ** 6\n return num_bytes\n\n\ndef convert_units(num: float,\n src_unit: str,\n dst_unit: Optional[str] = 'B') -> float:\n \"\"\"Returns a float representing the converted input in dst_units.\"\"\"\n assert src_unit.lower() in VALID_PREFIXES, f\"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n assert dst_unit.lower() in VALID_PREFIXES, f\"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n return (num * bytes_in_unit(src_unit)) / bytes_in_unit(dst_unit)\n\n\ndef parse_memory_string(string: str) -> Tuple[float, str]:\n \"\"\"\n Given a string representation of some memory (i.e. 
'1024 Mib'), return the\n number and unit.\n \"\"\"\n for i, character in enumerate(string):\n # find the first character of the unit\n if character not in '0123456789.-_ ':\n units = string[i:].strip()\n assert units.lower() in VALID_PREFIXES, f\"{units} not a valid unit, valid units are {VALID_PREFIXES}.\"\n return float(string[:i]), units\n return float(string), 'b'\n\n\ndef human2bytes(string: str) -> int:\n \"\"\"\n Given a string representation of some memory (i.e. '1024 Mib'), return the\n integer number of bytes.\n \"\"\"\n value, unit = parse_memory_string(string)\n return int(convert_units(value, src_unit=unit, dst_unit='b'))\n\n\ndef bytes2human(n: SupportsInt) -> str:\n \"\"\"Return a binary value as a human readable string with units.\"\"\"\n n = int(n)\n if n < 0:\n raise ValueError(\"n < 0\")\n elif n < 1:\n return '0 b'\n\n power_level = math.floor(math.log(n, 1024))\n units = ('b', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei')\n\n unit = units[power_level if power_level < len(units) else -1]\n value = convert_units(n, \"b\", unit)\n return f'{value:.1f} {unit}'\n", "path": "src/toil/lib/conversions.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 
'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/toilKill.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n 'src/toil/utils/toilClean.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}], "after_files": [{"content": "\"\"\"Conversion utilities for mapping memory, disk, core declarations from strings to numbers and vice versa.\"\"\"\nimport math\nfrom typing import Optional, SupportsInt, Tuple\n\n# See https://en.wikipedia.org/wiki/Binary_prefix\nBINARY_PREFIXES = ['ki', 'mi', 'gi', 'ti', 'pi', 'ei', 'kib', 'mib', 'gib', 'tib', 'pib', 'eib']\nDECIMAL_PREFIXES = ['b', 'k', 'm', 'g', 't', 'p', 'e', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb']\nVALID_PREFIXES = BINARY_PREFIXES + DECIMAL_PREFIXES\n\n\ndef bytes_in_unit(unit: str = 'B') -> int:\n num_bytes = 1\n if unit.lower() in ['ki', 'kib']:\n num_bytes = 1 << 10\n if unit.lower() in ['mi', 'mib']:\n num_bytes = 1 << 20\n if unit.lower() in ['gi', 'gib']:\n num_bytes = 1 << 30\n if unit.lower() in ['ti', 'tib']:\n num_bytes = 1 << 40\n if unit.lower() in ['pi', 
'pib']:\n num_bytes = 1 << 50\n if unit.lower() in ['ei', 'eib']:\n num_bytes = 1 << 60\n\n if unit.lower() in ['k', 'kb']:\n num_bytes = 1000\n if unit.lower() in ['m', 'mb']:\n num_bytes = 1000 ** 2\n if unit.lower() in ['g', 'gb']:\n num_bytes = 1000 ** 3\n if unit.lower() in ['t', 'tb']:\n num_bytes = 1000 ** 4\n if unit.lower() in ['p', 'pb']:\n num_bytes = 1000 ** 5\n if unit.lower() in ['e', 'eb']:\n num_bytes = 1000 ** 6\n return num_bytes\n\n\ndef convert_units(num: float,\n src_unit: str,\n dst_unit: str = 'B') -> float:\n \"\"\"Returns a float representing the converted input in dst_units.\"\"\"\n assert src_unit.lower() in VALID_PREFIXES, f\"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n assert dst_unit.lower() in VALID_PREFIXES, f\"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n return (num * bytes_in_unit(src_unit)) / bytes_in_unit(dst_unit)\n\n\ndef parse_memory_string(string: str) -> Tuple[float, str]:\n \"\"\"\n Given a string representation of some memory (i.e. '1024 Mib'), return the\n number and unit.\n \"\"\"\n for i, character in enumerate(string):\n # find the first character of the unit\n if character not in '0123456789.-_ ':\n units = string[i:].strip()\n assert units.lower() in VALID_PREFIXES, f\"{units} not a valid unit, valid units are {VALID_PREFIXES}.\"\n return float(string[:i]), units\n return float(string), 'b'\n\n\ndef human2bytes(string: str) -> int:\n \"\"\"\n Given a string representation of some memory (i.e. '1024 Mib'), return the\n integer number of bytes.\n \"\"\"\n value, unit = parse_memory_string(string)\n return int(convert_units(value, src_unit=unit, dst_unit='b'))\n\n\ndef bytes2human(n: SupportsInt) -> str:\n \"\"\"Return a binary value as a human readable string with units.\"\"\"\n n = int(n)\n if n < 0:\n raise ValueError(\"n < 0\")\n elif n < 1:\n return '0 b'\n\n power_level = math.floor(math.log(n, 1024))\n units = ('b', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei')\n\n unit = units[power_level if power_level < len(units) else -1]\n value = convert_units(n, \"b\", unit)\n return f'{value:.1f} {unit}'\n", "path": "src/toil/lib/conversions.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 
'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/toilKill.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n 'src/toil/utils/toilClean.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, 
stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]}
3,394
310
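As a quick usage check of the conversion helpers from the record above (assuming `toil` is importable; values chosen purely for illustration):

```python
from toil.lib.conversions import bytes2human, convert_units, human2bytes

assert human2bytes("1 Ki") == 1024      # binary prefix -> 1 << 10
assert human2bytes("1 kb") == 1000      # decimal prefix -> 10 ** 3
assert convert_units(2048, "b", "Ki") == 2.0
print(bytes2human(1536))                # '1.5 Ki'
```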
gh_patches_debug_20262
rasdani/github-patches
git_diff
pypa__setuptools-2984
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'
### setuptools version

setuptools==60.0.5

### Python version

python 3.10

### OS

macOS intel 12.1

### Additional environment information

_No response_

### Description

python3 -m pip install -U setuptools

### Expected behavior

python3 -m pip install -U setuptools

### How to Reproduce

brew install [email protected]
python3 -m pip install -U setuptools

### Output

```
Collecting setuptools
  Downloading setuptools-60.1.1-py3-none-any.whl (952 kB)
     |████████████████████████████████| 952 kB 351 kB/s
Installing collected packages: setuptools
  Attempting uninstall: setuptools
    Found existing installation: setuptools 60.0.5
    Uninstalling setuptools-60.0.5:
      Successfully uninstalled setuptools-60.0.5
Successfully installed setuptools-60.1.1
```

and when you exit and relogin

```
Error processing line 1 of /usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/distutils-precedence.pth:

  Traceback (most recent call last):
    File "/usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site.py", line 186, in addpackage
      exec(line)
    File "<string>", line 1, in <module>
  AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'

Remainder of file ignored
```

### Code of Conduct

- [X] I agree to follow the PSF Code of Conduct

--- END ISSUE ---

Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES --- Path: `_distutils_hack/__init__.py` Content: ``` 1 import sys 2 import os 3 import re 4 import importlib 5 import warnings 6 import contextlib 7 8 9 is_pypy = '__pypy__' in sys.builtin_module_names 10 11 12 warnings.filterwarnings('ignore', 13 r'.+ distutils\b.+ deprecated', 14 DeprecationWarning) 15 16 17 def warn_distutils_present(): 18 if 'distutils' not in sys.modules: 19 return 20 if is_pypy and sys.version_info < (3, 7): 21 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning 22 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250 23 return 24 warnings.warn( 25 "Distutils was imported before Setuptools, but importing Setuptools " 26 "also replaces the `distutils` module in `sys.modules`. This may lead " 27 "to undesirable behaviors or errors. To avoid these issues, avoid " 28 "using distutils directly, ensure that setuptools is installed in the " 29 "traditional way (e.g. not an editable install), and/or make sure " 30 "that setuptools is always imported before distutils.") 31 32 33 def clear_distutils(): 34 if 'distutils' not in sys.modules: 35 return 36 warnings.warn("Setuptools is replacing distutils.") 37 mods = [name for name in sys.modules if re.match(r'distutils\b', name)] 38 for name in mods: 39 del sys.modules[name] 40 41 42 def enabled(): 43 """ 44 Allow selection of distutils by environment variable. 45 """ 46 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') 47 return which == 'local' 48 49 50 def ensure_local_distutils(): 51 clear_distutils() 52 53 # With the DistutilsMetaFinder in place, 54 # perform an import to cause distutils to be 55 # loaded from setuptools._distutils. Ref #2906. 56 with shim(): 57 importlib.import_module('distutils') 58 59 # check that submodules load as expected 60 core = importlib.import_module('distutils.core') 61 assert '_distutils' in core.__file__, core.__file__ 62 63 64 def do_override(): 65 """ 66 Ensure that the local copy of distutils is preferred over stdlib. 67 68 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 69 for more motivation. 70 """ 71 if enabled(): 72 warn_distutils_present() 73 ensure_local_distutils() 74 75 76 class DistutilsMetaFinder: 77 def find_spec(self, fullname, path, target=None): 78 if path is not None: 79 return 80 81 method_name = 'spec_for_{fullname}'.format(**locals()) 82 method = getattr(self, method_name, lambda: None) 83 return method() 84 85 def spec_for_distutils(self): 86 import importlib.abc 87 import importlib.util 88 89 # In cases of path manipulation during sitecustomize, 90 # Setuptools might actually not be present even though 91 # the hook has been loaded. Allow the caller to fall 92 # back to stdlib behavior. See #2980. 93 if not importlib.util.find_spec('setuptools'): 94 return 95 96 class DistutilsLoader(importlib.abc.Loader): 97 98 def create_module(self, spec): 99 return importlib.import_module('setuptools._distutils') 100 101 def exec_module(self, module): 102 pass 103 104 return importlib.util.spec_from_loader('distutils', DistutilsLoader()) 105 106 def spec_for_pip(self): 107 """ 108 Ensure stdlib distutils when running under pip. 109 See pypa/pip#8761 for rationale. 110 """ 111 if self.pip_imported_during_build(): 112 return 113 clear_distutils() 114 self.spec_for_distutils = lambda: None 115 116 @classmethod 117 def pip_imported_during_build(cls): 118 """ 119 Detect if pip is being imported in a build script. Ref #2355. 
120 """ 121 import traceback 122 return any( 123 cls.frame_file_is_setup(frame) 124 for frame, line in traceback.walk_stack(None) 125 ) 126 127 @staticmethod 128 def frame_file_is_setup(frame): 129 """ 130 Return True if the indicated frame suggests a setup.py file. 131 """ 132 # some frames may not have __file__ (#2940) 133 return frame.f_globals.get('__file__', '').endswith('setup.py') 134 135 136 DISTUTILS_FINDER = DistutilsMetaFinder() 137 138 139 def ensure_shim(): 140 DISTUTILS_FINDER in sys.meta_path or add_shim() 141 142 143 @contextlib.contextmanager 144 def shim(): 145 add_shim() 146 try: 147 yield 148 finally: 149 remove_shim() 150 151 152 def add_shim(): 153 sys.meta_path.insert(0, DISTUTILS_FINDER) 154 155 156 def remove_shim(): 157 try: 158 sys.meta_path.remove(DISTUTILS_FINDER) 159 except ValueError: 160 pass 161 ``` Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 import os 4 import sys 5 import textwrap 6 7 import setuptools 8 from setuptools.command.install import install 9 10 here = os.path.dirname(__file__) 11 12 13 package_data = dict( 14 setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'], 15 ) 16 17 force_windows_specific_files = ( 18 os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES", "1").lower() 19 not in ("", "0", "false", "no") 20 ) 21 22 include_windows_files = sys.platform == 'win32' or force_windows_specific_files 23 24 if include_windows_files: 25 package_data.setdefault('setuptools', []).extend(['*.exe']) 26 package_data.setdefault('setuptools.command', []).extend(['*.xml']) 27 28 29 def pypi_link(pkg_filename): 30 """ 31 Given the filename, including md5 fragment, construct the 32 dependency link for PyPI. 33 """ 34 root = 'https://files.pythonhosted.org/packages/source' 35 name, sep, rest = pkg_filename.partition('-') 36 parts = root, name[0], name, pkg_filename 37 return '/'.join(parts) 38 39 40 class install_with_pth(install): 41 """ 42 Custom install command to install a .pth file for distutils patching. 43 44 This hack is necessary because there's no standard way to install behavior 45 on startup (and it's debatable if there should be one). This hack (ab)uses 46 the `extra_path` behavior in Setuptools to install a `.pth` file with 47 implicit behavior on startup to give higher precedence to the local version 48 of `distutils` over the version from the standard library. 49 50 Please do not replicate this behavior. 
51 """ 52 53 _pth_name = 'distutils-precedence' 54 _pth_contents = textwrap.dedent(""" 55 import os 56 var = 'SETUPTOOLS_USE_DISTUTILS' 57 enabled = os.environ.get(var, 'local') == 'local' 58 enabled and __import__('_distutils_hack').ensure_shim() 59 """).lstrip().replace('\n', '; ') 60 61 def initialize_options(self): 62 install.initialize_options(self) 63 self.extra_path = self._pth_name, self._pth_contents 64 65 def finalize_options(self): 66 install.finalize_options(self) 67 self._restore_install_lib() 68 69 def _restore_install_lib(self): 70 """ 71 Undo secondary effect of `extra_path` adding to `install_lib` 72 """ 73 suffix = os.path.relpath(self.install_lib, self.install_libbase) 74 75 if suffix.strip() == self._pth_contents.strip(): 76 self.install_lib = self.install_libbase 77 78 79 setup_params = dict( 80 cmdclass={'install': install_with_pth}, 81 package_data=package_data, 82 ) 83 84 if __name__ == '__main__': 85 # allow setup.py to run from another directory 86 here and os.chdir(here) 87 dist = setuptools.setup(**setup_params) 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py --- a/_distutils_hack/__init__.py +++ b/_distutils_hack/__init__.py @@ -136,20 +136,20 @@ DISTUTILS_FINDER = DistutilsMetaFinder() -def ensure_shim(): - DISTUTILS_FINDER in sys.meta_path or add_shim() +def add_shim(): + DISTUTILS_FINDER in sys.meta_path or insert_shim() @contextlib.contextmanager def shim(): - add_shim() + insert_shim() try: yield finally: remove_shim() -def add_shim(): +def insert_shim(): sys.meta_path.insert(0, DISTUTILS_FINDER) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -55,7 +55,7 @@ import os var = 'SETUPTOOLS_USE_DISTUTILS' enabled = os.environ.get(var, 'local') == 'local' - enabled and __import__('_distutils_hack').ensure_shim() + enabled and __import__('_distutils_hack').add_shim() """).lstrip().replace('\n', '; ') def initialize_options(self):
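To make the renamed hook concrete: `install_with_pth` in the setup.py above joins `_pth_contents` with `'; '` before writing it into `distutils-precedence.pth`, and `site.py` exec()s that single line at every interpreter startup. After the diff, the generated line would look roughly like the following (an approximation, not copied from an installed file):

```python
# Approximate content of distutils-precedence.pth after the rename;
# site.py executes this one line on startup against whichever
# _distutils_hack module is importable first.
pth_line = (
    "import os; "
    "var = 'SETUPTOOLS_USE_DISTUTILS'; "
    "enabled = os.environ.get(var, 'local') == 'local'; "
    "enabled and __import__('_distutils_hack').add_shim()"
)
print(pth_line)
```

Because the .pth line can end up running against a different `_distutils_hack` than the one that wrote it, calling a name that exists across versions matters, which is presumably why the diff goes back to `add_shim` rather than keeping `ensure_shim`.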
{"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -136,20 +136,20 @@\n DISTUTILS_FINDER = DistutilsMetaFinder()\n \n \n-def ensure_shim():\n- DISTUTILS_FINDER in sys.meta_path or add_shim()\n+def add_shim():\n+ DISTUTILS_FINDER in sys.meta_path or insert_shim()\n \n \n @contextlib.contextmanager\n def shim():\n- add_shim()\n+ insert_shim()\n try:\n yield\n finally:\n remove_shim()\n \n \n-def add_shim():\n+def insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,7 +55,7 @@\n import os\n var = 'SETUPTOOLS_USE_DISTUTILS'\n enabled = os.environ.get(var, 'local') == 'local'\n- enabled and __import__('_distutils_hack').ensure_shim()\n+ enabled and __import__('_distutils_hack').add_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n \n def initialize_options(self):\n", "issue": "AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\n### setuptools version\r\n\r\nsetuptools==60.0.5\r\n\r\n### Python version\r\n\r\npython 3.10\r\n\r\n### OS\r\n\r\nmacOS intel 12.1\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### Expected behavior\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### How to Reproduce\r\n\r\nbrew install [email protected]\r\npython3 -m pip install -U setuptools\r\n\r\n### Output\r\n\r\n```\r\nCollecting setuptools\r\n Downloading setuptools-60.1.1-py3-none-any.whl (952 kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 952 kB 351 kB/s \r\nInstalling collected packages: setuptools\r\n Attempting uninstall: setuptools\r\n Found existing installation: setuptools 60.0.5\r\n Uninstalling setuptools-60.0.5:\r\n Successfully uninstalled setuptools-60.0.5\r\nSuccessfully installed setuptools-60.1.1\r\n```\r\n\r\nand when you exit and relogin\r\n\r\n```\r\nError processing line 1 of /usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/distutils-precedence.pth:\r\n\r\n Traceback (most recent call last):\r\n File \"/usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site.py\", line 186, in addpackage\r\n exec(line)\r\n File \"<string>\", line 1, in <module>\r\n AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\r\n\r\nRemainder of file ignored\r\n```\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the PSF Code of Conduct\nAttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\n### setuptools version\r\n\r\nsetuptools==60.0.5\r\n\r\n### Python version\r\n\r\npython 3.10\r\n\r\n### OS\r\n\r\nmacOS intel 12.1\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### Expected behavior\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### How to Reproduce\r\n\r\nbrew install [email protected]\r\npython3 -m pip install -U setuptools\r\n\r\n### Output\r\n\r\n```\r\nCollecting setuptools\r\n Downloading setuptools-60.1.1-py3-none-any.whl (952 kB)\r\n 
|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 952 kB 351 kB/s \r\nInstalling collected packages: setuptools\r\n Attempting uninstall: setuptools\r\n Found existing installation: setuptools 60.0.5\r\n Uninstalling setuptools-60.0.5:\r\n Successfully uninstalled setuptools-60.0.5\r\nSuccessfully installed setuptools-60.1.1\r\n```\r\n\r\nand when you exit and relogin\r\n\r\n```\r\nError processing line 1 of /usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/distutils-precedence.pth:\r\n\r\n Traceback (most recent call last):\r\n File \"/usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site.py\", line 186, in addpackage\r\n exec(line)\r\n File \"<string>\", line 1, in <module>\r\n AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\r\n\r\nRemainder of file ignored\r\n```\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\nimport contextlib\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. 
Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n # In cases of path manipulation during sitecustomize,\n # Setuptools might actually not be present even though\n # the hook has been loaded. Allow the caller to fall\n # back to stdlib behavior. See #2980.\n if not importlib.util.find_spec('setuptools'):\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef ensure_shim():\n DISTUTILS_FINDER in sys.meta_path or add_shim()\n\n\[email protected]\ndef shim():\n add_shim()\n try:\n yield\n finally:\n remove_shim()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools.command.install import install\n\nhere = os.path.dirname(__file__)\n\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\", \"1\").lower()\n not in (\"\", \"0\", \"false\", \"no\")\n)\n\ninclude_windows_files = sys.platform == 'win32' or force_windows_specific_files\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://files.pythonhosted.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nclass install_with_pth(install):\n \"\"\"\n 
Custom install command to install a .pth file for distutils patching.\n\n This hack is necessary because there's no standard way to install behavior\n on startup (and it's debatable if there should be one). This hack (ab)uses\n the `extra_path` behavior in Setuptools to install a `.pth` file with\n implicit behavior on startup to give higher precedence to the local version\n of `distutils` over the version from the standard library.\n\n Please do not replicate this behavior.\n \"\"\"\n\n _pth_name = 'distutils-precedence'\n _pth_contents = textwrap.dedent(\"\"\"\n import os\n var = 'SETUPTOOLS_USE_DISTUTILS'\n enabled = os.environ.get(var, 'local') == 'local'\n enabled and __import__('_distutils_hack').ensure_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n\n def initialize_options(self):\n install.initialize_options(self)\n self.extra_path = self._pth_name, self._pth_contents\n\n def finalize_options(self):\n install.finalize_options(self)\n self._restore_install_lib()\n\n def _restore_install_lib(self):\n \"\"\"\n Undo secondary effect of `extra_path` adding to `install_lib`\n \"\"\"\n suffix = os.path.relpath(self.install_lib, self.install_libbase)\n\n if suffix.strip() == self._pth_contents.strip():\n self.install_lib = self.install_libbase\n\n\nsetup_params = dict(\n cmdclass={'install': install_with_pth},\n package_data=package_data,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}], "after_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\nimport contextlib\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. 
Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n # In cases of path manipulation during sitecustomize,\n # Setuptools might actually not be present even though\n # the hook has been loaded. Allow the caller to fall\n # back to stdlib behavior. See #2980.\n if not importlib.util.find_spec('setuptools'):\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n DISTUTILS_FINDER in sys.meta_path or insert_shim()\n\n\[email protected]\ndef shim():\n insert_shim()\n try:\n yield\n finally:\n remove_shim()\n\n\ndef insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools.command.install import install\n\nhere = os.path.dirname(__file__)\n\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\", \"1\").lower()\n not in (\"\", \"0\", \"false\", \"no\")\n)\n\ninclude_windows_files = sys.platform == 'win32' or force_windows_specific_files\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://files.pythonhosted.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nclass install_with_pth(install):\n 
\"\"\"\n Custom install command to install a .pth file for distutils patching.\n\n This hack is necessary because there's no standard way to install behavior\n on startup (and it's debatable if there should be one). This hack (ab)uses\n the `extra_path` behavior in Setuptools to install a `.pth` file with\n implicit behavior on startup to give higher precedence to the local version\n of `distutils` over the version from the standard library.\n\n Please do not replicate this behavior.\n \"\"\"\n\n _pth_name = 'distutils-precedence'\n _pth_contents = textwrap.dedent(\"\"\"\n import os\n var = 'SETUPTOOLS_USE_DISTUTILS'\n enabled = os.environ.get(var, 'local') == 'local'\n enabled and __import__('_distutils_hack').add_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n\n def initialize_options(self):\n install.initialize_options(self)\n self.extra_path = self._pth_name, self._pth_contents\n\n def finalize_options(self):\n install.finalize_options(self)\n self._restore_install_lib()\n\n def _restore_install_lib(self):\n \"\"\"\n Undo secondary effect of `extra_path` adding to `install_lib`\n \"\"\"\n suffix = os.path.relpath(self.install_lib, self.install_libbase)\n\n if suffix.strip() == self._pth_contents.strip():\n self.install_lib = self.install_libbase\n\n\nsetup_params = dict(\n cmdclass={'install': install_with_pth},\n package_data=package_data,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}]}
3410
300
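The record that ends here centres on the `_distutils_hack` meta-path shim: a `DistutilsMetaFinder` inserted at the front of `sys.meta_path`, with the before/after files renaming the idempotent registration helper so that `add_shim()` performs the membership check and `insert_shim()` does the actual insertion. Below is a minimal, self-contained sketch of that finder/loader pattern; the module name `shimmed_example` and its `answer` attribute are invented for illustration and are not part of the record.

```python
import importlib.abc
import importlib.util
import sys


class ExampleFinder(importlib.abc.MetaPathFinder):
    """Serve a synthetic module called `shimmed_example` from sys.meta_path."""

    def find_spec(self, fullname, path, target=None):
        if fullname != "shimmed_example":
            return None  # defer to the normal import machinery
        return importlib.util.spec_from_loader(fullname, ExampleLoader())


class ExampleLoader(importlib.abc.Loader):
    def create_module(self, spec):
        return None  # use the default module creation

    def exec_module(self, module):
        module.answer = 42  # populate the synthetic module


_FINDER = ExampleFinder()


def add_shim():
    # Idempotent registration, mirroring the pattern in the record above.
    if _FINDER not in sys.meta_path:
        sys.meta_path.insert(0, _FINDER)


def remove_shim():
    try:
        sys.meta_path.remove(_FINDER)
    except ValueError:
        pass


if __name__ == "__main__":
    add_shim()
    add_shim()  # a second call is a no-op
    import shimmed_example
    print(shimmed_example.answer)  # -> 42
    remove_shim()
```

The `ensure_local_distutils()` helper in the record applies the same idea, but its loader hands back `setuptools._distutils` instead of a synthetic module.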
gh_patches_debug_32421
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-4249
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [SecurityHub] Findings format seems to require a 'Description' field that is not supplied The SecurityHub findings format seems to have changed, such that it has a requirement that a 'Description' field should be provided which Cloud Custodian doesn't honour. 'FailedFindings': [{'Id': 'eu-west-1/627886280200/a4c96f76077a6a98477f63fbd679d331/e2e53110f710e5e82ae9e7c7d174d6f9', 'ErrorCode': 'InvalidInput', 'ErrorMessage': "Finding does not adhere to Amazon Finding Format. data should have required property 'Description'."}]} --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `c7n/actions/securityhub.py` Content: ``` 1 # Copyright 2018 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from collections import Counter 15 from datetime import datetime 16 from dateutil.tz import tzutc 17 18 import hashlib 19 import jmespath 20 import json 21 22 from .core import BaseAction 23 from c7n.utils import type_schema, local_session, chunks, dumps, filter_empty 24 from c7n.exceptions import PolicyValidationError 25 26 from c7n.manager import resources as aws_resources 27 from c7n.version import version 28 29 30 FindingTypes = { 31 "Software and Configuration Checks", 32 "TTPs", 33 "Effects", 34 "Unusual Behaviors", 35 "Sensitive Data Identifications" 36 } 37 38 # Mostly undocumented value size limit 39 SECHUB_VALUE_SIZE_LIMIT = 1024 40 41 42 class PostFinding(BaseAction): 43 """Report a finding to AWS Security Hub. 44 45 Custodian acts as a finding provider, allowing users to craft 46 policies that report to the AWS SecurityHub. 47 48 For resources that are taggable, we will tag the resource with an identifier 49 such that further findings generate updates. 50 51 Example generate a finding for accounts that don't have shield enabled. 52 53 :example: 54 55 .. 
code-block:: yaml 56 57 policies: 58 59 - name: account-shield-enabled 60 resource: account 61 filters: 62 - shield-enabled 63 actions: 64 - type: post-finding 65 severity_normalized: 6 66 types: 67 - "Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)" 68 recommendation: "Enable shield" 69 recommendation_url: "https://www.example.com/policies/AntiDDoS.html" 70 confidence: 100 71 compliance_status: FAILED 72 73 """ # NOQA 74 75 FindingVersion = "2018-10-08" 76 ProductName = "default" 77 78 permissions = ('securityhub:BatchImportFindings',) 79 80 schema_alias = True 81 schema = type_schema( 82 "post-finding", 83 required=["types"], 84 title={"type": "string"}, 85 severity={"type": "number", 'default': 0}, 86 severity_normalized={"type": "number", "min": 0, "max": 100, 'default': 0}, 87 confidence={"type": "number", "min": 0, "max": 100}, 88 criticality={"type": "number", "min": 0, "max": 100}, 89 # Cross region aggregation 90 region={'type': 'string', 'description': 'cross-region aggregation target'}, 91 recommendation={"type": "string"}, 92 recommendation_url={"type": "string"}, 93 fields={"type": "object"}, 94 batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10}, 95 types={ 96 "type": "array", 97 "minItems": 1, 98 "items": {"type": "string"}, 99 }, 100 compliance_status={ 101 "type": "string", 102 "enum": ["PASSED", "WARNING", "FAILED", "NOT_AVAILABLE"], 103 }, 104 ) 105 106 NEW_FINDING = 'New' 107 108 def validate(self): 109 for finding_type in self.data["types"]: 110 if finding_type.count('/') > 2 or finding_type.split('/')[0] not in FindingTypes: 111 raise PolicyValidationError( 112 "Finding types must be in the format 'namespace/category/classifier'." 113 " Found {}. Valid namespace values are: {}.".format( 114 finding_type, " | ".join([ns for ns in FindingTypes]))) 115 116 def get_finding_tag(self, resource): 117 finding_tag = None 118 tags = resource.get('Tags', []) 119 120 finding_key = '{}:{}'.format('c7n:FindingId', 121 self.data.get('title', self.manager.ctx.policy.name)) 122 123 # Support Tags as dictionary 124 if isinstance(tags, dict): 125 return tags.get(finding_key) 126 127 # Support Tags as list of {'Key': 'Value'} 128 for t in tags: 129 key = t['Key'] 130 value = t['Value'] 131 if key == finding_key: 132 finding_tag = value 133 return finding_tag 134 135 def group_resources(self, resources): 136 grouped_resources = {} 137 for r in resources: 138 finding_tag = self.get_finding_tag(r) or self.NEW_FINDING 139 grouped_resources.setdefault(finding_tag, []).append(r) 140 return grouped_resources 141 142 def process(self, resources, event=None): 143 region_name = self.data.get('region', self.manager.config.region) 144 client = local_session( 145 self.manager.session_factory).client( 146 "securityhub", region_name=region_name) 147 148 now = datetime.utcnow().replace(tzinfo=tzutc()).isoformat() 149 # default batch size to one to work around security hub console issue 150 # which only shows a single resource in a finding. 
151 batch_size = self.data.get('batch_size', 1) 152 stats = Counter() 153 for key, grouped_resources in self.group_resources(resources).items(): 154 for resource_set in chunks(grouped_resources, batch_size): 155 stats['Finding'] += 1 156 if key == self.NEW_FINDING: 157 finding_id = None 158 created_at = now 159 updated_at = now 160 else: 161 finding_id, created_at = self.get_finding_tag( 162 resource_set[0]).split(':', 1) 163 updated_at = now 164 165 finding = self.get_finding( 166 resource_set, finding_id, created_at, updated_at) 167 import_response = client.batch_import_findings( 168 Findings=[finding]) 169 if import_response['FailedCount'] > 0: 170 stats['Failed'] += import_response['FailedCount'] 171 self.log.error( 172 "import_response=%s" % (import_response)) 173 if key == self.NEW_FINDING: 174 stats['New'] += len(resource_set) 175 # Tag resources with new finding ids 176 tag_action = self.manager.action_registry.get('tag') 177 if tag_action is None: 178 continue 179 tag_action({ 180 'key': '{}:{}'.format( 181 'c7n:FindingId', 182 self.data.get( 183 'title', self.manager.ctx.policy.name)), 184 'value': '{}:{}'.format( 185 finding['Id'], created_at)}, 186 self.manager).process(resource_set) 187 else: 188 stats['Update'] += len(resource_set) 189 190 self.log.debug( 191 "policy:%s securityhub %d findings resources %d new %d updated %d failed", 192 self.manager.ctx.policy.name, 193 stats['Finding'], 194 stats['New'], 195 stats['Update'], 196 stats['Failed']) 197 198 def get_finding(self, resources, existing_finding_id, created_at, updated_at): 199 policy = self.manager.ctx.policy 200 model = self.manager.resource_type 201 region = self.data.get('region', self.manager.config.region) 202 203 if existing_finding_id: 204 finding_id = existing_finding_id 205 else: 206 finding_id = '{}/{}/{}/{}'.format( 207 self.manager.config.region, 208 self.manager.config.account_id, 209 hashlib.md5(json.dumps( 210 policy.data).encode('utf8')).hexdigest(), 211 hashlib.md5(json.dumps(list(sorted( 212 [r[model.id] for r in resources]))).encode( 213 'utf8')).hexdigest()) 214 finding = { 215 "SchemaVersion": self.FindingVersion, 216 "ProductArn": "arn:aws:securityhub:{}:{}:product/{}/{}".format( 217 region, 218 self.manager.config.account_id, 219 self.manager.config.account_id, 220 self.ProductName, 221 ), 222 "AwsAccountId": self.manager.config.account_id, 223 "Description": self.data.get( 224 "description", policy.data.get("description", "") 225 ).strip(), 226 "Title": self.data.get("title", policy.name), 227 'Id': finding_id, 228 "GeneratorId": policy.name, 229 'CreatedAt': created_at, 230 'UpdatedAt': updated_at, 231 "RecordState": "ACTIVE", 232 } 233 234 severity = {'Product': 0, 'Normalized': 0} 235 if self.data.get("severity") is not None: 236 severity["Product"] = self.data["severity"] 237 if self.data.get("severity_normalized") is not None: 238 severity["Normalized"] = self.data["severity_normalized"] 239 if severity: 240 finding["Severity"] = severity 241 242 recommendation = {} 243 if self.data.get("recommendation"): 244 recommendation["Text"] = self.data["recommendation"] 245 if self.data.get("recommendation_url"): 246 recommendation["Url"] = self.data["recommendation_url"] 247 if recommendation: 248 finding["Remediation"] = {"Recommendation": recommendation} 249 250 if "confidence" in self.data: 251 finding["Confidence"] = self.data["confidence"] 252 if "criticality" in self.data: 253 finding["Criticality"] = self.data["criticality"] 254 if "compliance_status" in self.data: 255 
finding["Compliance"] = {"Status": self.data["compliance_status"]} 256 257 fields = { 258 'resource': policy.resource_type, 259 'ProviderName': 'CloudCustodian', 260 'ProviderVersion': version 261 } 262 263 if "fields" in self.data: 264 fields.update(self.data["fields"]) 265 else: 266 tags = {} 267 for t in policy.tags: 268 if ":" in t: 269 k, v = t.split(":", 1) 270 else: 271 k, v = t, "" 272 tags[k] = v 273 fields.update(tags) 274 if fields: 275 finding["ProductFields"] = fields 276 277 finding_resources = [] 278 for r in resources: 279 finding_resources.append(self.format_resource(r)) 280 finding["Resources"] = finding_resources 281 finding["Types"] = list(self.data["types"]) 282 283 return filter_empty(finding) 284 285 def format_resource(self, r): 286 raise NotImplementedError("subclass responsibility") 287 288 289 class OtherResourcePostFinding(PostFinding): 290 291 fields = () 292 293 def format_resource(self, r): 294 details = {} 295 for k in r: 296 if isinstance(k, (list, dict)): 297 continue 298 details[k] = r[k] 299 300 for f in self.fields: 301 value = jmespath.search(f['expr'], r) 302 if not value: 303 continue 304 details[f['key']] = value 305 306 for k, v in details.items(): 307 if isinstance(v, datetime): 308 v = v.isoformat() 309 elif isinstance(v, (list, dict)): 310 v = dumps(v) 311 elif isinstance(v, (int, float, bool)): 312 v = str(v) 313 else: 314 continue 315 details[k] = v[:SECHUB_VALUE_SIZE_LIMIT] 316 317 details['c7n:resource-type'] = self.manager.type 318 other = { 319 'Type': 'Other', 320 'Id': self.manager.get_arns([r])[0], 321 'Region': self.manager.config.region, 322 'Details': {'Other': filter_empty(details)} 323 } 324 tags = {t['Key']: t['Value'] for t in r.get('Tags', [])} 325 if tags: 326 other['Tags'] = tags 327 return other 328 329 @classmethod 330 def register_resource(klass, registry, event): 331 for rtype, resource_manager in registry.items(): 332 if not resource_manager.has_arn(): 333 continue 334 if 'post-finding' in resource_manager.action_registry: 335 continue 336 resource_manager.action_registry.register('post-finding', klass) 337 338 339 aws_resources.subscribe( 340 aws_resources.EVENT_FINAL, OtherResourcePostFinding.register_resource) 341 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/c7n/actions/securityhub.py b/c7n/actions/securityhub.py --- a/c7n/actions/securityhub.py +++ b/c7n/actions/securityhub.py @@ -62,6 +62,8 @@ - shield-enabled actions: - type: post-finding + description: | + Shield should be enabled on account to allow for DDOS protection (1 time 3k USD Charge). severity_normalized: 6 types: - "Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)" @@ -82,6 +84,7 @@ "post-finding", required=["types"], title={"type": "string"}, + description={'type': 'string'}, severity={"type": "number", 'default': 0}, severity_normalized={"type": "number", "min": 0, "max": 100, 'default': 0}, confidence={"type": "number", "min": 0, "max": 100}, @@ -220,9 +223,14 @@ self.ProductName, ), "AwsAccountId": self.manager.config.account_id, + # Long search chain for description values, as this was + # made required long after users had policies deployed, so + # use explicit description, or policy description, or + # explicit title, or policy name, in that order. "Description": self.data.get( - "description", policy.data.get("description", "") - ).strip(), + "description", policy.data.get( + "description", + self.data.get('title', policy.name))).strip(), "Title": self.data.get("title", policy.name), 'Id': finding_id, "GeneratorId": policy.name,
{"golden_diff": "diff --git a/c7n/actions/securityhub.py b/c7n/actions/securityhub.py\n--- a/c7n/actions/securityhub.py\n+++ b/c7n/actions/securityhub.py\n@@ -62,6 +62,8 @@\n - shield-enabled\n actions:\n - type: post-finding\n+ description: |\n+ Shield should be enabled on account to allow for DDOS protection (1 time 3k USD Charge).\n severity_normalized: 6\n types:\n - \"Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)\"\n@@ -82,6 +84,7 @@\n \"post-finding\",\n required=[\"types\"],\n title={\"type\": \"string\"},\n+ description={'type': 'string'},\n severity={\"type\": \"number\", 'default': 0},\n severity_normalized={\"type\": \"number\", \"min\": 0, \"max\": 100, 'default': 0},\n confidence={\"type\": \"number\", \"min\": 0, \"max\": 100},\n@@ -220,9 +223,14 @@\n self.ProductName,\n ),\n \"AwsAccountId\": self.manager.config.account_id,\n+ # Long search chain for description values, as this was\n+ # made required long after users had policies deployed, so\n+ # use explicit description, or policy description, or\n+ # explicit title, or policy name, in that order.\n \"Description\": self.data.get(\n- \"description\", policy.data.get(\"description\", \"\")\n- ).strip(),\n+ \"description\", policy.data.get(\n+ \"description\",\n+ self.data.get('title', policy.name))).strip(),\n \"Title\": self.data.get(\"title\", policy.name),\n 'Id': finding_id,\n \"GeneratorId\": policy.name,\n", "issue": "[SecurityHub] Findings format seems to require a 'Description' field that is not supplied\nThe SecurityHub findings format seems to have changed, such that it has a requirement that a 'Description' field should be provided which Cloud Custodian doesn't honour. \r\n\r\n'FailedFindings': [{'Id': 'eu-west-1/627886280200/a4c96f76077a6a98477f63fbd679d331/e2e53110f710e5e82ae9e7c7d174d6f9', 'ErrorCode': 'InvalidInput', 'ErrorMessage': \"Finding does not adhere to Amazon Finding Format. 
data should have required property 'Description'.\"}]}\r\n\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import Counter\nfrom datetime import datetime\nfrom dateutil.tz import tzutc\n\nimport hashlib\nimport jmespath\nimport json\n\nfrom .core import BaseAction\nfrom c7n.utils import type_schema, local_session, chunks, dumps, filter_empty\nfrom c7n.exceptions import PolicyValidationError\n\nfrom c7n.manager import resources as aws_resources\nfrom c7n.version import version\n\n\nFindingTypes = {\n \"Software and Configuration Checks\",\n \"TTPs\",\n \"Effects\",\n \"Unusual Behaviors\",\n \"Sensitive Data Identifications\"\n}\n\n# Mostly undocumented value size limit\nSECHUB_VALUE_SIZE_LIMIT = 1024\n\n\nclass PostFinding(BaseAction):\n \"\"\"Report a finding to AWS Security Hub.\n\n Custodian acts as a finding provider, allowing users to craft\n policies that report to the AWS SecurityHub.\n\n For resources that are taggable, we will tag the resource with an identifier\n such that further findings generate updates.\n\n Example generate a finding for accounts that don't have shield enabled.\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n\n - name: account-shield-enabled\n resource: account\n filters:\n - shield-enabled\n actions:\n - type: post-finding\n severity_normalized: 6\n types:\n - \"Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)\"\n recommendation: \"Enable shield\"\n recommendation_url: \"https://www.example.com/policies/AntiDDoS.html\"\n confidence: 100\n compliance_status: FAILED\n\n \"\"\" # NOQA\n\n FindingVersion = \"2018-10-08\"\n ProductName = \"default\"\n\n permissions = ('securityhub:BatchImportFindings',)\n\n schema_alias = True\n schema = type_schema(\n \"post-finding\",\n required=[\"types\"],\n title={\"type\": \"string\"},\n severity={\"type\": \"number\", 'default': 0},\n severity_normalized={\"type\": \"number\", \"min\": 0, \"max\": 100, 'default': 0},\n confidence={\"type\": \"number\", \"min\": 0, \"max\": 100},\n criticality={\"type\": \"number\", \"min\": 0, \"max\": 100},\n # Cross region aggregation\n region={'type': 'string', 'description': 'cross-region aggregation target'},\n recommendation={\"type\": \"string\"},\n recommendation_url={\"type\": \"string\"},\n fields={\"type\": \"object\"},\n batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10},\n types={\n \"type\": \"array\",\n \"minItems\": 1,\n \"items\": {\"type\": \"string\"},\n },\n compliance_status={\n \"type\": \"string\",\n \"enum\": [\"PASSED\", \"WARNING\", \"FAILED\", \"NOT_AVAILABLE\"],\n },\n )\n\n NEW_FINDING = 'New'\n\n def validate(self):\n for finding_type in self.data[\"types\"]:\n if finding_type.count('/') > 2 or finding_type.split('/')[0] not in FindingTypes:\n raise PolicyValidationError(\n \"Finding types must be in the format 'namespace/category/classifier'.\"\n \" Found {}. 
Valid namespace values are: {}.\".format(\n finding_type, \" | \".join([ns for ns in FindingTypes])))\n\n def get_finding_tag(self, resource):\n finding_tag = None\n tags = resource.get('Tags', [])\n\n finding_key = '{}:{}'.format('c7n:FindingId',\n self.data.get('title', self.manager.ctx.policy.name))\n\n # Support Tags as dictionary\n if isinstance(tags, dict):\n return tags.get(finding_key)\n\n # Support Tags as list of {'Key': 'Value'}\n for t in tags:\n key = t['Key']\n value = t['Value']\n if key == finding_key:\n finding_tag = value\n return finding_tag\n\n def group_resources(self, resources):\n grouped_resources = {}\n for r in resources:\n finding_tag = self.get_finding_tag(r) or self.NEW_FINDING\n grouped_resources.setdefault(finding_tag, []).append(r)\n return grouped_resources\n\n def process(self, resources, event=None):\n region_name = self.data.get('region', self.manager.config.region)\n client = local_session(\n self.manager.session_factory).client(\n \"securityhub\", region_name=region_name)\n\n now = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()\n # default batch size to one to work around security hub console issue\n # which only shows a single resource in a finding.\n batch_size = self.data.get('batch_size', 1)\n stats = Counter()\n for key, grouped_resources in self.group_resources(resources).items():\n for resource_set in chunks(grouped_resources, batch_size):\n stats['Finding'] += 1\n if key == self.NEW_FINDING:\n finding_id = None\n created_at = now\n updated_at = now\n else:\n finding_id, created_at = self.get_finding_tag(\n resource_set[0]).split(':', 1)\n updated_at = now\n\n finding = self.get_finding(\n resource_set, finding_id, created_at, updated_at)\n import_response = client.batch_import_findings(\n Findings=[finding])\n if import_response['FailedCount'] > 0:\n stats['Failed'] += import_response['FailedCount']\n self.log.error(\n \"import_response=%s\" % (import_response))\n if key == self.NEW_FINDING:\n stats['New'] += len(resource_set)\n # Tag resources with new finding ids\n tag_action = self.manager.action_registry.get('tag')\n if tag_action is None:\n continue\n tag_action({\n 'key': '{}:{}'.format(\n 'c7n:FindingId',\n self.data.get(\n 'title', self.manager.ctx.policy.name)),\n 'value': '{}:{}'.format(\n finding['Id'], created_at)},\n self.manager).process(resource_set)\n else:\n stats['Update'] += len(resource_set)\n\n self.log.debug(\n \"policy:%s securityhub %d findings resources %d new %d updated %d failed\",\n self.manager.ctx.policy.name,\n stats['Finding'],\n stats['New'],\n stats['Update'],\n stats['Failed'])\n\n def get_finding(self, resources, existing_finding_id, created_at, updated_at):\n policy = self.manager.ctx.policy\n model = self.manager.resource_type\n region = self.data.get('region', self.manager.config.region)\n\n if existing_finding_id:\n finding_id = existing_finding_id\n else:\n finding_id = '{}/{}/{}/{}'.format(\n self.manager.config.region,\n self.manager.config.account_id,\n hashlib.md5(json.dumps(\n policy.data).encode('utf8')).hexdigest(),\n hashlib.md5(json.dumps(list(sorted(\n [r[model.id] for r in resources]))).encode(\n 'utf8')).hexdigest())\n finding = {\n \"SchemaVersion\": self.FindingVersion,\n \"ProductArn\": \"arn:aws:securityhub:{}:{}:product/{}/{}\".format(\n region,\n self.manager.config.account_id,\n self.manager.config.account_id,\n self.ProductName,\n ),\n \"AwsAccountId\": self.manager.config.account_id,\n \"Description\": self.data.get(\n \"description\", policy.data.get(\"description\", \"\")\n 
).strip(),\n \"Title\": self.data.get(\"title\", policy.name),\n 'Id': finding_id,\n \"GeneratorId\": policy.name,\n 'CreatedAt': created_at,\n 'UpdatedAt': updated_at,\n \"RecordState\": \"ACTIVE\",\n }\n\n severity = {'Product': 0, 'Normalized': 0}\n if self.data.get(\"severity\") is not None:\n severity[\"Product\"] = self.data[\"severity\"]\n if self.data.get(\"severity_normalized\") is not None:\n severity[\"Normalized\"] = self.data[\"severity_normalized\"]\n if severity:\n finding[\"Severity\"] = severity\n\n recommendation = {}\n if self.data.get(\"recommendation\"):\n recommendation[\"Text\"] = self.data[\"recommendation\"]\n if self.data.get(\"recommendation_url\"):\n recommendation[\"Url\"] = self.data[\"recommendation_url\"]\n if recommendation:\n finding[\"Remediation\"] = {\"Recommendation\": recommendation}\n\n if \"confidence\" in self.data:\n finding[\"Confidence\"] = self.data[\"confidence\"]\n if \"criticality\" in self.data:\n finding[\"Criticality\"] = self.data[\"criticality\"]\n if \"compliance_status\" in self.data:\n finding[\"Compliance\"] = {\"Status\": self.data[\"compliance_status\"]}\n\n fields = {\n 'resource': policy.resource_type,\n 'ProviderName': 'CloudCustodian',\n 'ProviderVersion': version\n }\n\n if \"fields\" in self.data:\n fields.update(self.data[\"fields\"])\n else:\n tags = {}\n for t in policy.tags:\n if \":\" in t:\n k, v = t.split(\":\", 1)\n else:\n k, v = t, \"\"\n tags[k] = v\n fields.update(tags)\n if fields:\n finding[\"ProductFields\"] = fields\n\n finding_resources = []\n for r in resources:\n finding_resources.append(self.format_resource(r))\n finding[\"Resources\"] = finding_resources\n finding[\"Types\"] = list(self.data[\"types\"])\n\n return filter_empty(finding)\n\n def format_resource(self, r):\n raise NotImplementedError(\"subclass responsibility\")\n\n\nclass OtherResourcePostFinding(PostFinding):\n\n fields = ()\n\n def format_resource(self, r):\n details = {}\n for k in r:\n if isinstance(k, (list, dict)):\n continue\n details[k] = r[k]\n\n for f in self.fields:\n value = jmespath.search(f['expr'], r)\n if not value:\n continue\n details[f['key']] = value\n\n for k, v in details.items():\n if isinstance(v, datetime):\n v = v.isoformat()\n elif isinstance(v, (list, dict)):\n v = dumps(v)\n elif isinstance(v, (int, float, bool)):\n v = str(v)\n else:\n continue\n details[k] = v[:SECHUB_VALUE_SIZE_LIMIT]\n\n details['c7n:resource-type'] = self.manager.type\n other = {\n 'Type': 'Other',\n 'Id': self.manager.get_arns([r])[0],\n 'Region': self.manager.config.region,\n 'Details': {'Other': filter_empty(details)}\n }\n tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}\n if tags:\n other['Tags'] = tags\n return other\n\n @classmethod\n def register_resource(klass, registry, event):\n for rtype, resource_manager in registry.items():\n if not resource_manager.has_arn():\n continue\n if 'post-finding' in resource_manager.action_registry:\n continue\n resource_manager.action_registry.register('post-finding', klass)\n\n\naws_resources.subscribe(\n aws_resources.EVENT_FINAL, OtherResourcePostFinding.register_resource)\n", "path": "c7n/actions/securityhub.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import Counter\nfrom datetime import datetime\nfrom dateutil.tz import tzutc\n\nimport hashlib\nimport jmespath\nimport json\n\nfrom .core import BaseAction\nfrom c7n.utils import type_schema, local_session, chunks, dumps, filter_empty\nfrom c7n.exceptions import PolicyValidationError\n\nfrom c7n.manager import resources as aws_resources\nfrom c7n.version import version\n\n\nFindingTypes = {\n \"Software and Configuration Checks\",\n \"TTPs\",\n \"Effects\",\n \"Unusual Behaviors\",\n \"Sensitive Data Identifications\"\n}\n\n# Mostly undocumented value size limit\nSECHUB_VALUE_SIZE_LIMIT = 1024\n\n\nclass PostFinding(BaseAction):\n \"\"\"Report a finding to AWS Security Hub.\n\n Custodian acts as a finding provider, allowing users to craft\n policies that report to the AWS SecurityHub.\n\n For resources that are taggable, we will tag the resource with an identifier\n such that further findings generate updates.\n\n Example generate a finding for accounts that don't have shield enabled.\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n\n - name: account-shield-enabled\n resource: account\n filters:\n - shield-enabled\n actions:\n - type: post-finding\n description: |\n Shield should be enabled on account to allow for DDOS protection (1 time 3k USD Charge).\n severity_normalized: 6\n types:\n - \"Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)\"\n recommendation: \"Enable shield\"\n recommendation_url: \"https://www.example.com/policies/AntiDDoS.html\"\n confidence: 100\n compliance_status: FAILED\n\n \"\"\" # NOQA\n\n FindingVersion = \"2018-10-08\"\n ProductName = \"default\"\n\n permissions = ('securityhub:BatchImportFindings',)\n\n schema_alias = True\n schema = type_schema(\n \"post-finding\",\n required=[\"types\"],\n title={\"type\": \"string\"},\n description={'type': 'string'},\n severity={\"type\": \"number\", 'default': 0},\n severity_normalized={\"type\": \"number\", \"min\": 0, \"max\": 100, 'default': 0},\n confidence={\"type\": \"number\", \"min\": 0, \"max\": 100},\n criticality={\"type\": \"number\", \"min\": 0, \"max\": 100},\n # Cross region aggregation\n region={'type': 'string', 'description': 'cross-region aggregation target'},\n recommendation={\"type\": \"string\"},\n recommendation_url={\"type\": \"string\"},\n fields={\"type\": \"object\"},\n batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10},\n types={\n \"type\": \"array\",\n \"minItems\": 1,\n \"items\": {\"type\": \"string\"},\n },\n compliance_status={\n \"type\": \"string\",\n \"enum\": [\"PASSED\", \"WARNING\", \"FAILED\", \"NOT_AVAILABLE\"],\n },\n )\n\n NEW_FINDING = 'New'\n\n def validate(self):\n for finding_type in self.data[\"types\"]:\n if finding_type.count('/') > 2 or finding_type.split('/')[0] not in FindingTypes:\n raise PolicyValidationError(\n \"Finding types must be in the format 'namespace/category/classifier'.\"\n \" Found {}. 
Valid namespace values are: {}.\".format(\n finding_type, \" | \".join([ns for ns in FindingTypes])))\n\n def get_finding_tag(self, resource):\n finding_tag = None\n tags = resource.get('Tags', [])\n\n finding_key = '{}:{}'.format('c7n:FindingId',\n self.data.get('title', self.manager.ctx.policy.name))\n\n # Support Tags as dictionary\n if isinstance(tags, dict):\n return tags.get(finding_key)\n\n # Support Tags as list of {'Key': 'Value'}\n for t in tags:\n key = t['Key']\n value = t['Value']\n if key == finding_key:\n finding_tag = value\n return finding_tag\n\n def group_resources(self, resources):\n grouped_resources = {}\n for r in resources:\n finding_tag = self.get_finding_tag(r) or self.NEW_FINDING\n grouped_resources.setdefault(finding_tag, []).append(r)\n return grouped_resources\n\n def process(self, resources, event=None):\n region_name = self.data.get('region', self.manager.config.region)\n client = local_session(\n self.manager.session_factory).client(\n \"securityhub\", region_name=region_name)\n\n now = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()\n # default batch size to one to work around security hub console issue\n # which only shows a single resource in a finding.\n batch_size = self.data.get('batch_size', 1)\n stats = Counter()\n for key, grouped_resources in self.group_resources(resources).items():\n for resource_set in chunks(grouped_resources, batch_size):\n stats['Finding'] += 1\n if key == self.NEW_FINDING:\n finding_id = None\n created_at = now\n updated_at = now\n else:\n finding_id, created_at = self.get_finding_tag(\n resource_set[0]).split(':', 1)\n updated_at = now\n\n finding = self.get_finding(\n resource_set, finding_id, created_at, updated_at)\n import_response = client.batch_import_findings(\n Findings=[finding])\n if import_response['FailedCount'] > 0:\n stats['Failed'] += import_response['FailedCount']\n self.log.error(\n \"import_response=%s\" % (import_response))\n if key == self.NEW_FINDING:\n stats['New'] += len(resource_set)\n # Tag resources with new finding ids\n tag_action = self.manager.action_registry.get('tag')\n if tag_action is None:\n continue\n tag_action({\n 'key': '{}:{}'.format(\n 'c7n:FindingId',\n self.data.get(\n 'title', self.manager.ctx.policy.name)),\n 'value': '{}:{}'.format(\n finding['Id'], created_at)},\n self.manager).process(resource_set)\n else:\n stats['Update'] += len(resource_set)\n\n self.log.debug(\n \"policy:%s securityhub %d findings resources %d new %d updated %d failed\",\n self.manager.ctx.policy.name,\n stats['Finding'],\n stats['New'],\n stats['Update'],\n stats['Failed'])\n\n def get_finding(self, resources, existing_finding_id, created_at, updated_at):\n policy = self.manager.ctx.policy\n model = self.manager.resource_type\n region = self.data.get('region', self.manager.config.region)\n\n if existing_finding_id:\n finding_id = existing_finding_id\n else:\n finding_id = '{}/{}/{}/{}'.format(\n self.manager.config.region,\n self.manager.config.account_id,\n hashlib.md5(json.dumps(\n policy.data).encode('utf8')).hexdigest(),\n hashlib.md5(json.dumps(list(sorted(\n [r[model.id] for r in resources]))).encode(\n 'utf8')).hexdigest())\n finding = {\n \"SchemaVersion\": self.FindingVersion,\n \"ProductArn\": \"arn:aws:securityhub:{}:{}:product/{}/{}\".format(\n region,\n self.manager.config.account_id,\n self.manager.config.account_id,\n self.ProductName,\n ),\n \"AwsAccountId\": self.manager.config.account_id,\n # Long search chain for description values, as this was\n # made required long after users 
had policies deployed, so\n # use explicit description, or policy description, or\n # explicit title, or policy name, in that order.\n \"Description\": self.data.get(\n \"description\", policy.data.get(\n \"description\",\n self.data.get('title', policy.name))).strip(),\n \"Title\": self.data.get(\"title\", policy.name),\n 'Id': finding_id,\n \"GeneratorId\": policy.name,\n 'CreatedAt': created_at,\n 'UpdatedAt': updated_at,\n \"RecordState\": \"ACTIVE\",\n }\n\n severity = {'Product': 0, 'Normalized': 0}\n if self.data.get(\"severity\") is not None:\n severity[\"Product\"] = self.data[\"severity\"]\n if self.data.get(\"severity_normalized\") is not None:\n severity[\"Normalized\"] = self.data[\"severity_normalized\"]\n if severity:\n finding[\"Severity\"] = severity\n\n recommendation = {}\n if self.data.get(\"recommendation\"):\n recommendation[\"Text\"] = self.data[\"recommendation\"]\n if self.data.get(\"recommendation_url\"):\n recommendation[\"Url\"] = self.data[\"recommendation_url\"]\n if recommendation:\n finding[\"Remediation\"] = {\"Recommendation\": recommendation}\n\n if \"confidence\" in self.data:\n finding[\"Confidence\"] = self.data[\"confidence\"]\n if \"criticality\" in self.data:\n finding[\"Criticality\"] = self.data[\"criticality\"]\n if \"compliance_status\" in self.data:\n finding[\"Compliance\"] = {\"Status\": self.data[\"compliance_status\"]}\n\n fields = {\n 'resource': policy.resource_type,\n 'ProviderName': 'CloudCustodian',\n 'ProviderVersion': version\n }\n\n if \"fields\" in self.data:\n fields.update(self.data[\"fields\"])\n else:\n tags = {}\n for t in policy.tags:\n if \":\" in t:\n k, v = t.split(\":\", 1)\n else:\n k, v = t, \"\"\n tags[k] = v\n fields.update(tags)\n if fields:\n finding[\"ProductFields\"] = fields\n\n finding_resources = []\n for r in resources:\n finding_resources.append(self.format_resource(r))\n finding[\"Resources\"] = finding_resources\n finding[\"Types\"] = list(self.data[\"types\"])\n\n return filter_empty(finding)\n\n def format_resource(self, r):\n raise NotImplementedError(\"subclass responsibility\")\n\n\nclass OtherResourcePostFinding(PostFinding):\n\n fields = ()\n\n def format_resource(self, r):\n details = {}\n for k in r:\n if isinstance(k, (list, dict)):\n continue\n details[k] = r[k]\n\n for f in self.fields:\n value = jmespath.search(f['expr'], r)\n if not value:\n continue\n details[f['key']] = value\n\n for k, v in details.items():\n if isinstance(v, datetime):\n v = v.isoformat()\n elif isinstance(v, (list, dict)):\n v = dumps(v)\n elif isinstance(v, (int, float, bool)):\n v = str(v)\n else:\n continue\n details[k] = v[:SECHUB_VALUE_SIZE_LIMIT]\n\n details['c7n:resource-type'] = self.manager.type\n other = {\n 'Type': 'Other',\n 'Id': self.manager.get_arns([r])[0],\n 'Region': self.manager.config.region,\n 'Details': {'Other': filter_empty(details)}\n }\n tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}\n if tags:\n other['Tags'] = tags\n return other\n\n @classmethod\n def register_resource(klass, registry, event):\n for rtype, resource_manager in registry.items():\n if not resource_manager.has_arn():\n continue\n if 'post-finding' in resource_manager.action_registry:\n continue\n resource_manager.action_registry.register('post-finding', klass)\n\n\naws_resources.subscribe(\n aws_resources.EVENT_FINAL, OtherResourcePostFinding.register_resource)\n", "path": "c7n/actions/securityhub.py"}]}
3931
399
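The cloud-custodian record above resolves the SecurityHub `Description` through a chain of `dict.get` fallbacks: explicit action description, then policy description, then explicit title, then policy name. The sketch below only traces that lookup order with hypothetical inputs; the helper name `resolve_description` and the sample values are invented for illustration and do not come from the repository.

```python
def resolve_description(action_data: dict, policy_data: dict, policy_name: str) -> str:
    # Mirrors the lookup order in the golden diff: explicit description,
    # then policy description, then explicit title, then policy name.
    return action_data.get(
        "description",
        policy_data.get("description", action_data.get("title", policy_name)),
    ).strip()


if __name__ == "__main__":
    # Hypothetical inputs, not taken from the record.
    print(resolve_description({}, {}, "account-shield-enabled"))
    # -> "account-shield-enabled"
    print(resolve_description({"title": "Shield disabled"}, {}, "account-shield-enabled"))
    # -> "Shield disabled"
    print(resolve_description({}, {"description": "Shield should be enabled."}, "p"))
    # -> "Shield should be enabled."
```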
gh_patches_debug_20785
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-8035
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Prevent Matplotlib's backend from being collected more than once **Is your feature request related to a problem? Please describe.** Hi, I've noticed that some of `Matplotlib`'s backend are collected more than once during building. While this does not cause building to fail, it would be more readable to remove duplicate items from the list shown in the building log. **Describe the solution you'd like** Prevent `Matplotlib`'s backend from being collected into the list. **Describe alternatives you've considered** Remove duplicate items from the list after collection. **Additional context** `PyInstaller` 6.0 ``` 136360 INFO: Loading module hook 'hook-matplotlib.backends.py' from 'D:\\Python\\lib\\site-packages\\PyInstaller\\hooks'... 136361 INFO: Matplotlib backend selection method: automatic discovery of used backends 136374 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'E:\\Wordless\\wordless\\wl_main.py': ['Qt5Agg'] 136380 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'sympy.testing.runtests': ['Agg', 'Agg'] 136391 INFO: The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: ['Qt5Agg', 'Agg', 'Agg']. If your backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure the backend collection via hook options (see: https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks). 136391 INFO: Selected matplotlib backends: ['Qt5Agg', 'Agg', 'Agg'] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `PyInstaller/hooks/hook-matplotlib.backends.py` Content: ``` 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2023, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License (version 2 5 # or later) with exception for distributing the bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) 10 #----------------------------------------------------------------------------- 11 12 from PyInstaller.compat import is_darwin 13 from PyInstaller.utils.hooks import logger, get_hook_config 14 from PyInstaller import isolated 15 16 17 @isolated.decorate 18 def _get_configured_default_backend(): 19 """ 20 Return the configured default matplotlib backend name, if available as matplotlib.rcParams['backend'] (or overridden 21 by MPLBACKEND environment variable. If the value of matplotlib.rcParams['backend'] corresponds to the auto-sentinel 22 object, returns None 23 """ 24 import matplotlib 25 # matplotlib.rcParams overrides the __getitem__ implementation and attempts to determine and load the default 26 # backend using pyplot.switch_backend(). Therefore, use dict.__getitem__(). 27 val = dict.__getitem__(matplotlib.rcParams, 'backend') 28 if isinstance(val, str): 29 return val 30 return None 31 32 33 @isolated.decorate 34 def _list_available_mpl_backends(): 35 """ 36 Returns the names of all available matplotlib backends. 37 """ 38 import matplotlib 39 return matplotlib.rcsetup.all_backends 40 41 42 @isolated.decorate 43 def _check_mpl_backend_importable(module_name): 44 """ 45 Attempts to import the given module name (matplotlib backend module). 
46 47 Exceptions are propagated to caller. 48 """ 49 __import__(module_name) 50 51 52 # Bytecode scanning 53 def _recursive_scan_code_objects_for_mpl_use(co): 54 """ 55 Recursively scan the bytecode for occurrences of matplotlib.use() or mpl.use() calls with const arguments, and 56 collect those arguments into list of used matplotlib backend names. 57 """ 58 59 from PyInstaller.depend.bytecode import any_alias, recursive_function_calls 60 61 mpl_use_names = { 62 *any_alias("matplotlib.use"), 63 *any_alias("mpl.use"), # matplotlib is commonly aliased as mpl 64 } 65 66 backends = [] 67 for calls in recursive_function_calls(co).values(): 68 for name, args in calls: 69 # matplotlib.use(backend) or matplotlib.use(backend, force) 70 # We support only literal arguments. Similarly, kwargs are 71 # not supported. 72 if len(args) not in {1, 2} or not isinstance(args[0], str): 73 continue 74 if name in mpl_use_names: 75 backends.append(args[0]) 76 77 return backends 78 79 80 def _backend_module_name(name): 81 """ 82 Converts matplotlib backend name to its corresponding module name. 83 84 Equivalent to matplotlib.cbook._backend_module_name(). 85 """ 86 if name.startswith("module://"): 87 return name[9:] 88 return f"matplotlib.backends.backend_{name.lower()}" 89 90 91 def _autodetect_used_backends(hook_api): 92 """ 93 Returns a list of automatically-discovered matplotlib backends in use, or the name of the default matplotlib 94 backend. Implements the 'auto' backend selection method. 95 """ 96 # Scan the code for matplotlib.use() 97 modulegraph = hook_api.analysis.graph 98 mpl_code_objs = modulegraph.get_code_using("matplotlib") 99 used_backends = [] 100 for name, co in mpl_code_objs.items(): 101 co_backends = _recursive_scan_code_objects_for_mpl_use(co) 102 if co_backends: 103 logger.info( 104 "Discovered Matplotlib backend(s) via `matplotlib.use()` call in module %r: %r", name, co_backends 105 ) 106 used_backends += co_backends 107 108 if used_backends: 109 HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks' 110 logger.info( 111 "The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: %r. If your " 112 "backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure " 113 "the backend collection via hook options (see: %s).", used_backends, HOOK_CONFIG_DOCS 114 ) 115 return used_backends 116 117 # Determine the default matplotlib backend. 118 # 119 # Ideally, this would be done by calling ``matplotlib.get_backend()``. However, that function tries to switch to the 120 # default backend (calling ``matplotlib.pyplot.switch_backend()``), which seems to occasionally fail on our linux CI 121 # with an error and, on other occasions, returns the headless Agg backend instead of the GUI one (even with display 122 # server running). Furthermore, using ``matplotlib.get_backend()`` returns headless 'Agg' when display server is 123 # unavailable, which is not ideal for automated builds. 124 # 125 # Therefore, we try to emulate ``matplotlib.get_backend()`` ourselves. First, we try to obtain the configured 126 # default backend from settings (rcparams and/or MPLBACKEND environment variable). If that is unavailable, we try to 127 # find the first importable GUI-based backend, using the same list as matplotlib.pyplot.switch_backend() uses for 128 # automatic backend selection. The difference is that we only test whether the backend module is importable, without 129 # trying to switch to it. 
130 default_backend = _get_configured_default_backend() # isolated sub-process 131 if default_backend: 132 logger.info("Found configured default matplotlib backend: %s", default_backend) 133 return [default_backend] 134 135 candidates = ["Qt5Agg", "Gtk3Agg", "TkAgg", "WxAgg"] 136 if is_darwin: 137 candidates = ["MacOSX"] + candidates 138 logger.info("Trying determine the default backend as first importable candidate from the list: %r", candidates) 139 140 for candidate in candidates: 141 try: 142 module_name = _backend_module_name(candidate) 143 _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process. 144 except Exception: 145 continue 146 return [candidate] 147 148 # Fall back to headless Agg backend 149 logger.info("None of the backend candidates could be imported; falling back to headless Agg!") 150 return ['Agg'] 151 152 153 def _collect_all_importable_backends(hook_api): 154 """ 155 Returns a list of all importable matplotlib backends. Implements the 'all' backend selection method. 156 """ 157 # List of the human-readable names of all available backends. 158 backend_names = _list_available_mpl_backends() # NOTE: retrieved in an isolated sub-process. 159 logger.info("All available matplotlib backends: %r", backend_names) 160 161 # Try to import the module(s). 162 importable_backends = [] 163 164 # List of backends to exclude; Qt4 is not supported by PyInstaller anymore. 165 exclude_backends = {'Qt4Agg', 'Qt4Cairo'} 166 167 # Ignore "CocoaAgg" on OSes other than Mac OS; attempting to import it on other OSes halts the current 168 # (sub)process without printing output or raising exceptions, preventing reliable detection. Apply the 169 # same logic for the (newer) "MacOSX" backend. 170 if not is_darwin: 171 exclude_backends |= {'CocoaAgg', 'MacOSX'} 172 173 # For safety, attempt to import each backend in an isolated sub-process. 174 for backend_name in backend_names: 175 if backend_name in exclude_backends: 176 logger.info(' Matplotlib backend %r: excluded', backend_name) 177 continue 178 179 try: 180 module_name = _backend_module_name(backend_name) 181 _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process. 182 except Exception: 183 # Backend is not importable, for whatever reason. 184 logger.info(' Matplotlib backend %r: ignored due to import error', backend_name) 185 continue 186 187 logger.info(' Matplotlib backend %r: added', backend_name) 188 importable_backends.append(backend_name) 189 190 return importable_backends 191 192 193 def hook(hook_api): 194 # Backend collection setting 195 backends_method = get_hook_config(hook_api, 'matplotlib', 'backends') 196 if backends_method is None: 197 backends_method = 'auto' # default method 198 199 # Select backend(s) 200 if backends_method == 'auto': 201 logger.info("Matplotlib backend selection method: automatic discovery of used backends") 202 backend_names = _autodetect_used_backends(hook_api) 203 elif backends_method == 'all': 204 logger.info("Matplotlib backend selection method: collection of all importable backends") 205 backend_names = _collect_all_importable_backends(hook_api) 206 else: 207 logger.info("Matplotlib backend selection method: user-provided name(s)") 208 if isinstance(backends_method, str): 209 backend_names = [backends_method] 210 else: 211 assert isinstance(backends_method, list), "User-provided backend name(s) must be either a string or a list!" 
212 backend_names = backends_method 213 214 logger.info("Selected matplotlib backends: %r", backend_names) 215 216 # Set module names as hiddenimports 217 module_names = [_backend_module_name(backend) for backend in backend_names] # backend name -> module name 218 hook_api.add_imports(*module_names) 219 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/PyInstaller/hooks/hook-matplotlib.backends.py b/PyInstaller/hooks/hook-matplotlib.backends.py --- a/PyInstaller/hooks/hook-matplotlib.backends.py +++ b/PyInstaller/hooks/hook-matplotlib.backends.py @@ -105,6 +105,9 @@ ) used_backends += co_backends + # Deduplicate and sort the list of used backends before displaying it. + used_backends = sorted(set(used_backends)) + if used_backends: HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks' logger.info( @@ -211,6 +214,9 @@ assert isinstance(backends_method, list), "User-provided backend name(s) must be either a string or a list!" backend_names = backends_method + # Deduplicate and sort the list of selected backends before displaying it. + backend_names = sorted(set(backend_names)) + logger.info("Selected matplotlib backends: %r", backend_names) # Set module names as hiddenimports
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.backends.py b/PyInstaller/hooks/hook-matplotlib.backends.py\n--- a/PyInstaller/hooks/hook-matplotlib.backends.py\n+++ b/PyInstaller/hooks/hook-matplotlib.backends.py\n@@ -105,6 +105,9 @@\n )\n used_backends += co_backends\n \n+ # Deduplicate and sort the list of used backends before displaying it.\n+ used_backends = sorted(set(used_backends))\n+\n if used_backends:\n HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks'\n logger.info(\n@@ -211,6 +214,9 @@\n assert isinstance(backends_method, list), \"User-provided backend name(s) must be either a string or a list!\"\n backend_names = backends_method\n \n+ # Deduplicate and sort the list of selected backends before displaying it.\n+ backend_names = sorted(set(backend_names))\n+\n logger.info(\"Selected matplotlib backends: %r\", backend_names)\n \n # Set module names as hiddenimports\n", "issue": "Prevent Matplotlib's backend from being collected more than once\n**Is your feature request related to a problem? Please describe.**\r\nHi, I've noticed that some of `Matplotlib`'s backend are collected more than once during building. While this does not cause building to fail, it would be more readable to remove duplicate items from the list shown in the building log.\r\n\r\n**Describe the solution you'd like**\r\nPrevent `Matplotlib`'s backend from being collected into the list.\r\n\r\n**Describe alternatives you've considered**\r\nRemove duplicate items from the list after collection.\r\n\r\n**Additional context**\r\n`PyInstaller` 6.0\r\n```\r\n136360 INFO: Loading module hook 'hook-matplotlib.backends.py' from 'D:\\\\Python\\\\lib\\\\site-packages\\\\PyInstaller\\\\hooks'...\r\n136361 INFO: Matplotlib backend selection method: automatic discovery of used backends\r\n136374 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'E:\\\\Wordless\\\\wordless\\\\wl_main.py': ['Qt5Agg']\r\n136380 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'sympy.testing.runtests': ['Agg', 'Agg']\r\n136391 INFO: The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: ['Qt5Agg', 'Agg', 'Agg']. If your backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure the backend collection via hook options (see: https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks).\r\n136391 INFO: Selected matplotlib backends: ['Qt5Agg', 'Agg', 'Agg']\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.compat import is_darwin\nfrom PyInstaller.utils.hooks import logger, get_hook_config\nfrom PyInstaller import isolated\n\n\[email protected]\ndef _get_configured_default_backend():\n \"\"\"\n Return the configured default matplotlib backend name, if available as matplotlib.rcParams['backend'] (or overridden\n by MPLBACKEND environment variable. 
If the value of matplotlib.rcParams['backend'] corresponds to the auto-sentinel\n object, returns None\n \"\"\"\n import matplotlib\n # matplotlib.rcParams overrides the __getitem__ implementation and attempts to determine and load the default\n # backend using pyplot.switch_backend(). Therefore, use dict.__getitem__().\n val = dict.__getitem__(matplotlib.rcParams, 'backend')\n if isinstance(val, str):\n return val\n return None\n\n\[email protected]\ndef _list_available_mpl_backends():\n \"\"\"\n Returns the names of all available matplotlib backends.\n \"\"\"\n import matplotlib\n return matplotlib.rcsetup.all_backends\n\n\[email protected]\ndef _check_mpl_backend_importable(module_name):\n \"\"\"\n Attempts to import the given module name (matplotlib backend module).\n\n Exceptions are propagated to caller.\n \"\"\"\n __import__(module_name)\n\n\n# Bytecode scanning\ndef _recursive_scan_code_objects_for_mpl_use(co):\n \"\"\"\n Recursively scan the bytecode for occurrences of matplotlib.use() or mpl.use() calls with const arguments, and\n collect those arguments into list of used matplotlib backend names.\n \"\"\"\n\n from PyInstaller.depend.bytecode import any_alias, recursive_function_calls\n\n mpl_use_names = {\n *any_alias(\"matplotlib.use\"),\n *any_alias(\"mpl.use\"), # matplotlib is commonly aliased as mpl\n }\n\n backends = []\n for calls in recursive_function_calls(co).values():\n for name, args in calls:\n # matplotlib.use(backend) or matplotlib.use(backend, force)\n # We support only literal arguments. Similarly, kwargs are\n # not supported.\n if len(args) not in {1, 2} or not isinstance(args[0], str):\n continue\n if name in mpl_use_names:\n backends.append(args[0])\n\n return backends\n\n\ndef _backend_module_name(name):\n \"\"\"\n Converts matplotlib backend name to its corresponding module name.\n\n Equivalent to matplotlib.cbook._backend_module_name().\n \"\"\"\n if name.startswith(\"module://\"):\n return name[9:]\n return f\"matplotlib.backends.backend_{name.lower()}\"\n\n\ndef _autodetect_used_backends(hook_api):\n \"\"\"\n Returns a list of automatically-discovered matplotlib backends in use, or the name of the default matplotlib\n backend. Implements the 'auto' backend selection method.\n \"\"\"\n # Scan the code for matplotlib.use()\n modulegraph = hook_api.analysis.graph\n mpl_code_objs = modulegraph.get_code_using(\"matplotlib\")\n used_backends = []\n for name, co in mpl_code_objs.items():\n co_backends = _recursive_scan_code_objects_for_mpl_use(co)\n if co_backends:\n logger.info(\n \"Discovered Matplotlib backend(s) via `matplotlib.use()` call in module %r: %r\", name, co_backends\n )\n used_backends += co_backends\n\n if used_backends:\n HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks'\n logger.info(\n \"The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: %r. If your \"\n \"backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure \"\n \"the backend collection via hook options (see: %s).\", used_backends, HOOK_CONFIG_DOCS\n )\n return used_backends\n\n # Determine the default matplotlib backend.\n #\n # Ideally, this would be done by calling ``matplotlib.get_backend()``. 
However, that function tries to switch to the\n # default backend (calling ``matplotlib.pyplot.switch_backend()``), which seems to occasionally fail on our linux CI\n # with an error and, on other occasions, returns the headless Agg backend instead of the GUI one (even with display\n # server running). Furthermore, using ``matplotlib.get_backend()`` returns headless 'Agg' when display server is\n # unavailable, which is not ideal for automated builds.\n #\n # Therefore, we try to emulate ``matplotlib.get_backend()`` ourselves. First, we try to obtain the configured\n # default backend from settings (rcparams and/or MPLBACKEND environment variable). If that is unavailable, we try to\n # find the first importable GUI-based backend, using the same list as matplotlib.pyplot.switch_backend() uses for\n # automatic backend selection. The difference is that we only test whether the backend module is importable, without\n # trying to switch to it.\n default_backend = _get_configured_default_backend() # isolated sub-process\n if default_backend:\n logger.info(\"Found configured default matplotlib backend: %s\", default_backend)\n return [default_backend]\n\n candidates = [\"Qt5Agg\", \"Gtk3Agg\", \"TkAgg\", \"WxAgg\"]\n if is_darwin:\n candidates = [\"MacOSX\"] + candidates\n logger.info(\"Trying determine the default backend as first importable candidate from the list: %r\", candidates)\n\n for candidate in candidates:\n try:\n module_name = _backend_module_name(candidate)\n _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.\n except Exception:\n continue\n return [candidate]\n\n # Fall back to headless Agg backend\n logger.info(\"None of the backend candidates could be imported; falling back to headless Agg!\")\n return ['Agg']\n\n\ndef _collect_all_importable_backends(hook_api):\n \"\"\"\n Returns a list of all importable matplotlib backends. Implements the 'all' backend selection method.\n \"\"\"\n # List of the human-readable names of all available backends.\n backend_names = _list_available_mpl_backends() # NOTE: retrieved in an isolated sub-process.\n logger.info(\"All available matplotlib backends: %r\", backend_names)\n\n # Try to import the module(s).\n importable_backends = []\n\n # List of backends to exclude; Qt4 is not supported by PyInstaller anymore.\n exclude_backends = {'Qt4Agg', 'Qt4Cairo'}\n\n # Ignore \"CocoaAgg\" on OSes other than Mac OS; attempting to import it on other OSes halts the current\n # (sub)process without printing output or raising exceptions, preventing reliable detection. 
Apply the\n # same logic for the (newer) \"MacOSX\" backend.\n if not is_darwin:\n exclude_backends |= {'CocoaAgg', 'MacOSX'}\n\n # For safety, attempt to import each backend in an isolated sub-process.\n for backend_name in backend_names:\n if backend_name in exclude_backends:\n logger.info(' Matplotlib backend %r: excluded', backend_name)\n continue\n\n try:\n module_name = _backend_module_name(backend_name)\n _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.\n except Exception:\n # Backend is not importable, for whatever reason.\n logger.info(' Matplotlib backend %r: ignored due to import error', backend_name)\n continue\n\n logger.info(' Matplotlib backend %r: added', backend_name)\n importable_backends.append(backend_name)\n\n return importable_backends\n\n\ndef hook(hook_api):\n # Backend collection setting\n backends_method = get_hook_config(hook_api, 'matplotlib', 'backends')\n if backends_method is None:\n backends_method = 'auto' # default method\n\n # Select backend(s)\n if backends_method == 'auto':\n logger.info(\"Matplotlib backend selection method: automatic discovery of used backends\")\n backend_names = _autodetect_used_backends(hook_api)\n elif backends_method == 'all':\n logger.info(\"Matplotlib backend selection method: collection of all importable backends\")\n backend_names = _collect_all_importable_backends(hook_api)\n else:\n logger.info(\"Matplotlib backend selection method: user-provided name(s)\")\n if isinstance(backends_method, str):\n backend_names = [backends_method]\n else:\n assert isinstance(backends_method, list), \"User-provided backend name(s) must be either a string or a list!\"\n backend_names = backends_method\n\n logger.info(\"Selected matplotlib backends: %r\", backend_names)\n\n # Set module names as hiddenimports\n module_names = [_backend_module_name(backend) for backend in backend_names] # backend name -> module name\n hook_api.add_imports(*module_names)\n", "path": "PyInstaller/hooks/hook-matplotlib.backends.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.compat import is_darwin\nfrom PyInstaller.utils.hooks import logger, get_hook_config\nfrom PyInstaller import isolated\n\n\[email protected]\ndef _get_configured_default_backend():\n \"\"\"\n Return the configured default matplotlib backend name, if available as matplotlib.rcParams['backend'] (or overridden\n by MPLBACKEND environment variable. If the value of matplotlib.rcParams['backend'] corresponds to the auto-sentinel\n object, returns None\n \"\"\"\n import matplotlib\n # matplotlib.rcParams overrides the __getitem__ implementation and attempts to determine and load the default\n # backend using pyplot.switch_backend(). 
Therefore, use dict.__getitem__().\n val = dict.__getitem__(matplotlib.rcParams, 'backend')\n if isinstance(val, str):\n return val\n return None\n\n\[email protected]\ndef _list_available_mpl_backends():\n \"\"\"\n Returns the names of all available matplotlib backends.\n \"\"\"\n import matplotlib\n return matplotlib.rcsetup.all_backends\n\n\[email protected]\ndef _check_mpl_backend_importable(module_name):\n \"\"\"\n Attempts to import the given module name (matplotlib backend module).\n\n Exceptions are propagated to caller.\n \"\"\"\n __import__(module_name)\n\n\n# Bytecode scanning\ndef _recursive_scan_code_objects_for_mpl_use(co):\n \"\"\"\n Recursively scan the bytecode for occurrences of matplotlib.use() or mpl.use() calls with const arguments, and\n collect those arguments into list of used matplotlib backend names.\n \"\"\"\n\n from PyInstaller.depend.bytecode import any_alias, recursive_function_calls\n\n mpl_use_names = {\n *any_alias(\"matplotlib.use\"),\n *any_alias(\"mpl.use\"), # matplotlib is commonly aliased as mpl\n }\n\n backends = []\n for calls in recursive_function_calls(co).values():\n for name, args in calls:\n # matplotlib.use(backend) or matplotlib.use(backend, force)\n # We support only literal arguments. Similarly, kwargs are\n # not supported.\n if len(args) not in {1, 2} or not isinstance(args[0], str):\n continue\n if name in mpl_use_names:\n backends.append(args[0])\n\n return backends\n\n\ndef _backend_module_name(name):\n \"\"\"\n Converts matplotlib backend name to its corresponding module name.\n\n Equivalent to matplotlib.cbook._backend_module_name().\n \"\"\"\n if name.startswith(\"module://\"):\n return name[9:]\n return f\"matplotlib.backends.backend_{name.lower()}\"\n\n\ndef _autodetect_used_backends(hook_api):\n \"\"\"\n Returns a list of automatically-discovered matplotlib backends in use, or the name of the default matplotlib\n backend. Implements the 'auto' backend selection method.\n \"\"\"\n # Scan the code for matplotlib.use()\n modulegraph = hook_api.analysis.graph\n mpl_code_objs = modulegraph.get_code_using(\"matplotlib\")\n used_backends = []\n for name, co in mpl_code_objs.items():\n co_backends = _recursive_scan_code_objects_for_mpl_use(co)\n if co_backends:\n logger.info(\n \"Discovered Matplotlib backend(s) via `matplotlib.use()` call in module %r: %r\", name, co_backends\n )\n used_backends += co_backends\n\n # Deduplicate and sort the list of used backends before displaying it.\n used_backends = sorted(set(used_backends))\n\n if used_backends:\n HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks'\n logger.info(\n \"The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: %r. If your \"\n \"backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure \"\n \"the backend collection via hook options (see: %s).\", used_backends, HOOK_CONFIG_DOCS\n )\n return used_backends\n\n # Determine the default matplotlib backend.\n #\n # Ideally, this would be done by calling ``matplotlib.get_backend()``. However, that function tries to switch to the\n # default backend (calling ``matplotlib.pyplot.switch_backend()``), which seems to occasionally fail on our linux CI\n # with an error and, on other occasions, returns the headless Agg backend instead of the GUI one (even with display\n # server running). 
Furthermore, using ``matplotlib.get_backend()`` returns headless 'Agg' when display server is\n # unavailable, which is not ideal for automated builds.\n #\n # Therefore, we try to emulate ``matplotlib.get_backend()`` ourselves. First, we try to obtain the configured\n # default backend from settings (rcparams and/or MPLBACKEND environment variable). If that is unavailable, we try to\n # find the first importable GUI-based backend, using the same list as matplotlib.pyplot.switch_backend() uses for\n # automatic backend selection. The difference is that we only test whether the backend module is importable, without\n # trying to switch to it.\n default_backend = _get_configured_default_backend() # isolated sub-process\n if default_backend:\n logger.info(\"Found configured default matplotlib backend: %s\", default_backend)\n return [default_backend]\n\n candidates = [\"Qt5Agg\", \"Gtk3Agg\", \"TkAgg\", \"WxAgg\"]\n if is_darwin:\n candidates = [\"MacOSX\"] + candidates\n logger.info(\"Trying determine the default backend as first importable candidate from the list: %r\", candidates)\n\n for candidate in candidates:\n try:\n module_name = _backend_module_name(candidate)\n _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.\n except Exception:\n continue\n return [candidate]\n\n # Fall back to headless Agg backend\n logger.info(\"None of the backend candidates could be imported; falling back to headless Agg!\")\n return ['Agg']\n\n\ndef _collect_all_importable_backends(hook_api):\n \"\"\"\n Returns a list of all importable matplotlib backends. Implements the 'all' backend selection method.\n \"\"\"\n # List of the human-readable names of all available backends.\n backend_names = _list_available_mpl_backends() # NOTE: retrieved in an isolated sub-process.\n logger.info(\"All available matplotlib backends: %r\", backend_names)\n\n # Try to import the module(s).\n importable_backends = []\n\n # List of backends to exclude; Qt4 is not supported by PyInstaller anymore.\n exclude_backends = {'Qt4Agg', 'Qt4Cairo'}\n\n # Ignore \"CocoaAgg\" on OSes other than Mac OS; attempting to import it on other OSes halts the current\n # (sub)process without printing output or raising exceptions, preventing reliable detection. 
Apply the\n # same logic for the (newer) \"MacOSX\" backend.\n if not is_darwin:\n exclude_backends |= {'CocoaAgg', 'MacOSX'}\n\n # For safety, attempt to import each backend in an isolated sub-process.\n for backend_name in backend_names:\n if backend_name in exclude_backends:\n logger.info(' Matplotlib backend %r: excluded', backend_name)\n continue\n\n try:\n module_name = _backend_module_name(backend_name)\n _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.\n except Exception:\n # Backend is not importable, for whatever reason.\n logger.info(' Matplotlib backend %r: ignored due to import error', backend_name)\n continue\n\n logger.info(' Matplotlib backend %r: added', backend_name)\n importable_backends.append(backend_name)\n\n return importable_backends\n\n\ndef hook(hook_api):\n # Backend collection setting\n backends_method = get_hook_config(hook_api, 'matplotlib', 'backends')\n if backends_method is None:\n backends_method = 'auto' # default method\n\n # Select backend(s)\n if backends_method == 'auto':\n logger.info(\"Matplotlib backend selection method: automatic discovery of used backends\")\n backend_names = _autodetect_used_backends(hook_api)\n elif backends_method == 'all':\n logger.info(\"Matplotlib backend selection method: collection of all importable backends\")\n backend_names = _collect_all_importable_backends(hook_api)\n else:\n logger.info(\"Matplotlib backend selection method: user-provided name(s)\")\n if isinstance(backends_method, str):\n backend_names = [backends_method]\n else:\n assert isinstance(backends_method, list), \"User-provided backend name(s) must be either a string or a list!\"\n backend_names = backends_method\n\n # Deduplicate and sort the list of selected backends before displaying it.\n backend_names = sorted(set(backend_names))\n\n logger.info(\"Selected matplotlib backends: %r\", backend_names)\n\n # Set module names as hiddenimports\n module_names = [_backend_module_name(backend) for backend in backend_names] # backend name -> module name\n hook_api.add_imports(*module_names)\n", "path": "PyInstaller/hooks/hook-matplotlib.backends.py"}]}
3,237
247
gh_patches_debug_16497
rasdani/github-patches
git_diff
googleapis__google-cloud-python-2528
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Vision raises TypeError: b'...' is not JSON serializable on Python 3 Simple reproducible case: ``` python import io import os from google.cloud import vision vision_client = vision.Client() file_name = os.path.join( os.path.dirname(__file__), 'resources/wakeupcat.jpg') with io.open(file_name, 'rb') as image_file: image = vision_client.image( content=image_file.read()) labels = image.detect_labels() print('Labels:') for label in labels: print(label.description) ``` works fine on Python 2, fails on Python 3. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vision/google/cloud/vision/image.py` Content: ``` 1 # Copyright 2016 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Image represented by either a URI or byte stream.""" 16 17 18 from base64 import b64encode 19 20 from google.cloud._helpers import _to_bytes 21 from google.cloud.vision.entity import EntityAnnotation 22 from google.cloud.vision.face import Face 23 from google.cloud.vision.feature import Feature 24 from google.cloud.vision.feature import FeatureTypes 25 from google.cloud.vision.color import ImagePropertiesAnnotation 26 from google.cloud.vision.safe import SafeSearchAnnotation 27 28 29 class Image(object): 30 """Image representation containing information to be annotate. 31 32 :type content: bytes 33 :param content: Byte stream of an image. 34 35 :type source_uri: str 36 :param source_uri: Google Cloud Storage URI of image. 37 38 :type client: :class:`~google.cloud.vision.client.Client` 39 :param client: Instance of Vision client. 40 """ 41 42 def __init__(self, client, content=None, source_uri=None): 43 self.client = client 44 self._content = None 45 self._source = None 46 47 if source_uri: 48 self._source = source_uri 49 else: 50 self._content = b64encode(_to_bytes(content)) 51 52 def as_dict(self): 53 """Generate dictionary structure for request. 54 55 :rtype: dict 56 :returns: Dictionary with source information for image. 57 """ 58 if self.content: 59 return { 60 'content': self.content 61 } 62 else: 63 return { 64 'source': { 65 'gcs_image_uri': self.source 66 } 67 } 68 69 @property 70 def content(self): 71 """Base64 encoded image content. 72 73 :rtype: str 74 :returns: Base64 encoded image bytes. 75 """ 76 return self._content 77 78 @property 79 def source(self): 80 """Google Cloud Storage URI. 81 82 :rtype: str 83 :returns: String of Google Cloud Storage URI. 84 """ 85 return self._source 86 87 def _detect_annotation(self, feature): 88 """Generic method for detecting a single annotation. 89 90 :type feature: :class:`~google.cloud.vision.feature.Feature` 91 :param feature: The ``Feature`` indication the type of annotation to 92 perform. 93 94 :rtype: list 95 :returns: List of 96 :class:`~google.cloud.vision.entity.EntityAnnotation`. 
97 """ 98 reverse_types = { 99 'LABEL_DETECTION': 'labelAnnotations', 100 'LANDMARK_DETECTION': 'landmarkAnnotations', 101 'LOGO_DETECTION': 'logoAnnotations', 102 'TEXT_DETECTION': 'textAnnotations', 103 } 104 detected_objects = [] 105 result = self.client.annotate(self, [feature]) 106 for response in result[reverse_types[feature.feature_type]]: 107 detected_object = EntityAnnotation.from_api_repr(response) 108 detected_objects.append(detected_object) 109 return detected_objects 110 111 def detect_faces(self, limit=10): 112 """Detect faces in image. 113 114 :type limit: int 115 :param limit: The number of faces to try and detect. 116 117 :rtype: list 118 :returns: List of :class:`~google.cloud.vision.face.Face`. 119 """ 120 faces = [] 121 face_detection_feature = Feature(FeatureTypes.FACE_DETECTION, limit) 122 result = self.client.annotate(self, [face_detection_feature]) 123 for face_response in result['faceAnnotations']: 124 face = Face.from_api_repr(face_response) 125 faces.append(face) 126 127 return faces 128 129 def detect_labels(self, limit=10): 130 """Detect labels that describe objects in an image. 131 132 :type limit: int 133 :param limit: The maximum number of labels to try and detect. 134 135 :rtype: list 136 :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation` 137 """ 138 feature = Feature(FeatureTypes.LABEL_DETECTION, limit) 139 return self._detect_annotation(feature) 140 141 def detect_landmarks(self, limit=10): 142 """Detect landmarks in an image. 143 144 :type limit: int 145 :param limit: The maximum number of landmarks to find. 146 147 :rtype: list 148 :returns: List of 149 :class:`~google.cloud.vision.entity.EntityAnnotation`. 150 """ 151 feature = Feature(FeatureTypes.LANDMARK_DETECTION, limit) 152 return self._detect_annotation(feature) 153 154 def detect_logos(self, limit=10): 155 """Detect logos in an image. 156 157 :type limit: int 158 :param limit: The maximum number of logos to find. 159 160 :rtype: list 161 :returns: List of 162 :class:`~google.cloud.vision.entity.EntityAnnotation`. 163 """ 164 feature = Feature(FeatureTypes.LOGO_DETECTION, limit) 165 return self._detect_annotation(feature) 166 167 def detect_properties(self, limit=10): 168 """Detect the color properties of an image. 169 170 :type limit: int 171 :param limit: The maximum number of image properties to find. 172 173 :rtype: list 174 :returns: List of 175 :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`. 176 """ 177 feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit) 178 result = self.client.annotate(self, [feature]) 179 response = result['imagePropertiesAnnotation'] 180 return ImagePropertiesAnnotation.from_api_repr(response) 181 182 def detect_safe_search(self, limit=10): 183 """Retreive safe search properties from an image. 184 185 :type limit: int 186 :param limit: The number of faces to try and detect. 187 188 :rtype: list 189 :returns: List of 190 :class:`~google.cloud.vision.sage.SafeSearchAnnotation`. 191 """ 192 safe_detection_feature = Feature(FeatureTypes.SAFE_SEARCH_DETECTION, 193 limit) 194 result = self.client.annotate(self, [safe_detection_feature]) 195 safe_search_response = result['safeSearchAnnotation'] 196 return SafeSearchAnnotation.from_api_repr(safe_search_response) 197 198 def detect_text(self, limit=10): 199 """Detect text in an image. 200 201 :type limit: int 202 :param limit: The maximum instances of text to find. 203 204 :rtype: list 205 :returns: List of 206 :class:`~google.cloud.vision.entity.EntityAnnotation`. 
207 """ 208 feature = Feature(FeatureTypes.TEXT_DETECTION, limit) 209 return self._detect_annotation(feature) 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py --- a/vision/google/cloud/vision/image.py +++ b/vision/google/cloud/vision/image.py @@ -18,6 +18,7 @@ from base64 import b64encode from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _bytes_to_unicode from google.cloud.vision.entity import EntityAnnotation from google.cloud.vision.face import Face from google.cloud.vision.feature import Feature @@ -47,7 +48,7 @@ if source_uri: self._source = source_uri else: - self._content = b64encode(_to_bytes(content)) + self._content = _bytes_to_unicode(b64encode(_to_bytes(content))) def as_dict(self): """Generate dictionary structure for request.
{"golden_diff": "diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py\n--- a/vision/google/cloud/vision/image.py\n+++ b/vision/google/cloud/vision/image.py\n@@ -18,6 +18,7 @@\n from base64 import b64encode\n \n from google.cloud._helpers import _to_bytes\n+from google.cloud._helpers import _bytes_to_unicode\n from google.cloud.vision.entity import EntityAnnotation\n from google.cloud.vision.face import Face\n from google.cloud.vision.feature import Feature\n@@ -47,7 +48,7 @@\n if source_uri:\n self._source = source_uri\n else:\n- self._content = b64encode(_to_bytes(content))\n+ self._content = _bytes_to_unicode(b64encode(_to_bytes(content)))\n \n def as_dict(self):\n \"\"\"Generate dictionary structure for request.\n", "issue": "Vision raises TypeError: b'...' is not JSON serializable on Python 3\nSimple reproducible case:\n\n``` python\nimport io\nimport os\n\nfrom google.cloud import vision\n\nvision_client = vision.Client()\n\nfile_name = os.path.join(\n os.path.dirname(__file__),\n 'resources/wakeupcat.jpg')\n\nwith io.open(file_name, 'rb') as image_file:\n image = vision_client.image(\n content=image_file.read())\n\nlabels = image.detect_labels()\n\nprint('Labels:')\nfor label in labels:\n print(label.description)\n```\n\nworks fine on Python 2, fails on Python 3.\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Image represented by either a URI or byte stream.\"\"\"\n\n\nfrom base64 import b64encode\n\nfrom google.cloud._helpers import _to_bytes\nfrom google.cloud.vision.entity import EntityAnnotation\nfrom google.cloud.vision.face import Face\nfrom google.cloud.vision.feature import Feature\nfrom google.cloud.vision.feature import FeatureTypes\nfrom google.cloud.vision.color import ImagePropertiesAnnotation\nfrom google.cloud.vision.safe import SafeSearchAnnotation\n\n\nclass Image(object):\n \"\"\"Image representation containing information to be annotate.\n\n :type content: bytes\n :param content: Byte stream of an image.\n\n :type source_uri: str\n :param source_uri: Google Cloud Storage URI of image.\n\n :type client: :class:`~google.cloud.vision.client.Client`\n :param client: Instance of Vision client.\n \"\"\"\n\n def __init__(self, client, content=None, source_uri=None):\n self.client = client\n self._content = None\n self._source = None\n\n if source_uri:\n self._source = source_uri\n else:\n self._content = b64encode(_to_bytes(content))\n\n def as_dict(self):\n \"\"\"Generate dictionary structure for request.\n\n :rtype: dict\n :returns: Dictionary with source information for image.\n \"\"\"\n if self.content:\n return {\n 'content': self.content\n }\n else:\n return {\n 'source': {\n 'gcs_image_uri': self.source\n }\n }\n\n @property\n def content(self):\n \"\"\"Base64 encoded image content.\n\n :rtype: str\n :returns: Base64 encoded image bytes.\n \"\"\"\n return self._content\n\n @property\n def source(self):\n \"\"\"Google Cloud Storage URI.\n\n :rtype: str\n 
:returns: String of Google Cloud Storage URI.\n \"\"\"\n return self._source\n\n def _detect_annotation(self, feature):\n \"\"\"Generic method for detecting a single annotation.\n\n :type feature: :class:`~google.cloud.vision.feature.Feature`\n :param feature: The ``Feature`` indication the type of annotation to\n perform.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n reverse_types = {\n 'LABEL_DETECTION': 'labelAnnotations',\n 'LANDMARK_DETECTION': 'landmarkAnnotations',\n 'LOGO_DETECTION': 'logoAnnotations',\n 'TEXT_DETECTION': 'textAnnotations',\n }\n detected_objects = []\n result = self.client.annotate(self, [feature])\n for response in result[reverse_types[feature.feature_type]]:\n detected_object = EntityAnnotation.from_api_repr(response)\n detected_objects.append(detected_object)\n return detected_objects\n\n def detect_faces(self, limit=10):\n \"\"\"Detect faces in image.\n\n :type limit: int\n :param limit: The number of faces to try and detect.\n\n :rtype: list\n :returns: List of :class:`~google.cloud.vision.face.Face`.\n \"\"\"\n faces = []\n face_detection_feature = Feature(FeatureTypes.FACE_DETECTION, limit)\n result = self.client.annotate(self, [face_detection_feature])\n for face_response in result['faceAnnotations']:\n face = Face.from_api_repr(face_response)\n faces.append(face)\n\n return faces\n\n def detect_labels(self, limit=10):\n \"\"\"Detect labels that describe objects in an image.\n\n :type limit: int\n :param limit: The maximum number of labels to try and detect.\n\n :rtype: list\n :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation`\n \"\"\"\n feature = Feature(FeatureTypes.LABEL_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_landmarks(self, limit=10):\n \"\"\"Detect landmarks in an image.\n\n :type limit: int\n :param limit: The maximum number of landmarks to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.LANDMARK_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_logos(self, limit=10):\n \"\"\"Detect logos in an image.\n\n :type limit: int\n :param limit: The maximum number of logos to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.LOGO_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_properties(self, limit=10):\n \"\"\"Detect the color properties of an image.\n\n :type limit: int\n :param limit: The maximum number of image properties to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit)\n result = self.client.annotate(self, [feature])\n response = result['imagePropertiesAnnotation']\n return ImagePropertiesAnnotation.from_api_repr(response)\n\n def detect_safe_search(self, limit=10):\n \"\"\"Retreive safe search properties from an image.\n\n :type limit: int\n :param limit: The number of faces to try and detect.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.sage.SafeSearchAnnotation`.\n \"\"\"\n safe_detection_feature = Feature(FeatureTypes.SAFE_SEARCH_DETECTION,\n limit)\n result = self.client.annotate(self, [safe_detection_feature])\n safe_search_response = result['safeSearchAnnotation']\n return SafeSearchAnnotation.from_api_repr(safe_search_response)\n\n def 
detect_text(self, limit=10):\n \"\"\"Detect text in an image.\n\n :type limit: int\n :param limit: The maximum instances of text to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.TEXT_DETECTION, limit)\n return self._detect_annotation(feature)\n", "path": "vision/google/cloud/vision/image.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Image represented by either a URI or byte stream.\"\"\"\n\n\nfrom base64 import b64encode\n\nfrom google.cloud._helpers import _to_bytes\nfrom google.cloud._helpers import _bytes_to_unicode\nfrom google.cloud.vision.entity import EntityAnnotation\nfrom google.cloud.vision.face import Face\nfrom google.cloud.vision.feature import Feature\nfrom google.cloud.vision.feature import FeatureTypes\nfrom google.cloud.vision.color import ImagePropertiesAnnotation\nfrom google.cloud.vision.safe import SafeSearchAnnotation\n\n\nclass Image(object):\n \"\"\"Image representation containing information to be annotate.\n\n :type content: bytes\n :param content: Byte stream of an image.\n\n :type source_uri: str\n :param source_uri: Google Cloud Storage URI of image.\n\n :type client: :class:`~google.cloud.vision.client.Client`\n :param client: Instance of Vision client.\n \"\"\"\n\n def __init__(self, client, content=None, source_uri=None):\n self.client = client\n self._content = None\n self._source = None\n\n if source_uri:\n self._source = source_uri\n else:\n self._content = _bytes_to_unicode(b64encode(_to_bytes(content)))\n\n def as_dict(self):\n \"\"\"Generate dictionary structure for request.\n\n :rtype: dict\n :returns: Dictionary with source information for image.\n \"\"\"\n if self.content:\n return {\n 'content': self.content\n }\n else:\n return {\n 'source': {\n 'gcs_image_uri': self.source\n }\n }\n\n @property\n def content(self):\n \"\"\"Base64 encoded image content.\n\n :rtype: str\n :returns: Base64 encoded image bytes.\n \"\"\"\n return self._content\n\n @property\n def source(self):\n \"\"\"Google Cloud Storage URI.\n\n :rtype: str\n :returns: String of Google Cloud Storage URI.\n \"\"\"\n return self._source\n\n def _detect_annotation(self, feature):\n \"\"\"Generic method for detecting a single annotation.\n\n :type feature: :class:`~google.cloud.vision.feature.Feature`\n :param feature: The ``Feature`` indication the type of annotation to\n perform.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n reverse_types = {\n 'LABEL_DETECTION': 'labelAnnotations',\n 'LANDMARK_DETECTION': 'landmarkAnnotations',\n 'LOGO_DETECTION': 'logoAnnotations',\n 'TEXT_DETECTION': 'textAnnotations',\n }\n detected_objects = []\n result = self.client.annotate(self, [feature])\n for response in result[reverse_types[feature.feature_type]]:\n detected_object = EntityAnnotation.from_api_repr(response)\n detected_objects.append(detected_object)\n return 
detected_objects\n\n def detect_faces(self, limit=10):\n \"\"\"Detect faces in image.\n\n :type limit: int\n :param limit: The number of faces to try and detect.\n\n :rtype: list\n :returns: List of :class:`~google.cloud.vision.face.Face`.\n \"\"\"\n faces = []\n face_detection_feature = Feature(FeatureTypes.FACE_DETECTION, limit)\n result = self.client.annotate(self, [face_detection_feature])\n for face_response in result['faceAnnotations']:\n face = Face.from_api_repr(face_response)\n faces.append(face)\n\n return faces\n\n def detect_labels(self, limit=10):\n \"\"\"Detect labels that describe objects in an image.\n\n :type limit: int\n :param limit: The maximum number of labels to try and detect.\n\n :rtype: list\n :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation`\n \"\"\"\n feature = Feature(FeatureTypes.LABEL_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_landmarks(self, limit=10):\n \"\"\"Detect landmarks in an image.\n\n :type limit: int\n :param limit: The maximum number of landmarks to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.LANDMARK_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_logos(self, limit=10):\n \"\"\"Detect logos in an image.\n\n :type limit: int\n :param limit: The maximum number of logos to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.LOGO_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_properties(self, limit=10):\n \"\"\"Detect the color properties of an image.\n\n :type limit: int\n :param limit: The maximum number of image properties to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit)\n result = self.client.annotate(self, [feature])\n response = result['imagePropertiesAnnotation']\n return ImagePropertiesAnnotation.from_api_repr(response)\n\n def detect_safe_search(self, limit=10):\n \"\"\"Retreive safe search properties from an image.\n\n :type limit: int\n :param limit: The number of faces to try and detect.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.sage.SafeSearchAnnotation`.\n \"\"\"\n safe_detection_feature = Feature(FeatureTypes.SAFE_SEARCH_DETECTION,\n limit)\n result = self.client.annotate(self, [safe_detection_feature])\n safe_search_response = result['safeSearchAnnotation']\n return SafeSearchAnnotation.from_api_repr(safe_search_response)\n\n def detect_text(self, limit=10):\n \"\"\"Detect text in an image.\n\n :type limit: int\n :param limit: The maximum instances of text to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.TEXT_DETECTION, limit)\n return self._detect_annotation(feature)\n", "path": "vision/google/cloud/vision/image.py"}]}
2,384
193
gh_patches_debug_19957
rasdani/github-patches
git_diff
huggingface__accelerate-43
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none` Currently the `AcceleratedOptimizer` class doesn't support the argument `set_to_none`, is this an intentional exclusion? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/accelerate/optimizer.py` Content: ``` 1 # Copyright 2021 The HuggingFace Team. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import torch 16 17 from .state import AcceleratorState, DistributedType, is_tpu_available 18 19 20 if is_tpu_available(): 21 import torch_xla.core.xla_model as xm 22 23 24 def move_to_device(state, device): 25 if isinstance(state, (list, tuple)): 26 return type(state)(move_to_device(t, device) for t in state) 27 elif isinstance(state, dict): 28 return type(state)({k: move_to_device(v, device) for k, v in state.items()}) 29 elif isinstance(state, torch.Tensor): 30 return state.to(device) 31 return state 32 33 34 class AcceleratedOptimizer(torch.optim.Optimizer): 35 """ 36 Internal wrapper around a torch optimizer. 37 38 Args: 39 optimizer (:obj:`torch.optim.optimizer.Optimizer`): 40 The optimizer to wrap. 41 device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`): 42 Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of 43 :obj:`optimizer` on the right device. 44 scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`): 45 The scaler to use in the step function if training with mixed precision. 
46 """ 47 48 def __init__(self, optimizer, device_placement=True, scaler=None): 49 self.optimizer = optimizer 50 self.scaler = scaler 51 self.state = AcceleratorState() 52 53 # Handle device placement 54 if device_placement: 55 state_dict = self.optimizer.state_dict() 56 if self.state.distributed_type == DistributedType.TPU: 57 xm.send_cpu_data_to_device(state_dict, self.state.device) 58 else: 59 state_dict = move_to_device(state_dict, self.state.device) 60 self.optimizer.load_state_dict(state_dict) 61 62 @property 63 def param_groups(self): 64 return self.optimizer.param_groups 65 66 @param_groups.setter 67 def param_groups(self, param_groups): 68 self.optimizer.param_groups = param_groups 69 70 @property 71 def defaults(self): 72 return self.optimizer.defaults 73 74 @defaults.setter 75 def defaults(self, defaults): 76 self.optimizer.defaults = defaults 77 78 def add_param_group(self, param_group): 79 self.optimizer.add_param_group(param_group) 80 81 def load_state_dict(self, state_dict): 82 if self.state.distributed_type == DistributedType.TPU and self.device_placement: 83 xm.send_cpu_data_to_device(state_dict, self.state.device) 84 self.optimizer.load_state_dict(state_dict) 85 86 def state_dict(self): 87 return self.optimizer.state_dict() 88 89 def zero_grad(self): 90 self.optimizer.zero_grad() 91 92 def step(self): 93 if self.state.distributed_type == DistributedType.TPU: 94 xm.optimizer_step(self.optimizer) 95 elif self.scaler is not None: 96 self.scaler.step(self.optimizer) 97 self.scaler.update() 98 else: 99 self.optimizer.step() 100 101 def _switch_parameters(self, parameters_map): 102 for param_group in self.optimizer.param_groups: 103 param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]] 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py --- a/src/accelerate/optimizer.py +++ b/src/accelerate/optimizer.py @@ -14,6 +14,8 @@ import torch +from packaging import version + from .state import AcceleratorState, DistributedType, is_tpu_available @@ -86,8 +88,18 @@ def state_dict(self): return self.optimizer.state_dict() - def zero_grad(self): - self.optimizer.zero_grad() + def zero_grad(self, set_to_none=None): + if version.parse(torch.__version__) < version.parse("1.7.0"): + if set_to_none is not None: + raise ValueError( + "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for " + f"earlier versions (found version {torch.__version__})." + ) + self.optimizer.zero_grad() + else: + if set_to_none is not None: + set_to_none = False + self.optimizer.zero_grad(set_to_none=set_to_none) def step(self): if self.state.distributed_type == DistributedType.TPU:
{"golden_diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -14,6 +14,8 @@\n \n import torch\n \n+from packaging import version\n+\n from .state import AcceleratorState, DistributedType, is_tpu_available\n \n \n@@ -86,8 +88,18 @@\n def state_dict(self):\n return self.optimizer.state_dict()\n \n- def zero_grad(self):\n- self.optimizer.zero_grad()\n+ def zero_grad(self, set_to_none=None):\n+ if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if set_to_none is not None:\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n+ self.optimizer.zero_grad()\n+ else:\n+ if set_to_none is not None:\n+ set_to_none = False\n+ self.optimizer.zero_grad(set_to_none=set_to_none)\n \n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n", "issue": "AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none`\nCurrently the `AcceleratedOptimizer` class doesn't support the argument `set_to_none`, is this an intentional exclusion?\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return type(state)(move_to_device(t, device) for t in state)\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n xm.optimizer_step(self.optimizer)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n", "path": "src/accelerate/optimizer.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom packaging import version\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return type(state)(move_to_device(t, device) for t in state)\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n if set_to_none is not None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n\n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n xm.optimizer_step(self.optimizer)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n", "path": "src/accelerate/optimizer.py"}]}
1,292
280
gh_patches_debug_35610
rasdani/github-patches
git_diff
joke2k__faker-712
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Credit card numbers' validity is not enforced Credit card numbers generated by `credit_card_number()` are not always valid. ### Steps to reproduce ```python fake = Faker() fake.credit_card_number() ``` ### Expected behavior The credit card numbers generated should always be valid ones. ### Actual behavior Some returned numbers are invalid. For example: "3337083863794024", "3337957986872777", "3088184514627042". You can verify it with http://www.validcreditcardnumber.com/ The problem is that the issuer identification is not always correct. You can check that by performing a RegEx, see as an example https://www.regular-expressions.info/creditcard.html Could the generator be modified to ensure that credit card numbers are always valid? Credit card numbers' validity is not enforced Credit card numbers generated by `credit_card_number()` are not always valid. ### Steps to reproduce ```python fake = Faker() fake.credit_card_number() ``` ### Expected behavior The credit card numbers generated should always be valid ones. ### Actual behavior Some returned numbers are invalid. For example: "3337083863794024", "3337957986872777", "3088184514627042". You can verify it with http://www.validcreditcardnumber.com/ The problem is that the issuer identification is not always correct. You can check that by performing a RegEx, see as an example https://www.regular-expressions.info/creditcard.html Could the generator be modified to ensure that credit card numbers are always valid? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/providers/credit_card/__init__.py` Content: ``` 1 # coding=utf-8 2 from __future__ import unicode_literals 3 from collections import OrderedDict 4 5 from .. 
import BaseProvider 6 7 8 class CreditCard(object): 9 10 def __init__( 11 self, 12 name, 13 prefixes, 14 length=16, 15 security_code='CVC', 16 security_code_length=3): 17 self.name = name 18 self.prefixes = prefixes 19 self.length = length 20 self.security_code = security_code 21 self.security_code_length = security_code_length 22 23 24 class Provider(BaseProvider): 25 26 prefix_maestro = ['5018', '5020', '5038', '5612', '5893', 27 '6304', '6759', '6761', '6762', '6763', '0604', '6390'] 28 prefix_mastercard = ['51', '52', '53', '54', '55'] 29 prefix_visa = ['4'] 30 prefix_amex = ['34', '37'] 31 prefix_discover = ['6011'] 32 prefix_diners = ['300', '301', '302', '303', '304', '305'] 33 prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528'] 34 prefix_jcb15 = ['2100', '1800'] 35 prefix_voyager = ['8699'] 36 37 credit_card_types = OrderedDict(( 38 ('maestro', CreditCard('Maestro', 39 prefix_maestro, 12, security_code='CVV')), 40 ('mastercard', CreditCard('Mastercard', 41 prefix_mastercard, 16, security_code='CVV')), 42 ('visa16', CreditCard('VISA 16 digit', prefix_visa)), 43 ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)), 44 ('amex', CreditCard('American Express', prefix_amex, 45 15, security_code='CID', security_code_length=4)), 46 ('discover', CreditCard('Discover', prefix_discover)), 47 ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)), 48 ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)), 49 ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)), 50 ('voyager', CreditCard('Voyager', prefix_voyager, 15)), 51 )) 52 credit_card_types['visa'] = credit_card_types['visa16'] 53 credit_card_types['jcb'] = credit_card_types['jcb16'] 54 55 luhn_lookup = {'0': 0, '1': 2, '2': 4, '3': 6, '4': 8, 56 '5': 1, '6': 3, '7': 5, '8': 7, '9': 9} 57 58 def credit_card_provider(self, card_type=None): 59 """ Returns the provider's name of the credit card. """ 60 if card_type is None: 61 card_type = self.random_element(self.credit_card_types.keys()) 62 return self._credit_card_type(card_type).name 63 64 def credit_card_number(self, card_type=None): 65 """ Returns a valid credit card number. """ 66 card = self._credit_card_type(card_type) 67 prefix = self.random_element(card.prefixes) 68 number = self._generate_number(prefix, card.length) 69 return number 70 71 def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'): 72 expire_date = self.generator.date_time_between(start, end) 73 return expire_date.strftime(date_format) 74 75 def credit_card_full(self, card_type=None): 76 card = self._credit_card_type(card_type) 77 78 tpl = ('{provider}\n' 79 '{owner}\n' 80 '{number} {expire_date}\n' 81 '{security}: {security_nb}\n') 82 83 tpl = tpl.format(provider=card.name, 84 owner=self.generator.parse( 85 "{{first_name}} {{last_name}}"), 86 number=self.credit_card_number(card), 87 expire_date=self.credit_card_expire(), 88 security=card.security_code, 89 security_nb=self.credit_card_security_code(card)) 90 91 return self.generator.parse(tpl) 92 93 def credit_card_security_code(self, card_type=None): 94 """ Returns a security code string. """ 95 sec_len = self._credit_card_type(card_type).security_code_length 96 return self.numerify('#' * sec_len) 97 98 def _credit_card_type(self, card_type=None): 99 """ Returns a random credit card type instance. 
""" 100 if card_type is None: 101 card_type = self.random_element(self.credit_card_types.keys()) 102 elif isinstance(card_type, CreditCard): 103 return card_type 104 return self.credit_card_types[card_type] 105 106 def _generate_number(self, prefix, length): 107 """ 108 'prefix' is the start of the CC number as a string, any number of digits. 109 'length' is the length of the CC number to generate. Typically 13 or 16 110 """ 111 number = prefix 112 # Generate random char digits 113 number += '#' * (length - len(prefix) - 1) 114 number = self.numerify(number) 115 reverse = number[::-1] 116 # Calculate sum 117 tot = 0 118 pos = 0 119 while pos < length - 1: 120 tot += Provider.luhn_lookup[reverse[pos]] 121 if pos != (length - 2): 122 tot += int(reverse[pos + 1]) 123 pos += 2 124 # Calculate check digit 125 check_digit = (10 - (tot % 10)) % 10 126 number += str(check_digit) 127 return number 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/providers/credit_card/__init__.py b/faker/providers/credit_card/__init__.py
--- a/faker/providers/credit_card/__init__.py
+++ b/faker/providers/credit_card/__init__.py
@@ -23,16 +23,19 @@
 
 class Provider(BaseProvider):
 
-    prefix_maestro = ['5018', '5020', '5038', '5612', '5893',
+    # Prefixes from:
+    # * https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_.28IIN.29
+    # * https://www.regular-expressions.info/creditcard.html
+    # * https://creditcardjs.com/credit-card-type-detection
+    prefix_maestro = ['5018', '5020', '5038', '56##', '57##', '58##',
                       '6304', '6759', '6761', '6762', '6763', '0604', '6390']
-    prefix_mastercard = ['51', '52', '53', '54', '55']
+    prefix_mastercard = ['51', '52', '53', '54', '55', '222%']
     prefix_visa = ['4']
     prefix_amex = ['34', '37']
-    prefix_discover = ['6011']
-    prefix_diners = ['300', '301', '302', '303', '304', '305']
-    prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528']
-    prefix_jcb15 = ['2100', '1800']
-    prefix_voyager = ['8699']
+    prefix_discover = ['6011', '65']
+    prefix_diners = ['300', '301', '302', '303', '304', '305', '36', '38']
+    prefix_jcb16 = ['35']
+    prefix_jcb15 = ['2131', '1800']
 
     credit_card_types = OrderedDict((
         ('maestro', CreditCard('Maestro',
@@ -47,7 +50,6 @@
         ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),
         ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),
         ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),
-        ('voyager', CreditCard('Voyager', prefix_voyager, 15)),
     ))
     credit_card_types['visa'] = credit_card_types['visa16']
     credit_card_types['jcb'] = credit_card_types['jcb16']
@@ -65,7 +67,7 @@
         """ Returns a valid credit card number. """
         card = self._credit_card_type(card_type)
         prefix = self.random_element(card.prefixes)
-        number = self._generate_number(prefix, card.length)
+        number = self._generate_number(self.numerify(prefix), card.length)
         return number
 
     def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):
{"golden_diff": "diff --git a/faker/providers/credit_card/__init__.py b/faker/providers/credit_card/__init__.py\n--- a/faker/providers/credit_card/__init__.py\n+++ b/faker/providers/credit_card/__init__.py\n@@ -23,16 +23,19 @@\n \n class Provider(BaseProvider):\n \n- prefix_maestro = ['5018', '5020', '5038', '5612', '5893',\n+ # Prefixes from:\n+ # * https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_.28IIN.29\n+ # * https://www.regular-expressions.info/creditcard.html\n+ # * https://creditcardjs.com/credit-card-type-detection\n+ prefix_maestro = ['5018', '5020', '5038', '56##', '57##', '58##',\n '6304', '6759', '6761', '6762', '6763', '0604', '6390']\n- prefix_mastercard = ['51', '52', '53', '54', '55']\n+ prefix_mastercard = ['51', '52', '53', '54', '55', '222%']\n prefix_visa = ['4']\n prefix_amex = ['34', '37']\n- prefix_discover = ['6011']\n- prefix_diners = ['300', '301', '302', '303', '304', '305']\n- prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528']\n- prefix_jcb15 = ['2100', '1800']\n- prefix_voyager = ['8699']\n+ prefix_discover = ['6011', '65']\n+ prefix_diners = ['300', '301', '302', '303', '304', '305', '36', '38']\n+ prefix_jcb16 = ['35']\n+ prefix_jcb15 = ['2131', '1800']\n \n credit_card_types = OrderedDict((\n ('maestro', CreditCard('Maestro',\n@@ -47,7 +50,6 @@\n ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),\n ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),\n ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),\n- ('voyager', CreditCard('Voyager', prefix_voyager, 15)),\n ))\n credit_card_types['visa'] = credit_card_types['visa16']\n credit_card_types['jcb'] = credit_card_types['jcb16']\n@@ -65,7 +67,7 @@\n \"\"\" Returns a valid credit card number. \"\"\"\n card = self._credit_card_type(card_type)\n prefix = self.random_element(card.prefixes)\n- number = self._generate_number(prefix, card.length)\n+ number = self._generate_number(self.numerify(prefix), card.length)\n return number\n \n def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):\n", "issue": "Credit card numbers' validity is not enforced\nCredit card numbers generated by `credit_card_number()` are not always valid.\r\n\r\n### Steps to reproduce\r\n\r\n```python\r\nfake = Faker()\r\nfake.credit_card_number()\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe credit card numbers generated should always be valid ones.\r\n\r\n### Actual behavior\r\n\r\nSome returned numbers are invalid. For example: \"3337083863794024\", \"3337957986872777\", \"3088184514627042\". You can verify it with http://www.validcreditcardnumber.com/\r\n\r\nThe problem is that the issuer identification is not always correct. You can check that by performing a RegEx, see as an example https://www.regular-expressions.info/creditcard.html\r\n\r\nCould the generator be modified to ensure that credit card numbers are always valid?\nCredit card numbers' validity is not enforced\nCredit card numbers generated by `credit_card_number()` are not always valid.\r\n\r\n### Steps to reproduce\r\n\r\n```python\r\nfake = Faker()\r\nfake.credit_card_number()\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe credit card numbers generated should always be valid ones.\r\n\r\n### Actual behavior\r\n\r\nSome returned numbers are invalid. For example: \"3337083863794024\", \"3337957986872777\", \"3088184514627042\". You can verify it with http://www.validcreditcardnumber.com/\r\n\r\nThe problem is that the issuer identification is not always correct. 
You can check that by performing a RegEx, see as an example https://www.regular-expressions.info/creditcard.html\r\n\r\nCould the generator be modified to ensure that credit card numbers are always valid?\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n\nfrom .. import BaseProvider\n\n\nclass CreditCard(object):\n\n def __init__(\n self,\n name,\n prefixes,\n length=16,\n security_code='CVC',\n security_code_length=3):\n self.name = name\n self.prefixes = prefixes\n self.length = length\n self.security_code = security_code\n self.security_code_length = security_code_length\n\n\nclass Provider(BaseProvider):\n\n prefix_maestro = ['5018', '5020', '5038', '5612', '5893',\n '6304', '6759', '6761', '6762', '6763', '0604', '6390']\n prefix_mastercard = ['51', '52', '53', '54', '55']\n prefix_visa = ['4']\n prefix_amex = ['34', '37']\n prefix_discover = ['6011']\n prefix_diners = ['300', '301', '302', '303', '304', '305']\n prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528']\n prefix_jcb15 = ['2100', '1800']\n prefix_voyager = ['8699']\n\n credit_card_types = OrderedDict((\n ('maestro', CreditCard('Maestro',\n prefix_maestro, 12, security_code='CVV')),\n ('mastercard', CreditCard('Mastercard',\n prefix_mastercard, 16, security_code='CVV')),\n ('visa16', CreditCard('VISA 16 digit', prefix_visa)),\n ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),\n ('amex', CreditCard('American Express', prefix_amex,\n 15, security_code='CID', security_code_length=4)),\n ('discover', CreditCard('Discover', prefix_discover)),\n ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),\n ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),\n ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),\n ('voyager', CreditCard('Voyager', prefix_voyager, 15)),\n ))\n credit_card_types['visa'] = credit_card_types['visa16']\n credit_card_types['jcb'] = credit_card_types['jcb16']\n\n luhn_lookup = {'0': 0, '1': 2, '2': 4, '3': 6, '4': 8,\n '5': 1, '6': 3, '7': 5, '8': 7, '9': 9}\n\n def credit_card_provider(self, card_type=None):\n \"\"\" Returns the provider's name of the credit card. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n return self._credit_card_type(card_type).name\n\n def credit_card_number(self, card_type=None):\n \"\"\" Returns a valid credit card number. \"\"\"\n card = self._credit_card_type(card_type)\n prefix = self.random_element(card.prefixes)\n number = self._generate_number(prefix, card.length)\n return number\n\n def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):\n expire_date = self.generator.date_time_between(start, end)\n return expire_date.strftime(date_format)\n\n def credit_card_full(self, card_type=None):\n card = self._credit_card_type(card_type)\n\n tpl = ('{provider}\\n'\n '{owner}\\n'\n '{number} {expire_date}\\n'\n '{security}: {security_nb}\\n')\n\n tpl = tpl.format(provider=card.name,\n owner=self.generator.parse(\n \"{{first_name}} {{last_name}}\"),\n number=self.credit_card_number(card),\n expire_date=self.credit_card_expire(),\n security=card.security_code,\n security_nb=self.credit_card_security_code(card))\n\n return self.generator.parse(tpl)\n\n def credit_card_security_code(self, card_type=None):\n \"\"\" Returns a security code string. 
\"\"\"\n sec_len = self._credit_card_type(card_type).security_code_length\n return self.numerify('#' * sec_len)\n\n def _credit_card_type(self, card_type=None):\n \"\"\" Returns a random credit card type instance. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n elif isinstance(card_type, CreditCard):\n return card_type\n return self.credit_card_types[card_type]\n\n def _generate_number(self, prefix, length):\n \"\"\"\n 'prefix' is the start of the CC number as a string, any number of digits.\n 'length' is the length of the CC number to generate. Typically 13 or 16\n \"\"\"\n number = prefix\n # Generate random char digits\n number += '#' * (length - len(prefix) - 1)\n number = self.numerify(number)\n reverse = number[::-1]\n # Calculate sum\n tot = 0\n pos = 0\n while pos < length - 1:\n tot += Provider.luhn_lookup[reverse[pos]]\n if pos != (length - 2):\n tot += int(reverse[pos + 1])\n pos += 2\n # Calculate check digit\n check_digit = (10 - (tot % 10)) % 10\n number += str(check_digit)\n return number\n", "path": "faker/providers/credit_card/__init__.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n\nfrom .. import BaseProvider\n\n\nclass CreditCard(object):\n\n def __init__(\n self,\n name,\n prefixes,\n length=16,\n security_code='CVC',\n security_code_length=3):\n self.name = name\n self.prefixes = prefixes\n self.length = length\n self.security_code = security_code\n self.security_code_length = security_code_length\n\n\nclass Provider(BaseProvider):\n\n # Prefixes from:\n # * https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_.28IIN.29\n # * https://www.regular-expressions.info/creditcard.html\n # * https://creditcardjs.com/credit-card-type-detection\n prefix_maestro = ['5018', '5020', '5038', '56##', '57##', '58##',\n '6304', '6759', '6761', '6762', '6763', '0604', '6390']\n prefix_mastercard = ['51', '52', '53', '54', '55', '222%']\n prefix_visa = ['4']\n prefix_amex = ['34', '37']\n prefix_discover = ['6011', '65']\n prefix_diners = ['300', '301', '302', '303', '304', '305', '36', '38']\n prefix_jcb16 = ['35']\n prefix_jcb15 = ['2131', '1800']\n\n credit_card_types = OrderedDict((\n ('maestro', CreditCard('Maestro',\n prefix_maestro, 12, security_code='CVV')),\n ('mastercard', CreditCard('Mastercard',\n prefix_mastercard, 16, security_code='CVV')),\n ('visa16', CreditCard('VISA 16 digit', prefix_visa)),\n ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),\n ('amex', CreditCard('American Express', prefix_amex,\n 15, security_code='CID', security_code_length=4)),\n ('discover', CreditCard('Discover', prefix_discover)),\n ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),\n ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),\n ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),\n ))\n credit_card_types['visa'] = credit_card_types['visa16']\n credit_card_types['jcb'] = credit_card_types['jcb16']\n\n luhn_lookup = {'0': 0, '1': 2, '2': 4, '3': 6, '4': 8,\n '5': 1, '6': 3, '7': 5, '8': 7, '9': 9}\n\n def credit_card_provider(self, card_type=None):\n \"\"\" Returns the provider's name of the credit card. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n return self._credit_card_type(card_type).name\n\n def credit_card_number(self, card_type=None):\n \"\"\" Returns a valid credit card number. 
\"\"\"\n card = self._credit_card_type(card_type)\n prefix = self.random_element(card.prefixes)\n number = self._generate_number(self.numerify(prefix), card.length)\n return number\n\n def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):\n expire_date = self.generator.date_time_between(start, end)\n return expire_date.strftime(date_format)\n\n def credit_card_full(self, card_type=None):\n card = self._credit_card_type(card_type)\n\n tpl = ('{provider}\\n'\n '{owner}\\n'\n '{number} {expire_date}\\n'\n '{security}: {security_nb}\\n')\n\n tpl = tpl.format(provider=card.name,\n owner=self.generator.parse(\n \"{{first_name}} {{last_name}}\"),\n number=self.credit_card_number(card),\n expire_date=self.credit_card_expire(),\n security=card.security_code,\n security_nb=self.credit_card_security_code(card))\n\n return self.generator.parse(tpl)\n\n def credit_card_security_code(self, card_type=None):\n \"\"\" Returns a security code string. \"\"\"\n sec_len = self._credit_card_type(card_type).security_code_length\n return self.numerify('#' * sec_len)\n\n def _credit_card_type(self, card_type=None):\n \"\"\" Returns a random credit card type instance. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n elif isinstance(card_type, CreditCard):\n return card_type\n return self.credit_card_types[card_type]\n\n def _generate_number(self, prefix, length):\n \"\"\"\n 'prefix' is the start of the CC number as a string, any number of digits.\n 'length' is the length of the CC number to generate. Typically 13 or 16\n \"\"\"\n number = prefix\n # Generate random char digits\n number += '#' * (length - len(prefix) - 1)\n number = self.numerify(number)\n reverse = number[::-1]\n # Calculate sum\n tot = 0\n pos = 0\n while pos < length - 1:\n tot += Provider.luhn_lookup[reverse[pos]]\n if pos != (length - 2):\n tot += int(reverse[pos + 1])\n pos += 2\n # Calculate check digit\n check_digit = (10 - (tot % 10)) % 10\n number += str(check_digit)\n return number\n", "path": "faker/providers/credit_card/__init__.py"}]}
2,239
821
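A note on what "valid" means in the record above: Faker's `_generate_number` already appends a Luhn check digit, so the golden diff only has to fix the issuer prefixes (and numerify wildcard prefixes such as `56##` before generating). As a rough, standalone way to spot bad numbers like the ones quoted in the issue, one could combine a Luhn check with the simplified issuer patterns from regular-expressions.info. This is only an illustrative sketch; neither function is part of Faker, and the pattern list is deliberately incomplete.

```python
import re

# Simplified issuer patterns (Visa, Mastercard, Amex, Discover, JCB 16, Diners).
# Deliberately incomplete: the newer Mastercard 2221-2720 range, Maestro, etc. are omitted.
ISSUER_PATTERNS = [
    r"4\d{12}(\d{3})?",         # Visa, 13 or 16 digits
    r"5[1-5]\d{14}",            # Mastercard
    r"3[47]\d{13}",             # American Express
    r"6(011|5\d{2})\d{12}",     # Discover
    r"35\d{14}",                # JCB, 16 digits
    r"3(0[0-5]|[68]\d)\d{11}",  # Diners Club / Carte Blanche
]

def luhn_valid(number):
    """True if `number` (a digit string) passes the Luhn checksum."""
    digits = [int(d) for d in number]
    total = sum(digits[-1::-2])                                    # digits that are not doubled
    total += sum(sum(divmod(2 * d, 10)) for d in digits[-2::-2])   # doubled digits, summed digit-wise
    return total % 10 == 0

def plausible_card(number):
    """Luhn-valid and matching at least one known issuer prefix/length."""
    return luhn_valid(number) and any(re.fullmatch(p, number) for p in ISSUER_PATTERNS)

# "3337083863794024" from the issue is Luhn-valid (Faker always appends a check
# digit) yet matches no issuer pattern, which is exactly what the diff fixes.
```

Run against a large sample of `fake.credit_card_number()` output, a check like this is what separates the pre-fix prefixes (for example the bogus `3088`/`3337` JCB ranges) from the corrected ones.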
gh_patches_debug_19468
rasdani/github-patches
git_diff
opsdroid__opsdroid-58
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Default connector and default room When writing a skill which originates from something other than a message (e.g cron #26) the response may need to know which room to post into. Most chat clients have a default room, like `#general` in Slack. This could be available as a property in the connector so that skills can easily access it. e.g ``` python @non_message_decorator() def myskill(opsdroid): for connector in opsdroid.connectors: message = Message("Message text", connector.default_room, None, connector) connector.respond(message) ``` It should also be possible to override the default room in the connector config. ``` yaml connectors: slack: default-room: "#random" ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/core.py` Content: ``` 1 """Core components of OpsDroid.""" 2 3 import logging 4 import sys 5 import weakref 6 import asyncio 7 8 from opsdroid.memory import Memory 9 from opsdroid.connector import Connector 10 from opsdroid.database import Database 11 from opsdroid.loader import Loader 12 from opsdroid.parsers.regex import parse_regex 13 from opsdroid.parsers.apiai import parse_apiai 14 15 16 class OpsDroid(): 17 """Root object for opsdroid.""" 18 19 # pylint: disable=too-many-instance-attributes 20 # All are reasonable in this case. 21 22 instances = [] 23 24 def __init__(self): 25 """Start opsdroid.""" 26 self.bot_name = 'opsdroid' 27 self.sys_status = 0 28 self.connectors = [] 29 self.connector_tasks = [] 30 self.eventloop = asyncio.get_event_loop() 31 self.skills = [] 32 self.memory = Memory() 33 self.loader = Loader(self) 34 self.config = {} 35 logging.info("Created main opsdroid object") 36 37 def __enter__(self): 38 """Add self to existing instances.""" 39 if len(self.__class__.instances) == 0: 40 self.__class__.instances.append(weakref.proxy(self)) 41 else: 42 self.critical("opsdroid has already been started", 1) 43 return self 44 45 def __exit__(self, exc_type, exc_value, traceback): 46 """Remove self from existing instances.""" 47 self.__class__.instances = [] 48 49 def exit(self): 50 """Exit application.""" 51 logging.info("Exiting application with return code " + 52 str(self.sys_status)) 53 if self.eventloop.is_running(): 54 self.eventloop.stop() 55 sys.exit(self.sys_status) 56 57 def critical(self, error, code): 58 """Exit due to unrecoverable error.""" 59 self.sys_status = code 60 logging.critical(error) 61 print("Error: " + error) 62 self.exit() 63 64 def load(self): 65 """Load configuration.""" 66 self.config = self.loader.load_config_file([ 67 "./configuration.yaml", 68 "~/.opsdroid/configuration.yaml", 69 "/etc/opsdroid/configuration.yaml" 70 ]) 71 72 def start_loop(self): 73 """Start the event loop.""" 74 connectors, databases, skills = self.loader.load_config(self.config) 75 if databases is not None: 76 self.start_databases(databases) 77 self.setup_skills(skills) 78 self.start_connector_tasks(connectors) 79 try: 80 self.eventloop.run_forever() 81 except (KeyboardInterrupt, EOFError): 82 print('') # Prints a character return for return to shell 83 logging.info("Keyboard interrupt, exiting.") 84 self.exit() 85 86 def setup_skills(self, skills): 87 """Call the setup function on the passed in skills.""" 88 for skill in skills: 89 try: 90 skill["module"].setup(self) 91 except AttributeError: 92 pass 93 94 def start_connector_tasks(self, 
connectors): 95 """Start the connectors.""" 96 for connector_module in connectors: 97 for _, cls in connector_module["module"].__dict__.items(): 98 if isinstance(cls, type) and \ 99 issubclass(cls, Connector) and\ 100 cls is not Connector: 101 connector_module["config"]["bot-name"] = self.bot_name 102 connector = cls(connector_module["config"]) 103 self.connectors.append(connector) 104 105 if len(connectors) > 0: 106 for connector in self.connectors: 107 self.eventloop.run_until_complete(connector.connect(self)) 108 for connector in self.connectors: 109 task = self.eventloop.create_task(connector.listen(self)) 110 self.connector_tasks.append(task) 111 else: 112 self.critical("All connectors failed to load", 1) 113 114 def start_databases(self, databases): 115 """Start the databases.""" 116 if len(databases) == 0: 117 logging.debug(databases) 118 logging.warning("All databases failed to load") 119 for database_module in databases: 120 for name, cls in database_module["module"].__dict__.items(): 121 if isinstance(cls, type) and \ 122 issubclass(cls, Database) and \ 123 cls is not Database: 124 logging.debug("Adding database: " + name) 125 database = cls(database_module["config"]) 126 self.memory.databases.append(database) 127 self.eventloop.run_until_complete(database.connect(self)) 128 129 async def parse(self, message): 130 """Parse a string against all skills.""" 131 if message.text.strip() != "": 132 logging.debug("Parsing input: " + message.text) 133 134 await parse_regex(self, message) 135 136 if "parsers" in self.config and "apiai" in self.config["parsers"]: 137 await parse_apiai(self, message) 138 ``` Path: `opsdroid/connector.py` Content: ``` 1 """A base class for connectors to inherit from.""" 2 3 from opsdroid.message import Message # NOQA # pylint: disable=unused-import 4 5 6 class Connector(): 7 """A base connector. 8 9 Connectors are used to interact with a given chat service. 10 11 """ 12 13 def __init__(self, config): 14 """Setup the connector. 15 16 Set some basic properties from the connector config such as the name 17 of this connector and the name the bot should appear with in chat 18 service. 19 20 Args: 21 config (dict): The config for this connector specified in the 22 `configuration.yaml` file. 23 24 """ 25 self.name = "" 26 self.config = config 27 28 async def connect(self, opsdroid): 29 """Connect to chat service. 30 31 This method should create a connection to the desired chat service. 32 It should also be possible to call it multiple times in the event of 33 being disconnected. 34 35 Args: 36 opsdroid (OpsDroid): An instance of the opsdroid core. 37 38 """ 39 raise NotImplementedError 40 41 async def listen(self, opsdroid): 42 """Listen to chat service and parse all messages. 43 44 This method should block the thread with an infinite loop and create 45 Message objects for chat messages coming from the service. It should 46 then call `await opsdroid.parse(message)` on those messages. 47 48 As the method should include some kind of `while True` all messages 49 from the chat service should be "awaited" asyncronously to avoid 50 blocking the thread. 51 52 Args: 53 opsdroid (OpsDroid): An instance of the opsdroid core. 54 55 """ 56 raise NotImplementedError 57 58 async def respond(self, message): 59 """Send a message back to the chat service. 60 61 The message object will have a `text` property which should be sent 62 back to the chat service. It may also have a `room` and `user` property 63 which gives information on where the message should be directed. 
64 65 Args: 66 message (Message): A message received by the connector. 67 68 Returns: 69 bool: True for message successfully sent. False otherwise. 70 71 """ 72 raise NotImplementedError 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/connector.py b/opsdroid/connector.py
--- a/opsdroid/connector.py
+++ b/opsdroid/connector.py
@@ -24,6 +24,7 @@
         """
         self.name = ""
         self.config = config
+        self.default_room = None
 
     async def connect(self, opsdroid):
         """Connect to chat service.
diff --git a/opsdroid/core.py b/opsdroid/core.py
--- a/opsdroid/core.py
+++ b/opsdroid/core.py
@@ -46,6 +46,18 @@
         """Remove self from existing instances."""
         self.__class__.instances = []
 
+    @property
+    def default_connector(self):
+        """Return the default connector."""
+        default_connector = None
+        for connector in self.connectors:
+            if "default" in connector.config and connector.config["default"]:
+                default_connector = connector
+                break
+        if default_connector is None:
+            default_connector = self.connectors[0]
+        return default_connector
+
     def exit(self):
         """Exit application."""
         logging.info("Exiting application with return code " +
{"golden_diff": "diff --git a/opsdroid/connector.py b/opsdroid/connector.py\n--- a/opsdroid/connector.py\n+++ b/opsdroid/connector.py\n@@ -24,6 +24,7 @@\n \"\"\"\n self.name = \"\"\n self.config = config\n+ self.default_room = None\n \n async def connect(self, opsdroid):\n \"\"\"Connect to chat service.\ndiff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -46,6 +46,18 @@\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n \n+ @property\n+ def default_connector(self):\n+ \"\"\"Return the default connector.\"\"\"\n+ default_connector = None\n+ for connector in self.connectors:\n+ if \"default\" in connector.config and connector.config[\"default\"]:\n+ default_connector = connector\n+ break\n+ if default_connector is None:\n+ default_connector = self.connectors[0]\n+ return default_connector\n+\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n", "issue": "Default connector and default room\nWhen writing a skill which originates from something other than a message (e.g cron #26) the response may need to know which room to post into. \n\nMost chat clients have a default room, like `#general` in Slack. This could be available as a property in the connector so that skills can easily access it. \n\ne.g\n\n``` python\n@non_message_decorator()\ndef myskill(opsdroid):\n for connector in opsdroid.connectors:\n message = Message(\"Message text\", connector.default_room, None, connector)\n connector.respond(message)\n```\n\nIt should also be possible to override the default room in the connector config.\n\n``` yaml\nconnectors:\n slack:\n default-room: \"#random\"\n```\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nimport asyncio\n\nfrom opsdroid.memory import Memory\nfrom opsdroid.connector import Connector\nfrom opsdroid.database import Database\nfrom opsdroid.loader import Loader\nfrom opsdroid.parsers.regex import parse_regex\nfrom opsdroid.parsers.apiai import parse_apiai\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n # All are reasonable in this case.\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_tasks = []\n self.eventloop = asyncio.get_event_loop()\n self.skills = []\n self.memory = Memory()\n self.loader = Loader(self)\n self.config = {}\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n if self.eventloop.is_running():\n self.eventloop.stop()\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def load(self):\n \"\"\"Load configuration.\"\"\"\n self.config = self.loader.load_config_file([\n \"./configuration.yaml\",\n 
\"~/.opsdroid/configuration.yaml\",\n \"/etc/opsdroid/configuration.yaml\"\n ])\n\n def start_loop(self):\n \"\"\"Start the event loop.\"\"\"\n connectors, databases, skills = self.loader.load_config(self.config)\n if databases is not None:\n self.start_databases(databases)\n self.setup_skills(skills)\n self.start_connector_tasks(connectors)\n try:\n self.eventloop.run_forever()\n except (KeyboardInterrupt, EOFError):\n print('') # Prints a character return for return to shell\n logging.info(\"Keyboard interrupt, exiting.\")\n self.exit()\n\n def setup_skills(self, skills):\n \"\"\"Call the setup function on the passed in skills.\"\"\"\n for skill in skills:\n try:\n skill[\"module\"].setup(self)\n except AttributeError:\n pass\n\n def start_connector_tasks(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n for connector_module in connectors:\n for _, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Connector) and\\\n cls is not Connector:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n\n if len(connectors) > 0:\n for connector in self.connectors:\n self.eventloop.run_until_complete(connector.connect(self))\n for connector in self.connectors:\n task = self.eventloop.create_task(connector.listen(self))\n self.connector_tasks.append(task)\n else:\n self.critical(\"All connectors failed to load\", 1)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.debug(databases)\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Database) and \\\n cls is not Database:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n self.eventloop.run_until_complete(database.connect(self))\n\n async def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n\n await parse_regex(self, message)\n\n if \"parsers\" in self.config and \"apiai\" in self.config[\"parsers\"]:\n await parse_apiai(self, message)\n", "path": "opsdroid/core.py"}, {"content": "\"\"\"A base class for connectors to inherit from.\"\"\"\n\nfrom opsdroid.message import Message # NOQA # pylint: disable=unused-import\n\n\nclass Connector():\n \"\"\"A base connector.\n\n Connectors are used to interact with a given chat service.\n\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Setup the connector.\n\n Set some basic properties from the connector config such as the name\n of this connector and the name the bot should appear with in chat\n service.\n\n Args:\n config (dict): The config for this connector specified in the\n `configuration.yaml` file.\n\n \"\"\"\n self.name = \"\"\n self.config = config\n\n async def connect(self, opsdroid):\n \"\"\"Connect to chat service.\n\n This method should create a connection to the desired chat service.\n It should also be possible to call it multiple times in the event of\n being disconnected.\n\n Args:\n opsdroid (OpsDroid): An instance of the opsdroid core.\n\n \"\"\"\n raise NotImplementedError\n\n async def listen(self, opsdroid):\n \"\"\"Listen to chat service and parse all messages.\n\n This method should block the thread with an infinite loop and 
create\n Message objects for chat messages coming from the service. It should\n then call `await opsdroid.parse(message)` on those messages.\n\n As the method should include some kind of `while True` all messages\n from the chat service should be \"awaited\" asyncronously to avoid\n blocking the thread.\n\n Args:\n opsdroid (OpsDroid): An instance of the opsdroid core.\n\n \"\"\"\n raise NotImplementedError\n\n async def respond(self, message):\n \"\"\"Send a message back to the chat service.\n\n The message object will have a `text` property which should be sent\n back to the chat service. It may also have a `room` and `user` property\n which gives information on where the message should be directed.\n\n Args:\n message (Message): A message received by the connector.\n\n Returns:\n bool: True for message successfully sent. False otherwise.\n\n \"\"\"\n raise NotImplementedError\n", "path": "opsdroid/connector.py"}], "after_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nimport asyncio\n\nfrom opsdroid.memory import Memory\nfrom opsdroid.connector import Connector\nfrom opsdroid.database import Database\nfrom opsdroid.loader import Loader\nfrom opsdroid.parsers.regex import parse_regex\nfrom opsdroid.parsers.apiai import parse_apiai\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n # All are reasonable in this case.\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_tasks = []\n self.eventloop = asyncio.get_event_loop()\n self.skills = []\n self.memory = Memory()\n self.loader = Loader(self)\n self.config = {}\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n @property\n def default_connector(self):\n \"\"\"Return the default connector.\"\"\"\n default_connector = None\n for connector in self.connectors:\n if \"default\" in connector.config and connector.config[\"default\"]:\n default_connector = connector\n break\n if default_connector is None:\n default_connector = self.connectors[0]\n return default_connector\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n if self.eventloop.is_running():\n self.eventloop.stop()\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def load(self):\n \"\"\"Load configuration.\"\"\"\n self.config = self.loader.load_config_file([\n \"./configuration.yaml\",\n \"~/.opsdroid/configuration.yaml\",\n \"/etc/opsdroid/configuration.yaml\"\n ])\n\n def start_loop(self):\n \"\"\"Start the event loop.\"\"\"\n connectors, databases, skills = self.loader.load_config(self.config)\n if databases is not None:\n self.start_databases(databases)\n self.setup_skills(skills)\n self.start_connector_tasks(connectors)\n try:\n self.eventloop.run_forever()\n except (KeyboardInterrupt, EOFError):\n print('') # Prints a 
character return for return to shell\n logging.info(\"Keyboard interrupt, exiting.\")\n self.exit()\n\n def setup_skills(self, skills):\n \"\"\"Call the setup function on the passed in skills.\"\"\"\n for skill in skills:\n try:\n skill[\"module\"].setup(self)\n except AttributeError:\n pass\n\n def start_connector_tasks(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n for connector_module in connectors:\n for _, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Connector) and\\\n cls is not Connector:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n\n if len(connectors) > 0:\n for connector in self.connectors:\n self.eventloop.run_until_complete(connector.connect(self))\n for connector in self.connectors:\n task = self.eventloop.create_task(connector.listen(self))\n self.connector_tasks.append(task)\n else:\n self.critical(\"All connectors failed to load\", 1)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.debug(databases)\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Database) and \\\n cls is not Database:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n self.eventloop.run_until_complete(database.connect(self))\n\n async def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n\n await parse_regex(self, message)\n\n if \"parsers\" in self.config and \"apiai\" in self.config[\"parsers\"]:\n await parse_apiai(self, message)\n", "path": "opsdroid/core.py"}, {"content": "\"\"\"A base class for connectors to inherit from.\"\"\"\n\nfrom opsdroid.message import Message # NOQA # pylint: disable=unused-import\n\n\nclass Connector():\n \"\"\"A base connector.\n\n Connectors are used to interact with a given chat service.\n\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Setup the connector.\n\n Set some basic properties from the connector config such as the name\n of this connector and the name the bot should appear with in chat\n service.\n\n Args:\n config (dict): The config for this connector specified in the\n `configuration.yaml` file.\n\n \"\"\"\n self.name = \"\"\n self.config = config\n self.default_room = None\n\n async def connect(self, opsdroid):\n \"\"\"Connect to chat service.\n\n This method should create a connection to the desired chat service.\n It should also be possible to call it multiple times in the event of\n being disconnected.\n\n Args:\n opsdroid (OpsDroid): An instance of the opsdroid core.\n\n \"\"\"\n raise NotImplementedError\n\n async def listen(self, opsdroid):\n \"\"\"Listen to chat service and parse all messages.\n\n This method should block the thread with an infinite loop and create\n Message objects for chat messages coming from the service. 
It should\n then call `await opsdroid.parse(message)` on those messages.\n\n As the method should include some kind of `while True` all messages\n from the chat service should be \"awaited\" asyncronously to avoid\n blocking the thread.\n\n Args:\n opsdroid (OpsDroid): An instance of the opsdroid core.\n\n \"\"\"\n raise NotImplementedError\n\n async def respond(self, message):\n \"\"\"Send a message back to the chat service.\n\n The message object will have a `text` property which should be sent\n back to the chat service. It may also have a `room` and `user` property\n which gives information on where the message should be directed.\n\n Args:\n message (Message): A message received by the connector.\n\n Returns:\n bool: True for message successfully sent. False otherwise.\n\n \"\"\"\n raise NotImplementedError\n", "path": "opsdroid/connector.py"}]}
2,324
265
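For the opsdroid record above, the point of the patch is that a skill triggered by something other than a chat message (the cron case from the issue) can still find somewhere to post. A minimal sketch of a skill-like coroutine using the new `default_connector` property and `default_room` attribute is below. The `Message` argument order mirrors the example in the issue text rather than a verified signature, the `"#general"` fallback is an assumption, and `default_room` stays `None` unless a concrete connector (or its config) actually sets it.

```python
from opsdroid.message import Message

async def scheduled_reminder(opsdroid):
    # `default_connector` prefers a connector whose config sets `default: true`,
    # otherwise it falls back to the first connector loaded (per the diff).
    connector = opsdroid.default_connector

    # The base class only initialises `default_room` to None; a concrete
    # connector (e.g. Slack) would be expected to fill it in, possibly from a
    # `default-room` config key as suggested in the issue.
    room = connector.default_room or "#general"

    # Argument order copied from the issue's own example; check Message's
    # signature in your opsdroid version before relying on it.
    await connector.respond(Message("Scheduled reminder", room, None, connector))
```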
gh_patches_debug_26756
rasdani/github-patches
git_diff
boto__boto-709
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CloudFormation Bug in Parsing Describe Stacks Response Currently the boto.cloudformation.CloudFormationConnection.describe__stacks() call does _not_ correctly parse the Capabilities section of the XML Response. This makes the parser assume the Capabilities section is part of the main Stack element. This means that, for example, if a Stack does contain a Capabilities section and an Outputs section, the Outputs section will not correctly parse either, and instead, the Outputs member element will get parsed as top level Stack elements. This is an issue with the 2.3.0 release. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `boto/cloudformation/stack.py` Content: ``` 1 from datetime import datetime 2 3 from boto.resultset import ResultSet 4 5 class Stack: 6 def __init__(self, connection=None): 7 self.connection = connection 8 self.creation_time = None 9 self.description = None 10 self.disable_rollback = None 11 self.notification_arns = [] 12 self.outputs = [] 13 self.parameters = [] 14 self.stack_id = None 15 self.stack_status = None 16 self.stack_name = None 17 self.stack_name_reason = None 18 self.timeout_in_minutes = None 19 20 def startElement(self, name, attrs, connection): 21 if name == "Parameters": 22 self.parameters = ResultSet([('member', Parameter)]) 23 return self.parameters 24 elif name == "Outputs": 25 self.outputs = ResultSet([('member', Output)]) 26 return self.outputs 27 else: 28 return None 29 30 def endElement(self, name, value, connection): 31 if name == 'CreationTime': 32 self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') 33 elif name == "Description": 34 self.description = value 35 elif name == "DisableRollback": 36 self.disable_rollback = bool(value) 37 elif name == "NotificationARNs": 38 self.notification_arns = value 39 elif name == 'StackId': 40 self.stack_id = value 41 elif name == 'StackName': 42 self.stack_name = value 43 elif name == 'StackStatus': 44 self.stack_status = value 45 elif name == "StackStatusReason": 46 self.stack_status_reason = value 47 elif name == "TimeoutInMinutes": 48 self.timeout_in_minutes = int(value) 49 elif name == "member": 50 pass 51 else: 52 setattr(self, name, value) 53 54 def delete(self): 55 return self.connection.delete_stack(stack_name_or_id=self.stack_id) 56 57 def describe_events(self, next_token=None): 58 return self.connection.describe_stack_events( 59 stack_name_or_id=self.stack_id, 60 next_token=next_token 61 ) 62 63 def describe_resource(self, logical_resource_id): 64 return self.connection.describe_stack_resource( 65 stack_name_or_id=self.stack_id, 66 logical_resource_id=logical_resource_id 67 ) 68 69 def describe_resources(self, logical_resource_id=None, 70 physical_resource_id=None): 71 return self.connection.describe_stack_resources( 72 stack_name_or_id=self.stack_id, 73 logical_resource_id=logical_resource_id, 74 physical_resource_id=physical_resource_id 75 ) 76 77 def list_resources(self, next_token=None): 78 return self.connection.list_stack_resources( 79 stack_name_or_id=self.stack_id, 80 next_token=next_token 81 ) 82 83 def update(self): 84 rs = self.connection.describe_stacks(self.stack_id) 85 if len(rs) == 1 and rs[0].stack_id == self.stack_id: 86 self.__dict__.update(rs[0].__dict__) 87 else: 88 raise ValueError("%s is not a valid Stack ID or Name" % 89 self.stack_id) 90 91 def 
get_template(self): 92 return self.connection.get_template(stack_name_or_id=self.stack_id) 93 94 class StackSummary: 95 def __init__(self, connection=None): 96 self.connection = connection 97 self.stack_id = None 98 self.stack_status = None 99 self.stack_name = None 100 self.creation_time = None 101 self.deletion_time = None 102 self.template_description = None 103 104 def startElement(self, name, attrs, connection): 105 return None 106 107 def endElement(self, name, value, connection): 108 if name == 'StackId': 109 self.stack_id = value 110 elif name == 'StackStatus': 111 self.stack_status = value 112 elif name == 'StackName': 113 self.stack_name = value 114 elif name == 'CreationTime': 115 self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') 116 elif name == "DeletionTime": 117 self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') 118 elif name == 'TemplateDescription': 119 self.template_description = value 120 elif name == "member": 121 pass 122 else: 123 setattr(self, name, value) 124 125 class Parameter: 126 def __init__(self, connection=None): 127 self.connection = None 128 self.key = None 129 self.value = None 130 131 def startElement(self, name, attrs, connection): 132 return None 133 134 def endElement(self, name, value, connection): 135 if name == "ParameterKey": 136 self.key = value 137 elif name == "ParameterValue": 138 self.value = value 139 else: 140 setattr(self, name, value) 141 142 def __repr__(self): 143 return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value) 144 145 class Output: 146 def __init__(self, connection=None): 147 self.connection = connection 148 self.description = None 149 self.key = None 150 self.value = None 151 152 def startElement(self, name, attrs, connection): 153 return None 154 155 def endElement(self, name, value, connection): 156 if name == "Description": 157 self.description = value 158 elif name == "OutputKey": 159 self.key = value 160 elif name == "OutputValue": 161 self.value = value 162 else: 163 setattr(self, name, value) 164 165 def __repr__(self): 166 return "Output:\"%s\"=\"%s\"" % (self.key, self.value) 167 168 class StackResource: 169 def __init__(self, connection=None): 170 self.connection = connection 171 self.description = None 172 self.logical_resource_id = None 173 self.physical_resource_id = None 174 self.resource_status = None 175 self.resource_status_reason = None 176 self.resource_type = None 177 self.stack_id = None 178 self.stack_name = None 179 self.timestamp = None 180 181 def startElement(self, name, attrs, connection): 182 return None 183 184 def endElement(self, name, value, connection): 185 if name == "Description": 186 self.description = value 187 elif name == "LogicalResourceId": 188 self.logical_resource_id = value 189 elif name == "PhysicalResourceId": 190 self.physical_resource_id = value 191 elif name == "ResourceStatus": 192 self.resource_status = value 193 elif name == "ResourceStatusReason": 194 self.resource_status_reason = value 195 elif name == "ResourceType": 196 self.resource_type = value 197 elif name == "StackId": 198 self.stack_id = value 199 elif name == "StackName": 200 self.stack_name = value 201 elif name == "Timestamp": 202 self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') 203 else: 204 setattr(self, name, value) 205 206 def __repr__(self): 207 return "StackResource:%s (%s)" % (self.logical_resource_id, 208 self.resource_type) 209 210 class StackResourceSummary: 211 def __init__(self, connection=None): 212 self.connection = connection 213 
self.last_updated_timestamp = None 214 self.logical_resource_id = None 215 self.physical_resource_id = None 216 self.resource_status = None 217 self.resource_status_reason = None 218 self.resource_type = None 219 220 def startElement(self, name, attrs, connection): 221 return None 222 223 def endElement(self, name, value, connection): 224 if name == "LastUpdatedTimestamp": 225 self.last_updated_timestampe = datetime.strptime(value, 226 '%Y-%m-%dT%H:%M:%SZ') 227 elif name == "LogicalResourceId": 228 self.logical_resource_id = value 229 elif name == "PhysicalResourceId": 230 self.physical_resource_id = value 231 elif name == "ResourceStatus": 232 self.resource_status = value 233 elif name == "ResourceStatusReason": 234 self.resource_status_reason = value 235 elif name == "ResourceType": 236 self.resource_type = value 237 else: 238 setattr(self, name, value) 239 240 def __repr__(self): 241 return "StackResourceSummary:%s (%s)" % (self.logical_resource_id, 242 self.resource_type) 243 244 class StackEvent: 245 valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE", 246 "DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE") 247 def __init__(self, connection=None): 248 self.connection = connection 249 self.event_id = None 250 self.logical_resource_id = None 251 self.physical_resource_id = None 252 self.resource_properties = None 253 self.resource_status = None 254 self.resource_status_reason = None 255 self.resource_type = None 256 self.stack_id = None 257 self.stack_name = None 258 self.timestamp = None 259 260 def startElement(self, name, attrs, connection): 261 return None 262 263 def endElement(self, name, value, connection): 264 if name == "EventId": 265 self.event_id = value 266 elif name == "LogicalResourceId": 267 self.logical_resource_id = value 268 elif name == "PhysicalResourceId": 269 self.physical_resource_id = value 270 elif name == "ResourceProperties": 271 self.resource_properties = value 272 elif name == "ResourceStatus": 273 self.resource_status = value 274 elif name == "ResourceStatusReason": 275 self.resource_status_reason = value 276 elif name == "ResourceType": 277 self.resource_type = value 278 elif name == "StackId": 279 self.stack_id = value 280 elif name == "StackName": 281 self.stack_name = value 282 elif name == "Timestamp": 283 self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') 284 else: 285 setattr(self, name, value) 286 287 def __repr__(self): 288 return "StackEvent %s %s %s" % (self.resource_type, 289 self.logical_resource_id, self.resource_status) 290 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py
--- a/boto/cloudformation/stack.py
+++ b/boto/cloudformation/stack.py
@@ -11,6 +11,7 @@
         self.notification_arns = []
         self.outputs = []
         self.parameters = []
+        self.capabilities = []
         self.stack_id = None
         self.stack_status = None
         self.stack_name = None
@@ -24,6 +25,9 @@
         elif name == "Outputs":
             self.outputs = ResultSet([('member', Output)])
             return self.outputs
+        elif name == "Capabilities":
+            self.capabilities = ResultSet([('member', Capability)])
+            return self.capabilities
         else:
             return None
 
@@ -165,6 +169,20 @@
     def __repr__(self):
         return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
 
+class Capability:
+    def __init__(self, connection=None):
+        self.connection = None
+        self.value = None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        self.value = value
+
+    def __repr__(self):
+        return "Capability:\"%s\"" % (self.value)
+
 class StackResource:
     def __init__(self, connection=None):
         self.connection = connection
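One way to convince yourself this patch fixes the DescribeStacks parsing is to feed a response containing both a Capabilities and an Outputs section through the SAX plumbing boto normally uses. The snippet below is only a sketch: it assumes boto 2.x's `boto.handler.XmlHandler`, and the response body is a trimmed, hand-written sample rather than a real AWS reply.

```python
import xml.sax
from boto.handler import XmlHandler
from boto.resultset import ResultSet
from boto.cloudformation.stack import Stack

# Minimal, hypothetical DescribeStacks body with a Capabilities block followed by Outputs.
SAMPLE = b"""
<DescribeStacksResponse>
  <DescribeStacksResult>
    <Stacks>
      <member>
        <StackName>MyStack</StackName>
        <StackStatus>CREATE_COMPLETE</StackStatus>
        <CreationTime>2012-05-16T22:55:31Z</CreationTime>
        <Capabilities>
          <member>CAPABILITY_IAM</member>
        </Capabilities>
        <Outputs>
          <member>
            <OutputKey>URL</OutputKey>
            <OutputValue>http://example.com</OutputValue>
          </member>
        </Outputs>
      </member>
    </Stacks>
  </DescribeStacksResult>
</DescribeStacksResponse>
"""

rs = ResultSet([('member', Stack)])
xml.sax.parseString(SAMPLE, XmlHandler(rs, None))
stack = rs[0]

# Before the patch the bare <member> inside Capabilities popped the Stack off the
# parser's node stack early, so the Outputs members were attributed to the Stack
# itself (as the issue describes); with the patch both collections parse cleanly.
print([c.value for c in stack.capabilities])      # ['CAPABILITY_IAM']
print([(o.key, o.value) for o in stack.outputs])  # [('URL', 'http://example.com')]
```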
{"golden_diff": "diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py\n--- a/boto/cloudformation/stack.py\n+++ b/boto/cloudformation/stack.py\n@@ -11,6 +11,7 @@\n self.notification_arns = []\n self.outputs = []\n self.parameters = []\n+ self.capabilities = []\n self.stack_id = None\n self.stack_status = None\n self.stack_name = None\n@@ -24,6 +25,9 @@\n elif name == \"Outputs\":\n self.outputs = ResultSet([('member', Output)])\n return self.outputs\n+ elif name == \"Capabilities\":\n+ self.capabilities = ResultSet([('member', Capability)])\n+ return self.capabilities\n else:\n return None\n \n@@ -165,6 +169,20 @@\n def __repr__(self):\n return \"Output:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n \n+class Capability:\n+ def __init__(self, connection=None):\n+ self.connection = None\n+ self.value = None\n+\n+ def startElement(self, name, attrs, connection):\n+ return None\n+\n+ def endElement(self, name, value, connection):\n+ self.value = value\n+\n+ def __repr__(self):\n+ return \"Capability:\\\"%s\\\"\" % (self.value)\n+\n class StackResource:\n def __init__(self, connection=None):\n self.connection = connection\n", "issue": "CloudFormation Bug in Parsing Describe Stacks Response\nCurrently the boto.cloudformation.CloudFormationConnection.describe__stacks() call does _not_ correctly parse the Capabilities section of the XML Response. \n\nThis makes the parser assume the Capabilities section is part of the main Stack element. This means that, for example, if a Stack does contain a Capabilities section and an Outputs section, the Outputs section will not correctly parse either, and instead, the Outputs member element will get parsed as top level Stack elements.\n\nThis is an issue with the 2.3.0 release.\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom boto.resultset import ResultSet\n\nclass Stack:\n def __init__(self, connection=None):\n self.connection = connection\n self.creation_time = None\n self.description = None\n self.disable_rollback = None\n self.notification_arns = []\n self.outputs = []\n self.parameters = []\n self.stack_id = None\n self.stack_status = None\n self.stack_name = None\n self.stack_name_reason = None\n self.timeout_in_minutes = None\n\n def startElement(self, name, attrs, connection):\n if name == \"Parameters\":\n self.parameters = ResultSet([('member', Parameter)])\n return self.parameters\n elif name == \"Outputs\":\n self.outputs = ResultSet([('member', Output)])\n return self.outputs\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == 'CreationTime':\n self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"Description\":\n self.description = value\n elif name == \"DisableRollback\":\n self.disable_rollback = bool(value)\n elif name == \"NotificationARNs\":\n self.notification_arns = value\n elif name == 'StackId':\n self.stack_id = value\n elif name == 'StackName':\n self.stack_name = value\n elif name == 'StackStatus':\n self.stack_status = value\n elif name == \"StackStatusReason\":\n self.stack_status_reason = value\n elif name == \"TimeoutInMinutes\":\n self.timeout_in_minutes = int(value)\n elif name == \"member\":\n pass\n else:\n setattr(self, name, value)\n\n def delete(self):\n return self.connection.delete_stack(stack_name_or_id=self.stack_id)\n\n def describe_events(self, next_token=None):\n return self.connection.describe_stack_events(\n stack_name_or_id=self.stack_id,\n next_token=next_token\n )\n\n def describe_resource(self, 
logical_resource_id):\n return self.connection.describe_stack_resource(\n stack_name_or_id=self.stack_id,\n logical_resource_id=logical_resource_id\n )\n\n def describe_resources(self, logical_resource_id=None,\n physical_resource_id=None):\n return self.connection.describe_stack_resources(\n stack_name_or_id=self.stack_id,\n logical_resource_id=logical_resource_id,\n physical_resource_id=physical_resource_id\n )\n\n def list_resources(self, next_token=None):\n return self.connection.list_stack_resources(\n stack_name_or_id=self.stack_id,\n next_token=next_token\n )\n\n def update(self):\n rs = self.connection.describe_stacks(self.stack_id)\n if len(rs) == 1 and rs[0].stack_id == self.stack_id:\n self.__dict__.update(rs[0].__dict__)\n else:\n raise ValueError(\"%s is not a valid Stack ID or Name\" %\n self.stack_id)\n\n def get_template(self):\n return self.connection.get_template(stack_name_or_id=self.stack_id)\n\nclass StackSummary:\n def __init__(self, connection=None):\n self.connection = connection\n self.stack_id = None\n self.stack_status = None\n self.stack_name = None\n self.creation_time = None\n self.deletion_time = None\n self.template_description = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'StackId':\n self.stack_id = value\n elif name == 'StackStatus':\n self.stack_status = value\n elif name == 'StackName':\n self.stack_name = value\n elif name == 'CreationTime':\n self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"DeletionTime\":\n self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == 'TemplateDescription':\n self.template_description = value\n elif name == \"member\":\n pass\n else:\n setattr(self, name, value)\n\nclass Parameter:\n def __init__(self, connection=None):\n self.connection = None\n self.key = None\n self.value = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"ParameterKey\":\n self.key = value\n elif name == \"ParameterValue\":\n self.value = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"Parameter:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n\nclass Output:\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.key = None\n self.value = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n elif name == \"OutputKey\":\n self.key = value\n elif name == \"OutputValue\":\n self.value = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"Output:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n\nclass StackResource:\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n self.stack_id = None\n self.stack_name = None\n self.timestamp = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == 
\"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n elif name == \"StackId\":\n self.stack_id = value\n elif name == \"StackName\":\n self.stack_name = value\n elif name == \"Timestamp\":\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackResource:%s (%s)\" % (self.logical_resource_id,\n self.resource_type)\n\nclass StackResourceSummary:\n def __init__(self, connection=None):\n self.connection = connection\n self.last_updated_timestamp = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"LastUpdatedTimestamp\":\n self.last_updated_timestampe = datetime.strptime(value,\n '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == \"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackResourceSummary:%s (%s)\" % (self.logical_resource_id,\n self.resource_type)\n\nclass StackEvent:\n valid_states = (\"CREATE_IN_PROGRESS\", \"CREATE_FAILED\", \"CREATE_COMPLETE\",\n \"DELETE_IN_PROGRESS\", \"DELETE_FAILED\", \"DELETE_COMPLETE\")\n def __init__(self, connection=None):\n self.connection = connection\n self.event_id = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_properties = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n self.stack_id = None\n self.stack_name = None\n self.timestamp = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"EventId\":\n self.event_id = value\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == \"ResourceProperties\":\n self.resource_properties = value\n elif name == \"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n elif name == \"StackId\":\n self.stack_id = value\n elif name == \"StackName\":\n self.stack_name = value\n elif name == \"Timestamp\":\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackEvent %s %s %s\" % (self.resource_type,\n self.logical_resource_id, self.resource_status)\n", "path": "boto/cloudformation/stack.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom boto.resultset import ResultSet\n\nclass Stack:\n def __init__(self, connection=None):\n self.connection = connection\n self.creation_time = None\n self.description = None\n self.disable_rollback = None\n self.notification_arns = []\n self.outputs = []\n self.parameters = []\n self.capabilities = []\n self.stack_id = None\n self.stack_status = 
None\n self.stack_name = None\n self.stack_name_reason = None\n self.timeout_in_minutes = None\n\n def startElement(self, name, attrs, connection):\n if name == \"Parameters\":\n self.parameters = ResultSet([('member', Parameter)])\n return self.parameters\n elif name == \"Outputs\":\n self.outputs = ResultSet([('member', Output)])\n return self.outputs\n elif name == \"Capabilities\":\n self.capabilities = ResultSet([('member', Capability)])\n return self.capabilities\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == 'CreationTime':\n self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"Description\":\n self.description = value\n elif name == \"DisableRollback\":\n self.disable_rollback = bool(value)\n elif name == \"NotificationARNs\":\n self.notification_arns = value\n elif name == 'StackId':\n self.stack_id = value\n elif name == 'StackName':\n self.stack_name = value\n elif name == 'StackStatus':\n self.stack_status = value\n elif name == \"StackStatusReason\":\n self.stack_status_reason = value\n elif name == \"TimeoutInMinutes\":\n self.timeout_in_minutes = int(value)\n elif name == \"member\":\n pass\n else:\n setattr(self, name, value)\n\n def delete(self):\n return self.connection.delete_stack(stack_name_or_id=self.stack_id)\n\n def describe_events(self, next_token=None):\n return self.connection.describe_stack_events(\n stack_name_or_id=self.stack_id,\n next_token=next_token\n )\n\n def describe_resource(self, logical_resource_id):\n return self.connection.describe_stack_resource(\n stack_name_or_id=self.stack_id,\n logical_resource_id=logical_resource_id\n )\n\n def describe_resources(self, logical_resource_id=None,\n physical_resource_id=None):\n return self.connection.describe_stack_resources(\n stack_name_or_id=self.stack_id,\n logical_resource_id=logical_resource_id,\n physical_resource_id=physical_resource_id\n )\n\n def list_resources(self, next_token=None):\n return self.connection.list_stack_resources(\n stack_name_or_id=self.stack_id,\n next_token=next_token\n )\n\n def update(self):\n rs = self.connection.describe_stacks(self.stack_id)\n if len(rs) == 1 and rs[0].stack_id == self.stack_id:\n self.__dict__.update(rs[0].__dict__)\n else:\n raise ValueError(\"%s is not a valid Stack ID or Name\" %\n self.stack_id)\n\n def get_template(self):\n return self.connection.get_template(stack_name_or_id=self.stack_id)\n\nclass StackSummary:\n def __init__(self, connection=None):\n self.connection = connection\n self.stack_id = None\n self.stack_status = None\n self.stack_name = None\n self.creation_time = None\n self.deletion_time = None\n self.template_description = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'StackId':\n self.stack_id = value\n elif name == 'StackStatus':\n self.stack_status = value\n elif name == 'StackName':\n self.stack_name = value\n elif name == 'CreationTime':\n self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"DeletionTime\":\n self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == 'TemplateDescription':\n self.template_description = value\n elif name == \"member\":\n pass\n else:\n setattr(self, name, value)\n\nclass Parameter:\n def __init__(self, connection=None):\n self.connection = None\n self.key = None\n self.value = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, 
connection):\n if name == \"ParameterKey\":\n self.key = value\n elif name == \"ParameterValue\":\n self.value = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"Parameter:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n\nclass Output:\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.key = None\n self.value = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n elif name == \"OutputKey\":\n self.key = value\n elif name == \"OutputValue\":\n self.value = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"Output:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n\nclass Capability:\n def __init__(self, connection=None):\n self.connection = None\n self.value = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n self.value = value\n\n def __repr__(self):\n return \"Capability:\\\"%s\\\"\" % (self.value)\n\nclass StackResource:\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n self.stack_id = None\n self.stack_name = None\n self.timestamp = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == \"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n elif name == \"StackId\":\n self.stack_id = value\n elif name == \"StackName\":\n self.stack_name = value\n elif name == \"Timestamp\":\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackResource:%s (%s)\" % (self.logical_resource_id,\n self.resource_type)\n\nclass StackResourceSummary:\n def __init__(self, connection=None):\n self.connection = connection\n self.last_updated_timestamp = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"LastUpdatedTimestamp\":\n self.last_updated_timestampe = datetime.strptime(value,\n '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == \"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackResourceSummary:%s (%s)\" % (self.logical_resource_id,\n self.resource_type)\n\nclass StackEvent:\n valid_states = (\"CREATE_IN_PROGRESS\", \"CREATE_FAILED\", \"CREATE_COMPLETE\",\n \"DELETE_IN_PROGRESS\", 
\"DELETE_FAILED\", \"DELETE_COMPLETE\")\n def __init__(self, connection=None):\n self.connection = connection\n self.event_id = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_properties = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n self.stack_id = None\n self.stack_name = None\n self.timestamp = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"EventId\":\n self.event_id = value\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == \"ResourceProperties\":\n self.resource_properties = value\n elif name == \"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n elif name == \"StackId\":\n self.stack_id = value\n elif name == \"StackName\":\n self.stack_name = value\n elif name == \"Timestamp\":\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackEvent %s %s %s\" % (self.resource_type,\n self.logical_resource_id, self.resource_status)\n", "path": "boto/cloudformation/stack.py"}]}
3,229
321
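The boto record above adds a `Capability` element class and teaches `Stack.startElement` to hand back a container for `<Capabilities>`. The sketch below is a minimal, self-contained illustration of that SAX-style delegation pattern; `MemberList`, `MiniStack`, and the hand-simulated callback sequence are invented for this example and are not boto code, and the `Capability` class is only a trimmed copy of the one added in the patch.

```python
class Capability:
    """Collects the text of one <member> inside <Capabilities>."""

    def __init__(self):
        self.value = None

    def startElement(self, name, attrs, connection):
        return None  # no nested containers inside a capability

    def endElement(self, name, value, connection):
        self.value = value

    def __repr__(self):
        return 'Capability:"%s"' % self.value


class MemberList(list):
    """Toy stand-in for boto's ResultSet([('member', Capability)])."""

    def startElement(self, name, attrs, connection):
        if name == "member":
            self.append(Capability())
            return self[-1]  # route the member's children/text to the new item
        return None

    def endElement(self, name, value, connection):
        pass


class MiniStack:
    def __init__(self):
        self.stack_name = None
        self.capabilities = []

    def startElement(self, name, attrs, connection):
        # The fix: recognise the <Capabilities> container and delegate its
        # children to a member list instead of letting them fall through.
        if name == "Capabilities":
            self.capabilities = MemberList()
            return self.capabilities
        return None

    def endElement(self, name, value, connection):
        if name == "StackName":
            self.stack_name = value


# Hand-simulated callback sequence for:
#   <Stack><StackName>demo</StackName>
#          <Capabilities><member>CAPABILITY_IAM</member></Capabilities></Stack>
stack = MiniStack()
stack.endElement("StackName", "demo", None)
container = stack.startElement("Capabilities", {}, None)  # delegate to MemberList
item = container.startElement("member", {}, None)         # delegate to a Capability
item.endElement("member", "CAPABILITY_IAM", None)         # member text lands here
print(stack.stack_name, stack.capabilities)  # demo [Capability:"CAPABILITY_IAM"]
```

Without the `Capabilities` branch, the handler would keep routing the `<member>` children to the stack object itself, which is how the report describes the subsequent `Outputs` section getting mis-parsed.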
gh_patches_debug_12535
rasdani/github-patches
git_diff
conda__conda-6918
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- deprecate 'conda help' in favor of --help CLI flag With the install on Windows 10: _platform : win-64 conda version : 4.3.30 conda is private : False conda-env version : 4.3.30 conda-build version : 3.0.22 python version : 3.6.3.final.0 requests version : 2.18.4 root environment : C:\Anaconda3 (writable)_ the command: `conda help install` (or other similar, say, conda help create, etc.) produces the message `C:\Anaconda3\python.exe: can't open file 'C:\Anaconda3\Scripts\conda': [Errno 2] No such file or directory` This syntax works under Linux (Ubuntu), but of course, there is no just "conda" file in Windows Anaconda Scripts. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda/cli/main_help.py` Content: ``` 1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io 2 # All Rights Reserved 3 # 4 # conda is distributed under the terms of the BSD 3-clause license. 5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. 6 from __future__ import absolute_import, division, print_function, unicode_literals 7 8 import subprocess 9 import sys 10 11 12 def execute(args, parser): 13 if not args.command: 14 parser.print_help() 15 return 16 subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover 17 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py --- a/conda/cli/main_help.py +++ b/conda/cli/main_help.py @@ -5,7 +5,6 @@ # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. from __future__ import absolute_import, division, print_function, unicode_literals -import subprocess import sys @@ -13,4 +12,7 @@ if not args.command: parser.print_help() return - subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover + print("ERROR: The 'conda help' command is deprecated.\n" + "Instead use 'conda %s --help'." % args.command, + file=sys.stderr) + return 1
{"golden_diff": "diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py\n--- a/conda/cli/main_help.py\n+++ b/conda/cli/main_help.py\n@@ -5,7 +5,6 @@\n # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-import subprocess\n import sys\n \n \n@@ -13,4 +12,7 @@\n if not args.command:\n parser.print_help()\n return\n- subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n+ print(\"ERROR: The 'conda help' command is deprecated.\\n\"\n+ \"Instead use 'conda %s --help'.\" % args.command,\n+ file=sys.stderr)\n+ return 1\n", "issue": "deprecate 'conda help' in favor of --help CLI flag\nWith the install on Windows 10:\r\n\r\n _platform : win-64 conda version : 4.3.30\r\n conda is private : False conda-env version : 4.3.30\r\n conda-build version : 3.0.22 python version : 3.6.3.final.0\r\n requests version : 2.18.4 root environment : C:\\Anaconda3 (writable)_\r\n\r\nthe command: `conda help install` (or other similar, say, conda help create, etc.) produces the message\r\n\r\n`C:\\Anaconda3\\python.exe: can't open file 'C:\\Anaconda3\\Scripts\\conda': [Errno 2] No such file or directory`\r\n\r\nThis syntax works under Linux (Ubuntu), but of course, there is no just \"conda\" file in Windows Anaconda Scripts.\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport subprocess\nimport sys\n\n\ndef execute(args, parser):\n if not args.command:\n parser.print_help()\n return\n subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n", "path": "conda/cli/main_help.py"}], "after_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\n\ndef execute(args, parser):\n if not args.command:\n parser.print_help()\n return\n print(\"ERROR: The 'conda help' command is deprecated.\\n\"\n \"Instead use 'conda %s --help'.\" % args.command,\n file=sys.stderr)\n return 1\n", "path": "conda/cli/main_help.py"}]}
632
184
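The conda patch above drops the subprocess re-invocation of `sys.argv[0]` (the part that fails on Windows, where there is no plain `conda` file under `Scripts\`) in favour of a deprecation message on stderr and a non-zero return code. Below is a rough stand-alone sketch of that same pattern, using an invented `mytool` CLI rather than conda's real entry points and argument wiring.

```python
import argparse
import sys


def execute_help(args):
    if not args.command:
        return 0  # a real CLI would print its top-level help here
    print(
        "ERROR: The 'mytool help' command is deprecated.\n"
        "Instead use 'mytool %s --help'." % args.command,
        file=sys.stderr,
    )
    return 1


def main(argv=None):
    parser = argparse.ArgumentParser(prog="mytool")
    subparsers = parser.add_subparsers(dest="subcommand")
    help_parser = subparsers.add_parser("help")
    help_parser.add_argument("command", nargs="?")
    args = parser.parse_args(argv)
    if args.subcommand == "help":
        return execute_help(args)
    parser.print_help()
    return 0


if __name__ == "__main__":
    sys.exit(main())  # e.g. `python mytool.py help install` exits 1 with a hint
```

Emitting the hint in-process avoids any dependency on how the console script was installed on a given platform.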
gh_patches_debug_2373
rasdani/github-patches
git_diff
netbox-community__netbox-14828
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Only one event rule triggers for a content type ### Deployment Type Self-hosted ### NetBox Version v3.7.0 ### Python Version 3.11 ### Steps to Reproduce Setup: 1. Create a webhook: Name = Test, URL = http://127.0.0.1:9000 2. Create event rule 1: - Name = Rule 1 - Content types = Prefix - select Updates - Condition = `{ "and": [{"attr": "status.value", "value": "deprecated"}]}` - Action type = Webhook - Webhook = Test 3. Create event rule 2: - Name = Rule 2 - Content types = Prefix - select Updates - Condition = `{ "and": [{"attr": "status.value", "value": "active"}]}` - Action type = Webhook - Webhook = Test 4. Start webhook receiver (`python manage.py webhook_receiver`), or observe the webhooks happen in some other way (Sorry, couldn't figure out the correct condition syntax without using the "and" operator) Demo: 5. Create a prefix, like 10.1.2.0/24, status = Active (the defaults) 6. Edit the prefix: change its status to **Deprecated** 7. Edit the prefix again: change its status to **Active** ### Expected Behavior Webhook is run **twice**: first when prefix status was changed to **Deprecated** (step 6), second when changed to **Active** again (step 7). ### Observed Behavior Webhook is run **only once**, that's in step 6, but not in step 7. Additionally: If Rule 1 is disabled, and steps 6 and 7 are executed again, now the webhook is run in step 7. Looks like only the first enabled event rule is run for a specific object type. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netbox/extras/events.py` Content: ``` 1 import logging 2 3 from django.conf import settings 4 from django.contrib.auth import get_user_model 5 from django.contrib.contenttypes.models import ContentType 6 from django.core.exceptions import ObjectDoesNotExist 7 from django.utils import timezone 8 from django.utils.module_loading import import_string 9 from django_rq import get_queue 10 11 from core.models import Job 12 from netbox.config import get_config 13 from netbox.constants import RQ_QUEUE_DEFAULT 14 from netbox.registry import registry 15 from utilities.api import get_serializer_for_model 16 from utilities.rqworker import get_rq_retry 17 from utilities.utils import serialize_object 18 from .choices import * 19 from .models import EventRule, ScriptModule 20 21 logger = logging.getLogger('netbox.events_processor') 22 23 24 def serialize_for_event(instance): 25 """ 26 Return a serialized representation of the given instance suitable for use in a queued event. 
27 """ 28 serializer_class = get_serializer_for_model(instance.__class__) 29 serializer_context = { 30 'request': None, 31 } 32 serializer = serializer_class(instance, context=serializer_context) 33 34 return serializer.data 35 36 37 def get_snapshots(instance, action): 38 snapshots = { 39 'prechange': getattr(instance, '_prechange_snapshot', None), 40 'postchange': None, 41 } 42 if action != ObjectChangeActionChoices.ACTION_DELETE: 43 # Use model's serialize_object() method if defined; fall back to serialize_object() utility function 44 if hasattr(instance, 'serialize_object'): 45 snapshots['postchange'] = instance.serialize_object() 46 else: 47 snapshots['postchange'] = serialize_object(instance) 48 49 return snapshots 50 51 52 def enqueue_object(queue, instance, user, request_id, action): 53 """ 54 Enqueue a serialized representation of a created/updated/deleted object for the processing of 55 events once the request has completed. 56 """ 57 # Determine whether this type of object supports event rules 58 app_label = instance._meta.app_label 59 model_name = instance._meta.model_name 60 if model_name not in registry['model_features']['event_rules'].get(app_label, []): 61 return 62 63 queue.append({ 64 'content_type': ContentType.objects.get_for_model(instance), 65 'object_id': instance.pk, 66 'event': action, 67 'data': serialize_for_event(instance), 68 'snapshots': get_snapshots(instance, action), 69 'username': user.username, 70 'request_id': request_id 71 }) 72 73 74 def process_event_rules(event_rules, model_name, event, data, username, snapshots=None, request_id=None): 75 try: 76 user = get_user_model().objects.get(username=username) 77 except ObjectDoesNotExist: 78 user = None 79 80 for event_rule in event_rules: 81 82 # Evaluate event rule conditions (if any) 83 if not event_rule.eval_conditions(data): 84 return 85 86 # Webhooks 87 if event_rule.action_type == EventRuleActionChoices.WEBHOOK: 88 89 # Select the appropriate RQ queue 90 queue_name = get_config().QUEUE_MAPPINGS.get('webhook', RQ_QUEUE_DEFAULT) 91 rq_queue = get_queue(queue_name) 92 93 # Compile the task parameters 94 params = { 95 "event_rule": event_rule, 96 "model_name": model_name, 97 "event": event, 98 "data": data, 99 "snapshots": snapshots, 100 "timestamp": timezone.now().isoformat(), 101 "username": username, 102 "retry": get_rq_retry() 103 } 104 if snapshots: 105 params["snapshots"] = snapshots 106 if request_id: 107 params["request_id"] = request_id 108 109 # Enqueue the task 110 rq_queue.enqueue( 111 "extras.webhooks.send_webhook", 112 **params 113 ) 114 115 # Scripts 116 elif event_rule.action_type == EventRuleActionChoices.SCRIPT: 117 # Resolve the script from action parameters 118 script_module = event_rule.action_object 119 script_name = event_rule.action_parameters['script_name'] 120 script = script_module.scripts[script_name]() 121 122 # Enqueue a Job to record the script's execution 123 Job.enqueue( 124 "extras.scripts.run_script", 125 instance=script_module, 126 name=script.class_name, 127 user=user, 128 data=data 129 ) 130 131 else: 132 raise ValueError(f"Unknown action type for an event rule: {event_rule.action_type}") 133 134 135 def process_event_queue(events): 136 """ 137 Flush a list of object representation to RQ for EventRule processing. 
138 """ 139 events_cache = { 140 'type_create': {}, 141 'type_update': {}, 142 'type_delete': {}, 143 } 144 145 for data in events: 146 action_flag = { 147 ObjectChangeActionChoices.ACTION_CREATE: 'type_create', 148 ObjectChangeActionChoices.ACTION_UPDATE: 'type_update', 149 ObjectChangeActionChoices.ACTION_DELETE: 'type_delete', 150 }[data['event']] 151 content_type = data['content_type'] 152 153 # Cache applicable Event Rules 154 if content_type not in events_cache[action_flag]: 155 events_cache[action_flag][content_type] = EventRule.objects.filter( 156 **{action_flag: True}, 157 content_types=content_type, 158 enabled=True 159 ) 160 event_rules = events_cache[action_flag][content_type] 161 162 process_event_rules( 163 event_rules, content_type.model, data['event'], data['data'], data['username'], 164 snapshots=data['snapshots'], request_id=data['request_id'] 165 ) 166 167 168 def flush_events(queue): 169 """ 170 Flush a list of object representation to RQ for webhook processing. 171 """ 172 if queue: 173 for name in settings.EVENTS_PIPELINE: 174 try: 175 func = import_string(name) 176 func(queue) 177 except Exception as e: 178 logger.error(f"Cannot import events pipeline {name} error: {e}") 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netbox/extras/events.py b/netbox/extras/events.py --- a/netbox/extras/events.py +++ b/netbox/extras/events.py @@ -81,7 +81,7 @@ # Evaluate event rule conditions (if any) if not event_rule.eval_conditions(data): - return + continue # Webhooks if event_rule.action_type == EventRuleActionChoices.WEBHOOK:
{"golden_diff": "diff --git a/netbox/extras/events.py b/netbox/extras/events.py\n--- a/netbox/extras/events.py\n+++ b/netbox/extras/events.py\n@@ -81,7 +81,7 @@\n \n # Evaluate event rule conditions (if any)\n if not event_rule.eval_conditions(data):\n- return\n+ continue\n \n # Webhooks\n if event_rule.action_type == EventRuleActionChoices.WEBHOOK:\n", "issue": "Only one event rule triggers for a content type\n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv3.7.0\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\nSetup:\r\n\r\n1. Create a webhook: Name = Test, URL = http://127.0.0.1:9000\r\n2. Create event rule 1:\r\n - Name = Rule 1\r\n - Content types = Prefix\r\n - select Updates\r\n - Condition = `{ \"and\": [{\"attr\": \"status.value\", \"value\": \"deprecated\"}]}`\r\n - Action type = Webhook\r\n - Webhook = Test\r\n3. Create event rule 2:\r\n - Name = Rule 2\r\n - Content types = Prefix\r\n - select Updates\r\n - Condition = `{ \"and\": [{\"attr\": \"status.value\", \"value\": \"active\"}]}`\r\n - Action type = Webhook\r\n - Webhook = Test\r\n4. Start webhook receiver (`python manage.py webhook_receiver`), or observe the webhooks happen in some other way\r\n\r\n(Sorry, couldn't figure out the correct condition syntax without using the \"and\" operator)\r\n\r\nDemo:\r\n\r\n5. Create a prefix, like 10.1.2.0/24, status = Active (the defaults)\r\n6. Edit the prefix: change its status to **Deprecated**\r\n7. Edit the prefix again: change its status to **Active**\r\n\r\n\r\n### Expected Behavior\r\n\r\nWebhook is run **twice**: first when prefix status was changed to **Deprecated** (step 6), second when changed to **Active** again (step 7).\r\n\r\n### Observed Behavior\r\n\r\nWebhook is run **only once**, that's in step 6, but not in step 7.\r\n\r\nAdditionally: If Rule 1 is disabled, and steps 6 and 7 are executed again, now the webhook is run in step 7.\r\n\r\nLooks like only the first enabled event rule is run for a specific object type.\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import timezone\nfrom django.utils.module_loading import import_string\nfrom django_rq import get_queue\n\nfrom core.models import Job\nfrom netbox.config import get_config\nfrom netbox.constants import RQ_QUEUE_DEFAULT\nfrom netbox.registry import registry\nfrom utilities.api import get_serializer_for_model\nfrom utilities.rqworker import get_rq_retry\nfrom utilities.utils import serialize_object\nfrom .choices import *\nfrom .models import EventRule, ScriptModule\n\nlogger = logging.getLogger('netbox.events_processor')\n\n\ndef serialize_for_event(instance):\n \"\"\"\n Return a serialized representation of the given instance suitable for use in a queued event.\n \"\"\"\n serializer_class = get_serializer_for_model(instance.__class__)\n serializer_context = {\n 'request': None,\n }\n serializer = serializer_class(instance, context=serializer_context)\n\n return serializer.data\n\n\ndef get_snapshots(instance, action):\n snapshots = {\n 'prechange': getattr(instance, '_prechange_snapshot', None),\n 'postchange': None,\n }\n if action != ObjectChangeActionChoices.ACTION_DELETE:\n # Use model's serialize_object() method if defined; fall back to serialize_object() utility function\n if hasattr(instance, 'serialize_object'):\n 
snapshots['postchange'] = instance.serialize_object()\n else:\n snapshots['postchange'] = serialize_object(instance)\n\n return snapshots\n\n\ndef enqueue_object(queue, instance, user, request_id, action):\n \"\"\"\n Enqueue a serialized representation of a created/updated/deleted object for the processing of\n events once the request has completed.\n \"\"\"\n # Determine whether this type of object supports event rules\n app_label = instance._meta.app_label\n model_name = instance._meta.model_name\n if model_name not in registry['model_features']['event_rules'].get(app_label, []):\n return\n\n queue.append({\n 'content_type': ContentType.objects.get_for_model(instance),\n 'object_id': instance.pk,\n 'event': action,\n 'data': serialize_for_event(instance),\n 'snapshots': get_snapshots(instance, action),\n 'username': user.username,\n 'request_id': request_id\n })\n\n\ndef process_event_rules(event_rules, model_name, event, data, username, snapshots=None, request_id=None):\n try:\n user = get_user_model().objects.get(username=username)\n except ObjectDoesNotExist:\n user = None\n\n for event_rule in event_rules:\n\n # Evaluate event rule conditions (if any)\n if not event_rule.eval_conditions(data):\n return\n\n # Webhooks\n if event_rule.action_type == EventRuleActionChoices.WEBHOOK:\n\n # Select the appropriate RQ queue\n queue_name = get_config().QUEUE_MAPPINGS.get('webhook', RQ_QUEUE_DEFAULT)\n rq_queue = get_queue(queue_name)\n\n # Compile the task parameters\n params = {\n \"event_rule\": event_rule,\n \"model_name\": model_name,\n \"event\": event,\n \"data\": data,\n \"snapshots\": snapshots,\n \"timestamp\": timezone.now().isoformat(),\n \"username\": username,\n \"retry\": get_rq_retry()\n }\n if snapshots:\n params[\"snapshots\"] = snapshots\n if request_id:\n params[\"request_id\"] = request_id\n\n # Enqueue the task\n rq_queue.enqueue(\n \"extras.webhooks.send_webhook\",\n **params\n )\n\n # Scripts\n elif event_rule.action_type == EventRuleActionChoices.SCRIPT:\n # Resolve the script from action parameters\n script_module = event_rule.action_object\n script_name = event_rule.action_parameters['script_name']\n script = script_module.scripts[script_name]()\n\n # Enqueue a Job to record the script's execution\n Job.enqueue(\n \"extras.scripts.run_script\",\n instance=script_module,\n name=script.class_name,\n user=user,\n data=data\n )\n\n else:\n raise ValueError(f\"Unknown action type for an event rule: {event_rule.action_type}\")\n\n\ndef process_event_queue(events):\n \"\"\"\n Flush a list of object representation to RQ for EventRule processing.\n \"\"\"\n events_cache = {\n 'type_create': {},\n 'type_update': {},\n 'type_delete': {},\n }\n\n for data in events:\n action_flag = {\n ObjectChangeActionChoices.ACTION_CREATE: 'type_create',\n ObjectChangeActionChoices.ACTION_UPDATE: 'type_update',\n ObjectChangeActionChoices.ACTION_DELETE: 'type_delete',\n }[data['event']]\n content_type = data['content_type']\n\n # Cache applicable Event Rules\n if content_type not in events_cache[action_flag]:\n events_cache[action_flag][content_type] = EventRule.objects.filter(\n **{action_flag: True},\n content_types=content_type,\n enabled=True\n )\n event_rules = events_cache[action_flag][content_type]\n\n process_event_rules(\n event_rules, content_type.model, data['event'], data['data'], data['username'],\n snapshots=data['snapshots'], request_id=data['request_id']\n )\n\n\ndef flush_events(queue):\n \"\"\"\n Flush a list of object representation to RQ for webhook processing.\n \"\"\"\n 
if queue:\n for name in settings.EVENTS_PIPELINE:\n try:\n func = import_string(name)\n func(queue)\n except Exception as e:\n logger.error(f\"Cannot import events pipeline {name} error: {e}\")\n", "path": "netbox/extras/events.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import timezone\nfrom django.utils.module_loading import import_string\nfrom django_rq import get_queue\n\nfrom core.models import Job\nfrom netbox.config import get_config\nfrom netbox.constants import RQ_QUEUE_DEFAULT\nfrom netbox.registry import registry\nfrom utilities.api import get_serializer_for_model\nfrom utilities.rqworker import get_rq_retry\nfrom utilities.utils import serialize_object\nfrom .choices import *\nfrom .models import EventRule, ScriptModule\n\nlogger = logging.getLogger('netbox.events_processor')\n\n\ndef serialize_for_event(instance):\n \"\"\"\n Return a serialized representation of the given instance suitable for use in a queued event.\n \"\"\"\n serializer_class = get_serializer_for_model(instance.__class__)\n serializer_context = {\n 'request': None,\n }\n serializer = serializer_class(instance, context=serializer_context)\n\n return serializer.data\n\n\ndef get_snapshots(instance, action):\n snapshots = {\n 'prechange': getattr(instance, '_prechange_snapshot', None),\n 'postchange': None,\n }\n if action != ObjectChangeActionChoices.ACTION_DELETE:\n # Use model's serialize_object() method if defined; fall back to serialize_object() utility function\n if hasattr(instance, 'serialize_object'):\n snapshots['postchange'] = instance.serialize_object()\n else:\n snapshots['postchange'] = serialize_object(instance)\n\n return snapshots\n\n\ndef enqueue_object(queue, instance, user, request_id, action):\n \"\"\"\n Enqueue a serialized representation of a created/updated/deleted object for the processing of\n events once the request has completed.\n \"\"\"\n # Determine whether this type of object supports event rules\n app_label = instance._meta.app_label\n model_name = instance._meta.model_name\n if model_name not in registry['model_features']['event_rules'].get(app_label, []):\n return\n\n queue.append({\n 'content_type': ContentType.objects.get_for_model(instance),\n 'object_id': instance.pk,\n 'event': action,\n 'data': serialize_for_event(instance),\n 'snapshots': get_snapshots(instance, action),\n 'username': user.username,\n 'request_id': request_id\n })\n\n\ndef process_event_rules(event_rules, model_name, event, data, username, snapshots=None, request_id=None):\n try:\n user = get_user_model().objects.get(username=username)\n except ObjectDoesNotExist:\n user = None\n\n for event_rule in event_rules:\n\n # Evaluate event rule conditions (if any)\n if not event_rule.eval_conditions(data):\n continue\n\n # Webhooks\n if event_rule.action_type == EventRuleActionChoices.WEBHOOK:\n\n # Select the appropriate RQ queue\n queue_name = get_config().QUEUE_MAPPINGS.get('webhook', RQ_QUEUE_DEFAULT)\n rq_queue = get_queue(queue_name)\n\n # Compile the task parameters\n params = {\n \"event_rule\": event_rule,\n \"model_name\": model_name,\n \"event\": event,\n \"data\": data,\n \"snapshots\": snapshots,\n \"timestamp\": timezone.now().isoformat(),\n \"username\": username,\n \"retry\": get_rq_retry()\n }\n if snapshots:\n params[\"snapshots\"] = snapshots\n if request_id:\n 
params[\"request_id\"] = request_id\n\n # Enqueue the task\n rq_queue.enqueue(\n \"extras.webhooks.send_webhook\",\n **params\n )\n\n # Scripts\n elif event_rule.action_type == EventRuleActionChoices.SCRIPT:\n # Resolve the script from action parameters\n script_module = event_rule.action_object\n script_name = event_rule.action_parameters['script_name']\n script = script_module.scripts[script_name]()\n\n # Enqueue a Job to record the script's execution\n Job.enqueue(\n \"extras.scripts.run_script\",\n instance=script_module,\n name=script.class_name,\n user=user,\n data=data\n )\n\n else:\n raise ValueError(f\"Unknown action type for an event rule: {event_rule.action_type}\")\n\n\ndef process_event_queue(events):\n \"\"\"\n Flush a list of object representation to RQ for EventRule processing.\n \"\"\"\n events_cache = {\n 'type_create': {},\n 'type_update': {},\n 'type_delete': {},\n }\n\n for data in events:\n action_flag = {\n ObjectChangeActionChoices.ACTION_CREATE: 'type_create',\n ObjectChangeActionChoices.ACTION_UPDATE: 'type_update',\n ObjectChangeActionChoices.ACTION_DELETE: 'type_delete',\n }[data['event']]\n content_type = data['content_type']\n\n # Cache applicable Event Rules\n if content_type not in events_cache[action_flag]:\n events_cache[action_flag][content_type] = EventRule.objects.filter(\n **{action_flag: True},\n content_types=content_type,\n enabled=True\n )\n event_rules = events_cache[action_flag][content_type]\n\n process_event_rules(\n event_rules, content_type.model, data['event'], data['data'], data['username'],\n snapshots=data['snapshots'], request_id=data['request_id']\n )\n\n\ndef flush_events(queue):\n \"\"\"\n Flush a list of object representation to RQ for webhook processing.\n \"\"\"\n if queue:\n for name in settings.EVENTS_PIPELINE:\n try:\n func = import_string(name)\n func(queue)\n except Exception as e:\n logger.error(f\"Cannot import events pipeline {name} error: {e}\")\n", "path": "netbox/extras/events.py"}]}
2,313
96
gh_patches_debug_19502
rasdani/github-patches
git_diff
dask__distributed-7404
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Zstandard compression is not thread safe **Describe the issue**: In `compression.py` we create a singleton `ZstdCompressor` and `ZstdDecompressor` object. The docs for the Zstandard library [state](https://github.com/indygreg/python-zstandard/blob/main/zstandard/backend_cffi.py#L1764): > Unless specified otherwise, assume that no two methods of ``ZstdCompressor`` instances can be called from multiple Python threads simultaneously. In other words, assume instances are not thread safe unless stated otherwise. **Minimal Complete Verifiable Example**: This can trivially be reproduced by introducing threading to the `test_maybe_compress` test in `test_protocol.py` Some errors that I've reproduced range from compression/decompression failure, to seg faults. Examples for searchability: ``` Fatal Python error: Segmentation fault Thread 0x000000030df25000 (most recent call first): File "/Users/dwragge/code/distributed/distributed/protocol/compression.py", line 92 in zstd_compress File "/Users/dwragge/code/distributed/distributed/protocol/compression.py", line 186 in maybe_compress File "/Users/dwragge/code/distributed/distributed/protocol/tests/test_protocol.py", line 148 in test_compress_decompress File "/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py", line 870 in run File "/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py", line 932 in _bootstrap_inner File "/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py", line 890 in _bootstrap ``` - `zstd.ZstdError: cannot compress: Src size is incorrect` - `zstd.ZstdError: cannot compress: Operation not authorized at current processing stage` - `zstd.ZstdError: decompression error: Corrupted block detected` - `zstd.ZstdError: error determining content size from frame header` - `zstd.ZstdError: decompression error: Unknown frame descriptor` - `zstd.ZstdError: decompression error: Destination buffer is too small` This manifests in production as errors such as: - `distributed.core - ERROR - Exception while handling op get_data` - `distributed.worker - ERROR - failed during get data with tcp://a.b.c.d:39195 -> tcp://d.e.f.g:43787` - `distributed.protocol.core - CRITICAL - Failed to deserialize` Amongst others, including clear lost workers, and `CommClosedError`s. **Anything else we need to know?**: The fix is trivial (instantiate a new Zstd(De)Compressor instance for each compression. I will raise a PR to fix plus tests. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `distributed/protocol/compression.py` Content: ``` 1 """ 2 Record known compressors 3 4 Includes utilities for determining whether or not to compress 5 """ 6 from __future__ import annotations 7 8 import logging 9 from collections.abc import Callable 10 from contextlib import suppress 11 from random import randint 12 from typing import Literal 13 14 from packaging.version import parse as parse_version 15 from tlz import identity 16 17 import dask 18 19 from distributed.utils import ensure_memoryview, nbytes, no_default 20 21 compressions: dict[ 22 str | None | Literal[False], 23 dict[Literal["compress", "decompress"], Callable[[bytes], bytes]], 24 ] = {None: {"compress": identity, "decompress": identity}} 25 26 compressions[False] = compressions[None] # alias 27 28 29 default_compression = None 30 31 32 logger = logging.getLogger(__name__) 33 34 35 with suppress(ImportError): 36 import zlib 37 38 compressions["zlib"] = {"compress": zlib.compress, "decompress": zlib.decompress} 39 40 with suppress(ImportError): 41 import snappy 42 43 # In python-snappy 0.5.3, support for the Python Buffer Protocol was added. 44 # This is needed to handle other objects (like `memoryview`s) without 45 # copying to `bytes` first. 46 # 47 # Note: `snappy.__version__` doesn't exist in a release yet. 48 # So do a little test that will fail if snappy is not 0.5.3 or later. 49 try: 50 snappy.compress(memoryview(b"")) 51 except TypeError: 52 raise ImportError("Need snappy >= 0.5.3") 53 54 compressions["snappy"] = { 55 "compress": snappy.compress, 56 "decompress": snappy.decompress, 57 } 58 default_compression = "snappy" 59 60 with suppress(ImportError): 61 import lz4 62 63 # Required to use `lz4.block` APIs and Python Buffer Protocol support. 64 if parse_version(lz4.__version__) < parse_version("0.23.1"): 65 raise ImportError("Need lz4 >= 0.23.1") 66 67 from lz4.block import compress as lz4_compress 68 from lz4.block import decompress as lz4_decompress 69 70 compressions["lz4"] = { 71 "compress": lz4_compress, 72 "decompress": lz4_decompress, 73 } 74 default_compression = "lz4" 75 76 77 with suppress(ImportError): 78 import zstandard 79 80 # Required for Python Buffer Protocol support. 
81 if parse_version(zstandard.__version__) < parse_version("0.9.0"): 82 raise ImportError("Need zstandard >= 0.9.0") 83 84 zstd_compressor = zstandard.ZstdCompressor( 85 level=dask.config.get("distributed.comm.zstd.level"), 86 threads=dask.config.get("distributed.comm.zstd.threads"), 87 ) 88 89 zstd_decompressor = zstandard.ZstdDecompressor() 90 91 def zstd_compress(data): 92 return zstd_compressor.compress(data) 93 94 def zstd_decompress(data): 95 return zstd_decompressor.decompress(data) 96 97 compressions["zstd"] = {"compress": zstd_compress, "decompress": zstd_decompress} 98 99 100 def get_default_compression(): 101 default = dask.config.get("distributed.comm.compression") 102 if default == "auto": 103 return default_compression 104 if default in compressions: 105 return default 106 raise ValueError( 107 "Default compression '%s' not found.\n" 108 "Choices include auto, %s" 109 % (default, ", ".join(sorted(map(str, compressions)))) 110 ) 111 112 113 get_default_compression() 114 115 116 def byte_sample(b, size, n): 117 """Sample a bytestring from many locations 118 119 Parameters 120 ---------- 121 b : bytes or memoryview 122 size : int 123 target size of each sample to collect 124 (may be smaller if samples collide) 125 n : int 126 number of samples to collect 127 """ 128 assert size >= 0 and n >= 0 129 if size == 0 or n == 0: 130 return memoryview(b"") 131 132 b = ensure_memoryview(b) 133 134 parts = n * [None] 135 max_start = b.nbytes - size 136 start = randint(0, max_start) 137 for i in range(n - 1): 138 next_start = randint(0, max_start) 139 end = min(start + size, next_start) 140 parts[i] = b[start:end] 141 start = next_start 142 parts[-1] = b[start : start + size] 143 144 if n == 1: 145 return parts[0] 146 else: 147 return memoryview(b"".join(parts)) 148 149 150 def maybe_compress( 151 payload, 152 min_size=10_000, 153 sample_size=10_000, 154 nsamples=5, 155 compression=no_default, 156 ): 157 """ 158 Maybe compress payload 159 160 1. We don't compress small messages 161 2. We sample the payload in a few spots, compress that, and if it doesn't 162 do any good we return the original 163 3. We then compress the full original, it it doesn't compress well then we 164 return the original 165 4. 
We return the compressed result 166 """ 167 if compression is no_default: 168 compression = dask.config.get("distributed.comm.compression") 169 if not compression: 170 return None, payload 171 if not (min_size <= nbytes(payload) <= 2**31): 172 # Either too small to bother 173 # or too large (compression libraries often fail) 174 return None, payload 175 176 # Normalize function arguments 177 if compression == "auto": 178 compression = default_compression 179 compress = compressions[compression]["compress"] 180 181 # Take a view of payload for efficient usage 182 mv = ensure_memoryview(payload) 183 184 # Try compressing a sample to see if it compresses well 185 sample = byte_sample(mv, sample_size, nsamples) 186 if len(compress(sample)) <= 0.9 * sample.nbytes: 187 # Try compressing the real thing and check how compressed it is 188 compressed = compress(mv) 189 if len(compressed) <= 0.9 * mv.nbytes: 190 return compression, compressed 191 # Skip compression as the sample or the data didn't compress well 192 return None, payload 193 194 195 def decompress(header, frames): 196 """Decompress frames according to information in the header""" 197 return [ 198 compressions[c]["decompress"](frame) 199 for c, frame in zip(header["compression"], frames) 200 ] 201 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/protocol/compression.py b/distributed/protocol/compression.py --- a/distributed/protocol/compression.py +++ b/distributed/protocol/compression.py @@ -81,17 +81,15 @@ if parse_version(zstandard.__version__) < parse_version("0.9.0"): raise ImportError("Need zstandard >= 0.9.0") - zstd_compressor = zstandard.ZstdCompressor( - level=dask.config.get("distributed.comm.zstd.level"), - threads=dask.config.get("distributed.comm.zstd.threads"), - ) - - zstd_decompressor = zstandard.ZstdDecompressor() - def zstd_compress(data): + zstd_compressor = zstandard.ZstdCompressor( + level=dask.config.get("distributed.comm.zstd.level"), + threads=dask.config.get("distributed.comm.zstd.threads"), + ) return zstd_compressor.compress(data) def zstd_decompress(data): + zstd_decompressor = zstandard.ZstdDecompressor() return zstd_decompressor.decompress(data) compressions["zstd"] = {"compress": zstd_compress, "decompress": zstd_decompress}
{"golden_diff": "diff --git a/distributed/protocol/compression.py b/distributed/protocol/compression.py\n--- a/distributed/protocol/compression.py\n+++ b/distributed/protocol/compression.py\n@@ -81,17 +81,15 @@\n if parse_version(zstandard.__version__) < parse_version(\"0.9.0\"):\n raise ImportError(\"Need zstandard >= 0.9.0\")\n \n- zstd_compressor = zstandard.ZstdCompressor(\n- level=dask.config.get(\"distributed.comm.zstd.level\"),\n- threads=dask.config.get(\"distributed.comm.zstd.threads\"),\n- )\n-\n- zstd_decompressor = zstandard.ZstdDecompressor()\n-\n def zstd_compress(data):\n+ zstd_compressor = zstandard.ZstdCompressor(\n+ level=dask.config.get(\"distributed.comm.zstd.level\"),\n+ threads=dask.config.get(\"distributed.comm.zstd.threads\"),\n+ )\n return zstd_compressor.compress(data)\n \n def zstd_decompress(data):\n+ zstd_decompressor = zstandard.ZstdDecompressor()\n return zstd_decompressor.decompress(data)\n \n compressions[\"zstd\"] = {\"compress\": zstd_compress, \"decompress\": zstd_decompress}\n", "issue": "Zstandard compression is not thread safe\n**Describe the issue**:\r\n\r\nIn `compression.py` we create a singleton `ZstdCompressor` and `ZstdDecompressor` object. The docs for the Zstandard library [state](https://github.com/indygreg/python-zstandard/blob/main/zstandard/backend_cffi.py#L1764):\r\n\r\n> Unless specified otherwise, assume that no two methods of ``ZstdCompressor`` instances can be called from multiple Python threads simultaneously. In other words, assume instances are not thread safe unless stated otherwise.\r\n\r\n**Minimal Complete Verifiable Example**:\r\nThis can trivially be reproduced by introducing threading to the `test_maybe_compress` test in `test_protocol.py`\r\n\r\nSome errors that I've reproduced range from compression/decompression failure, to seg faults. 
Examples for searchability:\r\n\r\n```\r\nFatal Python error: Segmentation fault\r\n\r\nThread 0x000000030df25000 (most recent call first):\r\n File \"/Users/dwragge/code/distributed/distributed/protocol/compression.py\", line 92 in zstd_compress\r\n File \"/Users/dwragge/code/distributed/distributed/protocol/compression.py\", line 186 in maybe_compress\r\n File \"/Users/dwragge/code/distributed/distributed/protocol/tests/test_protocol.py\", line 148 in test_compress_decompress\r\n File \"/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py\", line 870 in run\r\n File \"/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py\", line 890 in _bootstrap\r\n```\r\n\r\n- `zstd.ZstdError: cannot compress: Src size is incorrect`\r\n- `zstd.ZstdError: cannot compress: Operation not authorized at current processing stage`\r\n- `zstd.ZstdError: decompression error: Corrupted block detected`\r\n- `zstd.ZstdError: error determining content size from frame header`\r\n- `zstd.ZstdError: decompression error: Unknown frame descriptor`\r\n- `zstd.ZstdError: decompression error: Destination buffer is too small`\r\n\r\nThis manifests in production as errors such as:\r\n\r\n- `distributed.core - ERROR - Exception while handling op get_data`\r\n- `distributed.worker - ERROR - failed during get data with tcp://a.b.c.d:39195 -> tcp://d.e.f.g:43787`\r\n- `distributed.protocol.core - CRITICAL - Failed to deserialize`\r\n\r\nAmongst others, including clear lost workers, and `CommClosedError`s.\r\n\r\n**Anything else we need to know?**:\r\n\r\nThe fix is trivial (instantiate a new Zstd(De)Compressor instance for each compression. 
I will raise a PR to fix plus tests.\n", "before_files": [{"content": "\"\"\"\nRecord known compressors\n\nIncludes utilities for determining whether or not to compress\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom collections.abc import Callable\nfrom contextlib import suppress\nfrom random import randint\nfrom typing import Literal\n\nfrom packaging.version import parse as parse_version\nfrom tlz import identity\n\nimport dask\n\nfrom distributed.utils import ensure_memoryview, nbytes, no_default\n\ncompressions: dict[\n str | None | Literal[False],\n dict[Literal[\"compress\", \"decompress\"], Callable[[bytes], bytes]],\n] = {None: {\"compress\": identity, \"decompress\": identity}}\n\ncompressions[False] = compressions[None] # alias\n\n\ndefault_compression = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nwith suppress(ImportError):\n import zlib\n\n compressions[\"zlib\"] = {\"compress\": zlib.compress, \"decompress\": zlib.decompress}\n\nwith suppress(ImportError):\n import snappy\n\n # In python-snappy 0.5.3, support for the Python Buffer Protocol was added.\n # This is needed to handle other objects (like `memoryview`s) without\n # copying to `bytes` first.\n #\n # Note: `snappy.__version__` doesn't exist in a release yet.\n # So do a little test that will fail if snappy is not 0.5.3 or later.\n try:\n snappy.compress(memoryview(b\"\"))\n except TypeError:\n raise ImportError(\"Need snappy >= 0.5.3\")\n\n compressions[\"snappy\"] = {\n \"compress\": snappy.compress,\n \"decompress\": snappy.decompress,\n }\n default_compression = \"snappy\"\n\nwith suppress(ImportError):\n import lz4\n\n # Required to use `lz4.block` APIs and Python Buffer Protocol support.\n if parse_version(lz4.__version__) < parse_version(\"0.23.1\"):\n raise ImportError(\"Need lz4 >= 0.23.1\")\n\n from lz4.block import compress as lz4_compress\n from lz4.block import decompress as lz4_decompress\n\n compressions[\"lz4\"] = {\n \"compress\": lz4_compress,\n \"decompress\": lz4_decompress,\n }\n default_compression = \"lz4\"\n\n\nwith suppress(ImportError):\n import zstandard\n\n # Required for Python Buffer Protocol support.\n if parse_version(zstandard.__version__) < parse_version(\"0.9.0\"):\n raise ImportError(\"Need zstandard >= 0.9.0\")\n\n zstd_compressor = zstandard.ZstdCompressor(\n level=dask.config.get(\"distributed.comm.zstd.level\"),\n threads=dask.config.get(\"distributed.comm.zstd.threads\"),\n )\n\n zstd_decompressor = zstandard.ZstdDecompressor()\n\n def zstd_compress(data):\n return zstd_compressor.compress(data)\n\n def zstd_decompress(data):\n return zstd_decompressor.decompress(data)\n\n compressions[\"zstd\"] = {\"compress\": zstd_compress, \"decompress\": zstd_decompress}\n\n\ndef get_default_compression():\n default = dask.config.get(\"distributed.comm.compression\")\n if default == \"auto\":\n return default_compression\n if default in compressions:\n return default\n raise ValueError(\n \"Default compression '%s' not found.\\n\"\n \"Choices include auto, %s\"\n % (default, \", \".join(sorted(map(str, compressions))))\n )\n\n\nget_default_compression()\n\n\ndef byte_sample(b, size, n):\n \"\"\"Sample a bytestring from many locations\n\n Parameters\n ----------\n b : bytes or memoryview\n size : int\n target size of each sample to collect\n (may be smaller if samples collide)\n n : int\n number of samples to collect\n \"\"\"\n assert size >= 0 and n >= 0\n if size == 0 or n == 0:\n return memoryview(b\"\")\n\n b = ensure_memoryview(b)\n\n parts = n * [None]\n 
max_start = b.nbytes - size\n start = randint(0, max_start)\n for i in range(n - 1):\n next_start = randint(0, max_start)\n end = min(start + size, next_start)\n parts[i] = b[start:end]\n start = next_start\n parts[-1] = b[start : start + size]\n\n if n == 1:\n return parts[0]\n else:\n return memoryview(b\"\".join(parts))\n\n\ndef maybe_compress(\n payload,\n min_size=10_000,\n sample_size=10_000,\n nsamples=5,\n compression=no_default,\n):\n \"\"\"\n Maybe compress payload\n\n 1. We don't compress small messages\n 2. We sample the payload in a few spots, compress that, and if it doesn't\n do any good we return the original\n 3. We then compress the full original, it it doesn't compress well then we\n return the original\n 4. We return the compressed result\n \"\"\"\n if compression is no_default:\n compression = dask.config.get(\"distributed.comm.compression\")\n if not compression:\n return None, payload\n if not (min_size <= nbytes(payload) <= 2**31):\n # Either too small to bother\n # or too large (compression libraries often fail)\n return None, payload\n\n # Normalize function arguments\n if compression == \"auto\":\n compression = default_compression\n compress = compressions[compression][\"compress\"]\n\n # Take a view of payload for efficient usage\n mv = ensure_memoryview(payload)\n\n # Try compressing a sample to see if it compresses well\n sample = byte_sample(mv, sample_size, nsamples)\n if len(compress(sample)) <= 0.9 * sample.nbytes:\n # Try compressing the real thing and check how compressed it is\n compressed = compress(mv)\n if len(compressed) <= 0.9 * mv.nbytes:\n return compression, compressed\n # Skip compression as the sample or the data didn't compress well\n return None, payload\n\n\ndef decompress(header, frames):\n \"\"\"Decompress frames according to information in the header\"\"\"\n return [\n compressions[c][\"decompress\"](frame)\n for c, frame in zip(header[\"compression\"], frames)\n ]\n", "path": "distributed/protocol/compression.py"}], "after_files": [{"content": "\"\"\"\nRecord known compressors\n\nIncludes utilities for determining whether or not to compress\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom collections.abc import Callable\nfrom contextlib import suppress\nfrom random import randint\nfrom typing import Literal\n\nfrom packaging.version import parse as parse_version\nfrom tlz import identity\n\nimport dask\n\nfrom distributed.utils import ensure_memoryview, nbytes, no_default\n\ncompressions: dict[\n str | None | Literal[False],\n dict[Literal[\"compress\", \"decompress\"], Callable[[bytes], bytes]],\n] = {None: {\"compress\": identity, \"decompress\": identity}}\n\ncompressions[False] = compressions[None] # alias\n\n\ndefault_compression = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nwith suppress(ImportError):\n import zlib\n\n compressions[\"zlib\"] = {\"compress\": zlib.compress, \"decompress\": zlib.decompress}\n\nwith suppress(ImportError):\n import snappy\n\n # In python-snappy 0.5.3, support for the Python Buffer Protocol was added.\n # This is needed to handle other objects (like `memoryview`s) without\n # copying to `bytes` first.\n #\n # Note: `snappy.__version__` doesn't exist in a release yet.\n # So do a little test that will fail if snappy is not 0.5.3 or later.\n try:\n snappy.compress(memoryview(b\"\"))\n except TypeError:\n raise ImportError(\"Need snappy >= 0.5.3\")\n\n compressions[\"snappy\"] = {\n \"compress\": snappy.compress,\n \"decompress\": snappy.decompress,\n }\n default_compression = 
\"snappy\"\n\nwith suppress(ImportError):\n import lz4\n\n # Required to use `lz4.block` APIs and Python Buffer Protocol support.\n if parse_version(lz4.__version__) < parse_version(\"0.23.1\"):\n raise ImportError(\"Need lz4 >= 0.23.1\")\n\n from lz4.block import compress as lz4_compress\n from lz4.block import decompress as lz4_decompress\n\n compressions[\"lz4\"] = {\n \"compress\": lz4_compress,\n \"decompress\": lz4_decompress,\n }\n default_compression = \"lz4\"\n\n\nwith suppress(ImportError):\n import zstandard\n\n # Required for Python Buffer Protocol support.\n if parse_version(zstandard.__version__) < parse_version(\"0.9.0\"):\n raise ImportError(\"Need zstandard >= 0.9.0\")\n\n def zstd_compress(data):\n zstd_compressor = zstandard.ZstdCompressor(\n level=dask.config.get(\"distributed.comm.zstd.level\"),\n threads=dask.config.get(\"distributed.comm.zstd.threads\"),\n )\n return zstd_compressor.compress(data)\n\n def zstd_decompress(data):\n zstd_decompressor = zstandard.ZstdDecompressor()\n return zstd_decompressor.decompress(data)\n\n compressions[\"zstd\"] = {\"compress\": zstd_compress, \"decompress\": zstd_decompress}\n\n\ndef get_default_compression():\n default = dask.config.get(\"distributed.comm.compression\")\n if default == \"auto\":\n return default_compression\n if default in compressions:\n return default\n raise ValueError(\n \"Default compression '%s' not found.\\n\"\n \"Choices include auto, %s\"\n % (default, \", \".join(sorted(map(str, compressions))))\n )\n\n\nget_default_compression()\n\n\ndef byte_sample(b, size, n):\n \"\"\"Sample a bytestring from many locations\n\n Parameters\n ----------\n b : bytes or memoryview\n size : int\n target size of each sample to collect\n (may be smaller if samples collide)\n n : int\n number of samples to collect\n \"\"\"\n assert size >= 0 and n >= 0\n if size == 0 or n == 0:\n return memoryview(b\"\")\n\n b = ensure_memoryview(b)\n\n parts = n * [None]\n max_start = b.nbytes - size\n start = randint(0, max_start)\n for i in range(n - 1):\n next_start = randint(0, max_start)\n end = min(start + size, next_start)\n parts[i] = b[start:end]\n start = next_start\n parts[-1] = b[start : start + size]\n\n if n == 1:\n return parts[0]\n else:\n return memoryview(b\"\".join(parts))\n\n\ndef maybe_compress(\n payload,\n min_size=10_000,\n sample_size=10_000,\n nsamples=5,\n compression=no_default,\n):\n \"\"\"\n Maybe compress payload\n\n 1. We don't compress small messages\n 2. We sample the payload in a few spots, compress that, and if it doesn't\n do any good we return the original\n 3. We then compress the full original, it it doesn't compress well then we\n return the original\n 4. 
We return the compressed result\n \"\"\"\n if compression is no_default:\n compression = dask.config.get(\"distributed.comm.compression\")\n if not compression:\n return None, payload\n if not (min_size <= nbytes(payload) <= 2**31):\n # Either too small to bother\n # or too large (compression libraries often fail)\n return None, payload\n\n # Normalize function arguments\n if compression == \"auto\":\n compression = default_compression\n compress = compressions[compression][\"compress\"]\n\n # Take a view of payload for efficient usage\n mv = ensure_memoryview(payload)\n\n # Try compressing a sample to see if it compresses well\n sample = byte_sample(mv, sample_size, nsamples)\n if len(compress(sample)) <= 0.9 * sample.nbytes:\n # Try compressing the real thing and check how compressed it is\n compressed = compress(mv)\n if len(compressed) <= 0.9 * mv.nbytes:\n return compression, compressed\n # Skip compression as the sample or the data didn't compress well\n return None, payload\n\n\ndef decompress(header, frames):\n \"\"\"Decompress frames according to information in the header\"\"\"\n return [\n compressions[c][\"decompress\"](frame)\n for c, frame in zip(header[\"compression\"], frames)\n ]\n", "path": "distributed/protocol/compression.py"}]}
2,824
276
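The row ending here carries dask's `byte_sample`/`maybe_compress` logic in its before/after files. As a rough, self-contained sketch of the same sample-first heuristic — plain `zlib` only; `demo_maybe_compress`, its thresholds and the demo payloads are illustrative assumptions, not part of dask or of this row:

```python
import os
import zlib
from random import randint


def byte_sample(buf, size, n):
    # Concatenate n slices of up to `size` bytes taken at random offsets,
    # mirroring the sampling idea in the file above.
    max_start = max(len(buf) - size, 0)
    starts = (randint(0, max_start) for _ in range(n))
    return b"".join(buf[s:s + size] for s in starts)


def demo_maybe_compress(payload, min_size=10_000):
    # Returns (compression_name_or_None, data), like the function in the row.
    if len(payload) < min_size:
        return None, payload                       # too small to bother
    sample = byte_sample(payload, 10_000, 5)
    if len(zlib.compress(sample)) > 0.9 * len(sample):
        return None, payload                       # sample did not compress well
    compressed = zlib.compress(payload)
    if len(compressed) > 0.9 * len(payload):
        return None, payload                       # full payload did not either
    return "zlib", compressed


print(demo_maybe_compress(b"abc" * 20_000)[0])     # zlib: repetitive data shrinks
print(demo_maybe_compress(os.urandom(60_000))[0])  # None: random data does not
```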
gh_patches_debug_19446
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-370
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AttributeError in Jinja2 call I'm fairly certain this is a Jinja2 bug (submitted [here](https://github.com/mitsuhiko/jinja2/issues/396)), but the ultimate impact is that `cookiecutter==0.9.0` is broken for me (on Python 2 and Python 3). Steps to reproduce: ``` $ pip2 install cookiecutter==0.9.0 && cookiecutter gh:jacebrowning/template-python --no-input ``` ``` Traceback (most recent call last): File "/opt/boxen/homebrew/bin/cookiecutter", line 9, in <module> load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')() File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py", line 169, in main cookiecutter(args.input_dir, args.checkout, args.no_input) File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py", line 100, in cookiecutter context['cookiecutter'] = prompt_for_config(context, no_input) File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/prompt.py", line 29, in prompt_for_config val = env.from_string(raw).render(cookiecutter=cookiecutter_dict) File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py", line 841, in from_string return cls.from_code(self, self.compile(source), globals, None) File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py", line 542, in compile source = optimize(source, self) File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/optimizer.py", line 27, in optimize return optimizer.visit(node) File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py", line 39, in visit return self.generic_visit(node, *args, **kwargs) File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py", line 59, in generic_visit for field, old_value in node.iter_fields(): AttributeError: 'int' object has no attribute 'iter_fields' ``` This still works with `cookiecutter==0.8.0`: ``` $ pip2 install cookiecutter==0.8.0 && cookiecutter gh:jacebrowning/template-python --no-input ``` My environment: - OS X 10.10.1 - Python 2.7.9 - pip 1.5.6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/compat.py` Content: ``` 1 import os 2 import sys 3 4 PY3 = sys.version_info[0] == 3 5 OLD_PY2 = sys.version_info[:2] < (2, 7) 6 7 if PY3: # pragma: no cover 8 input_str = 'builtins.input' 9 iteritems = lambda d: iter(d.items()) 10 from unittest.mock import patch 11 from io import StringIO 12 13 def read_response(prompt=''): 14 """ 15 Prompt the user for a response. 16 17 Prints the given prompt (which should be a Unicode string), 18 and returns the text entered by the user as a Unicode string. 19 20 :param prompt: A Unicode string that is presented to the user. 21 """ 22 # The Python 3 input function does exactly what we want 23 return input(prompt) 24 25 else: # pragma: no cover 26 from __builtin__ import raw_input 27 input = raw_input 28 input_str = '__builtin__.raw_input' 29 iteritems = lambda d: d.iteritems() 30 from mock import patch 31 from cStringIO import StringIO 32 33 def read_response(prompt=''): 34 """ 35 Prompt the user for a response. 36 37 Prints the given prompt (which should be a Unicode string), 38 and returns the text entered by the user as a Unicode string. 39 40 :param prompt: A Unicode string that is presented to the user. 41 """ 42 # For Python 2, raw_input takes a byte string argument for the prompt. 
43 # This must be encoded using the encoding used by sys.stdout. 44 # The result is a byte string encoding using sys.stdin.encoding. 45 # However, if the program is not being run interactively, sys.stdout 46 # and sys.stdin may not have encoding attributes. 47 # In that case we don't print a prompt (stdin/out isn't interactive, 48 # so prompting is pointless), and we assume the returned data is 49 # encoded using sys.getdefaultencoding(). This may not be right, 50 # but it's likely the best we can do. 51 # Isn't Python 2 encoding support wonderful? :-) 52 if sys.stdout.encoding: 53 prompt = prompt.encode(sys.stdout.encoding) 54 else: 55 prompt = '' 56 enc = sys.stdin.encoding or sys.getdefaultencoding() 57 return raw_input(prompt).decode(enc) 58 59 60 if PY3: # Forced testing 61 62 from shutil import which 63 64 else: # Forced testing 65 66 def is_exe(program): 67 """ 68 Returns whether or not a file is an executable. 69 """ 70 return os.path.isfile(program) and os.access(program, os.X_OK) 71 72 def which(cmd, mode=os.F_OK | os.X_OK, path=None): 73 """Given a command, mode, and a PATH string, return the path which 74 conforms to the given mode on the PATH, or None if there is no such 75 file. 76 `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result 77 of os.environ.get("PATH"), or can be overridden with a custom search 78 path. 79 80 Note: This function was backported from the Python 3 source code. 81 """ 82 # Check that a given file can be accessed with the correct mode. 83 # Additionally check that `file` is not a directory, as on Windows 84 # directories pass the os.access check. 85 def _access_check(fn, mode): 86 return (os.path.exists(fn) and os.access(fn, mode) 87 and not os.path.isdir(fn)) 88 89 # If we're given a path with a directory part, look it up directly 90 # rather than referring to PATH directories. This includes checking 91 # relative to the current directory, e.g. ./script 92 if os.path.dirname(cmd): 93 if _access_check(cmd, mode): 94 return cmd 95 return None 96 97 if path is None: 98 path = os.environ.get("PATH", os.defpath) 99 if not path: 100 return None 101 path = path.split(os.pathsep) 102 103 if sys.platform == "win32": 104 # The current directory takes precedence on Windows. 105 if os.curdir not in path: 106 path.insert(0, os.curdir) 107 108 # PATHEXT is necessary to check on Windows. 109 pathext = os.environ.get("PATHEXT", "").split(os.pathsep) 110 # See if the given file matches any of the expected path 111 # extensions. This will allow us to short circuit when given 112 # "python.exe". If it does match, only test that one, otherwise we 113 # have to try others. 114 if any(cmd.lower().endswith(ext.lower()) for ext in pathext): 115 files = [cmd] 116 else: 117 files = [cmd + ext for ext in pathext] 118 else: 119 # On other platforms you don't have things like PATHEXT to tell you 120 # what file suffixes are executable, so just pass on cmd as-is. 121 files = [cmd] 122 123 seen = set() 124 for dir in path: 125 normdir = os.path.normcase(dir) 126 if normdir not in seen: 127 seen.add(normdir) 128 for thefile in files: 129 name = os.path.join(dir, thefile) 130 if _access_check(name, mode): 131 return name 132 return None 133 134 _hush_pyflakes = (patch, StringIO, which) 135 ``` Path: `cookiecutter/prompt.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.prompt 6 --------------------- 7 8 Functions for prompting the user for project info. 
9 """ 10 11 from __future__ import unicode_literals 12 import sys 13 14 from .compat import iteritems, read_response 15 from jinja2.environment import Environment 16 17 18 def prompt_for_config(context, no_input=False): 19 """ 20 Prompts the user to enter new config, using context as a source for the 21 field names and sample values. 22 23 :param no_input: Prompt the user at command line for manual configuration? 24 """ 25 cookiecutter_dict = {} 26 env = Environment() 27 28 for key, raw in iteritems(context['cookiecutter']): 29 val = env.from_string(raw).render(cookiecutter=cookiecutter_dict) 30 31 if not no_input: 32 prompt = '{0} (default is "{1}")? '.format(key, val) 33 34 new_val = read_response(prompt).strip() 35 36 if new_val != '': 37 val = new_val 38 39 cookiecutter_dict[key] = val 40 return cookiecutter_dict 41 42 43 def query_yes_no(question, default='yes'): 44 """ 45 Ask a yes/no question via `read_response()` and return their answer. 46 47 :param question: A string that is presented to the user. 48 :param default: The presumed answer if the user just hits <Enter>. 49 It must be "yes" (the default), "no" or None (meaning 50 an answer is required of the user). 51 52 The "answer" return value is one of "yes" or "no". 53 54 Adapted from 55 http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input 56 http://code.activestate.com/recipes/577058/ 57 58 """ 59 valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False} 60 if default is None: 61 prompt = ' [y/n] ' 62 elif default == 'yes': 63 prompt = ' [Y/n] ' 64 elif default == 'no': 65 prompt = ' [y/N] ' 66 else: 67 raise ValueError('Invalid default answer: "{0}"'.format(default)) 68 69 while True: 70 sys.stdout.write(question + prompt) 71 choice = read_response().lower() 72 73 if default is not None and choice == '': 74 return valid[default] 75 elif choice in valid: 76 return valid[choice] 77 else: 78 sys.stdout.write('Please respond with "yes" or "no" ' 79 '(or "y" or "n").\n') 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/compat.py b/cookiecutter/compat.py --- a/cookiecutter/compat.py +++ b/cookiecutter/compat.py @@ -131,4 +131,10 @@ return name return None + +def is_string(obj): + """Determine if an object is a string.""" + return isinstance(obj, str if PY3 else basestring) + + _hush_pyflakes = (patch, StringIO, which) diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py --- a/cookiecutter/prompt.py +++ b/cookiecutter/prompt.py @@ -11,7 +11,7 @@ from __future__ import unicode_literals import sys -from .compat import iteritems, read_response +from .compat import iteritems, read_response, is_string from jinja2.environment import Environment @@ -26,6 +26,7 @@ env = Environment() for key, raw in iteritems(context['cookiecutter']): + raw = raw if is_string(raw) else str(raw) val = env.from_string(raw).render(cookiecutter=cookiecutter_dict) if not no_input:
{"golden_diff": "diff --git a/cookiecutter/compat.py b/cookiecutter/compat.py\n--- a/cookiecutter/compat.py\n+++ b/cookiecutter/compat.py\n@@ -131,4 +131,10 @@\n return name\n return None\n \n+\n+def is_string(obj):\n+ \"\"\"Determine if an object is a string.\"\"\"\n+ return isinstance(obj, str if PY3 else basestring)\n+\n+\n _hush_pyflakes = (patch, StringIO, which)\ndiff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -11,7 +11,7 @@\n from __future__ import unicode_literals\n import sys\n \n-from .compat import iteritems, read_response\n+from .compat import iteritems, read_response, is_string\n from jinja2.environment import Environment\n \n \n@@ -26,6 +26,7 @@\n env = Environment()\n \n for key, raw in iteritems(context['cookiecutter']):\n+ raw = raw if is_string(raw) else str(raw)\n val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)\n \n if not no_input:\n", "issue": "AttributeError in Jinja2 call\nI'm fairly certain this is a Jinja2 bug (submitted [here](https://github.com/mitsuhiko/jinja2/issues/396)), but the ultimate impact is that `cookiecutter==0.9.0` is broken for me (on Python 2 and Python 3).\n\nSteps to reproduce:\n\n```\n $ pip2 install cookiecutter==0.9.0 && cookiecutter gh:jacebrowning/template-python --no-input\n```\n\n```\nTraceback (most recent call last):\n File \"/opt/boxen/homebrew/bin/cookiecutter\", line 9, in <module>\n load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py\", line 169, in main\n cookiecutter(args.input_dir, args.checkout, args.no_input)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py\", line 100, in cookiecutter\n context['cookiecutter'] = prompt_for_config(context, no_input)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/prompt.py\", line 29, in prompt_for_config\n val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py\", line 841, in from_string\n return cls.from_code(self, self.compile(source), globals, None)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py\", line 542, in compile\n source = optimize(source, self)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/optimizer.py\", line 27, in optimize\n return optimizer.visit(node)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py\", line 39, in visit\n return self.generic_visit(node, *args, **kwargs)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py\", line 59, in generic_visit\n for field, old_value in node.iter_fields():\nAttributeError: 'int' object has no attribute 'iter_fields'\n```\n\nThis still works with `cookiecutter==0.8.0`:\n\n```\n $ pip2 install cookiecutter==0.8.0 && cookiecutter gh:jacebrowning/template-python --no-input\n```\n\nMy environment:\n- OS X 10.10.1\n- Python 2.7.9\n- pip 1.5.6\n\n", "before_files": [{"content": "import os\nimport sys\n\nPY3 = sys.version_info[0] == 3\nOLD_PY2 = sys.version_info[:2] < (2, 7)\n\nif PY3: # pragma: no cover\n input_str = 'builtins.input'\n iteritems = lambda d: iter(d.items())\n from unittest.mock import patch\n from io import StringIO\n\n def read_response(prompt=''):\n \"\"\"\n Prompt the user for a response.\n\n Prints the given prompt (which should be a Unicode string),\n and returns the text entered by the user as a 
Unicode string.\n\n :param prompt: A Unicode string that is presented to the user.\n \"\"\"\n # The Python 3 input function does exactly what we want\n return input(prompt)\n\nelse: # pragma: no cover\n from __builtin__ import raw_input\n input = raw_input\n input_str = '__builtin__.raw_input'\n iteritems = lambda d: d.iteritems()\n from mock import patch\n from cStringIO import StringIO\n\n def read_response(prompt=''):\n \"\"\"\n Prompt the user for a response.\n\n Prints the given prompt (which should be a Unicode string),\n and returns the text entered by the user as a Unicode string.\n\n :param prompt: A Unicode string that is presented to the user.\n \"\"\"\n # For Python 2, raw_input takes a byte string argument for the prompt.\n # This must be encoded using the encoding used by sys.stdout.\n # The result is a byte string encoding using sys.stdin.encoding.\n # However, if the program is not being run interactively, sys.stdout\n # and sys.stdin may not have encoding attributes.\n # In that case we don't print a prompt (stdin/out isn't interactive,\n # so prompting is pointless), and we assume the returned data is\n # encoded using sys.getdefaultencoding(). This may not be right,\n # but it's likely the best we can do.\n # Isn't Python 2 encoding support wonderful? :-)\n if sys.stdout.encoding:\n prompt = prompt.encode(sys.stdout.encoding)\n else:\n prompt = ''\n enc = sys.stdin.encoding or sys.getdefaultencoding()\n return raw_input(prompt).decode(enc)\n\n\nif PY3: # Forced testing\n\n from shutil import which\n\nelse: # Forced testing\n\n def is_exe(program):\n \"\"\"\n Returns whether or not a file is an executable.\n \"\"\"\n return os.path.isfile(program) and os.access(program, os.X_OK)\n\n def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n \"\"\"Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result\n of os.environ.get(\"PATH\"), or can be overridden with a custom search\n path.\n\n Note: This function was backported from the Python 3 source code.\n \"\"\"\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path\n # extensions. This will allow us to short circuit when given\n # \"python.exe\". 
If it does match, only test that one, otherwise we\n # have to try others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None\n\n_hush_pyflakes = (patch, StringIO, which)\n", "path": "cookiecutter/compat.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport sys\n\nfrom .compat import iteritems, read_response\nfrom jinja2.environment import Environment\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context['cookiecutter']):\n val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)\n\n if not no_input:\n prompt = '{0} (default is \"{1}\")? '.format(key, val)\n\n new_val = read_response(prompt).strip()\n\n if new_val != '':\n val = new_val\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n\n\ndef query_yes_no(question, default='yes'):\n \"\"\"\n Ask a yes/no question via `read_response()` and return their answer.\n\n :param question: A string that is presented to the user.\n :param default: The presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n\n Adapted from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input\n http://code.activestate.com/recipes/577058/\n\n \"\"\"\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\n if default is None:\n prompt = ' [y/n] '\n elif default == 'yes':\n prompt = ' [Y/n] '\n elif default == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('Invalid default answer: \"{0}\"'.format(default))\n\n while True:\n sys.stdout.write(question + prompt)\n choice = read_response().lower()\n\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write('Please respond with \"yes\" or \"no\" '\n '(or \"y\" or \"n\").\\n')\n", "path": "cookiecutter/prompt.py"}], "after_files": [{"content": "import os\nimport sys\n\nPY3 = sys.version_info[0] == 3\nOLD_PY2 = sys.version_info[:2] < (2, 7)\n\nif PY3: # pragma: no cover\n input_str = 'builtins.input'\n iteritems = lambda d: iter(d.items())\n from unittest.mock import patch\n from io import StringIO\n\n def read_response(prompt=''):\n \"\"\"\n Prompt the user for a response.\n\n Prints the given prompt (which should be a Unicode string),\n and returns the text entered by the user as a Unicode string.\n\n :param prompt: A Unicode string that is presented to the user.\n \"\"\"\n # The Python 3 input function does exactly what we want\n return input(prompt)\n\nelse: # pragma: no cover\n from __builtin__ import 
raw_input\n input = raw_input\n input_str = '__builtin__.raw_input'\n iteritems = lambda d: d.iteritems()\n from mock import patch\n from cStringIO import StringIO\n\n def read_response(prompt=''):\n \"\"\"\n Prompt the user for a response.\n\n Prints the given prompt (which should be a Unicode string),\n and returns the text entered by the user as a Unicode string.\n\n :param prompt: A Unicode string that is presented to the user.\n \"\"\"\n # For Python 2, raw_input takes a byte string argument for the prompt.\n # This must be encoded using the encoding used by sys.stdout.\n # The result is a byte string encoding using sys.stdin.encoding.\n # However, if the program is not being run interactively, sys.stdout\n # and sys.stdin may not have encoding attributes.\n # In that case we don't print a prompt (stdin/out isn't interactive,\n # so prompting is pointless), and we assume the returned data is\n # encoded using sys.getdefaultencoding(). This may not be right,\n # but it's likely the best we can do.\n # Isn't Python 2 encoding support wonderful? :-)\n if sys.stdout.encoding:\n prompt = prompt.encode(sys.stdout.encoding)\n else:\n prompt = ''\n enc = sys.stdin.encoding or sys.getdefaultencoding()\n return raw_input(prompt).decode(enc)\n\n\nif PY3: # Forced testing\n\n from shutil import which\n\nelse: # Forced testing\n\n def is_exe(program):\n \"\"\"\n Returns whether or not a file is an executable.\n \"\"\"\n return os.path.isfile(program) and os.access(program, os.X_OK)\n\n def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n \"\"\"Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result\n of os.environ.get(\"PATH\"), or can be overridden with a custom search\n path.\n\n Note: This function was backported from the Python 3 source code.\n \"\"\"\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path\n # extensions. This will allow us to short circuit when given\n # \"python.exe\". 
If it does match, only test that one, otherwise we\n # have to try others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None\n\n\ndef is_string(obj):\n \"\"\"Determine if an object is a string.\"\"\"\n return isinstance(obj, str if PY3 else basestring)\n\n\n_hush_pyflakes = (patch, StringIO, which)\n", "path": "cookiecutter/compat.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport sys\n\nfrom .compat import iteritems, read_response, is_string\nfrom jinja2.environment import Environment\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context['cookiecutter']):\n raw = raw if is_string(raw) else str(raw)\n val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)\n\n if not no_input:\n prompt = '{0} (default is \"{1}\")? '.format(key, val)\n\n new_val = read_response(prompt).strip()\n\n if new_val != '':\n val = new_val\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n\n\ndef query_yes_no(question, default='yes'):\n \"\"\"\n Ask a yes/no question via `read_response()` and return their answer.\n\n :param question: A string that is presented to the user.\n :param default: The presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n\n Adapted from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input\n http://code.activestate.com/recipes/577058/\n\n \"\"\"\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\n if default is None:\n prompt = ' [y/n] '\n elif default == 'yes':\n prompt = ' [Y/n] '\n elif default == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('Invalid default answer: \"{0}\"'.format(default))\n\n while True:\n sys.stdout.write(question + prompt)\n choice = read_response().lower()\n\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write('Please respond with \"yes\" or \"no\" '\n '(or \"y\" or \"n\").\\n')\n", "path": "cookiecutter/prompt.py"}]}
3,048
279
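The golden diff in the row above resolves the Jinja2 `AttributeError` by coercing non-string context values to `str` before they reach `env.from_string`. A minimal standalone illustration of that coercion — the context dict is made up; only `jinja2` is needed:

```python
from jinja2 import Environment

# Made-up context: the integer value is the kind of input that used to crash
# from_string(), which expects template source text.
context = {"version": 1, "repo": "demo", "project_name": "{{ cookiecutter.repo }}"}

env = Environment()
rendered = {}
for key, raw in context.items():
    raw = raw if isinstance(raw, str) else str(raw)  # essence of is_string() in the diff
    rendered[key] = env.from_string(raw).render(cookiecutter=rendered)

print(rendered)  # {'version': '1', 'repo': 'demo', 'project_name': 'demo'}
```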
gh_patches_debug_16096
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-799
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve POI list and form ### Motivation <!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. --> ### Proposed Solution <!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. --> List: - Remove id and version columns - Sort alphabetically after title in region's default language Form: - Add field to not show the location on the map (used for event locations, e.g. multiple instances of the same address but different rooms etc) - Use the same dynamic slug/link update mechanism like for pages ### Alternatives <!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. --> ### Additional Context <!-- Add any other information or screenshots about the feature request here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cms/models/pois/poi.py` Content: ``` 1 from django.db import models 2 from django.utils.translation import get_language, ugettext_lazy as _ 3 4 from ..regions.region import Region, Language 5 from ...constants import status 6 7 8 class POI(models.Model): 9 """ 10 Data model representing a point of interest (POI). It contains all relevant data about its exact position, including 11 coordinates. 12 """ 13 14 region = models.ForeignKey( 15 Region, 16 on_delete=models.CASCADE, 17 related_name="pois", 18 verbose_name=_("region"), 19 ) 20 address = models.CharField( 21 max_length=250, verbose_name=_("street and house number") 22 ) 23 postcode = models.CharField(max_length=10, verbose_name=_("postal code")) 24 city = models.CharField(max_length=250, verbose_name=_("city")) 25 country = models.CharField(max_length=250, verbose_name=_("country")) 26 latitude = models.FloatField( 27 verbose_name=_("latitude"), help_text=_("The latitude coordinate") 28 ) 29 longitude = models.FloatField( 30 verbose_name=_("longitude"), help_text=_("The longitude coordinate") 31 ) 32 icon = models.ImageField( 33 null=True, 34 blank=True, 35 upload_to="pois/%Y/%m/%d", 36 verbose_name=_("icon"), 37 ) 38 archived = models.BooleanField( 39 default=False, 40 verbose_name=_("archived"), 41 help_text=_("Whether or not the location is read-only and hidden in the API."), 42 ) 43 44 @property 45 def languages(self): 46 """ 47 This property returns a QuerySet of all :class:`~cms.models.languages.language.Language` objects, to which a POI 48 translation exists. 49 50 :return: QuerySet of all :class:`~cms.models.languages.language.Language` a POI is translated into 51 :rtype: ~django.db.models.query.QuerySet [ ~cms.models.languages.language.Language ] 52 """ 53 return Language.objects.filter(poi_translations__poi=self) 54 55 def get_translation(self, language_slug): 56 """ 57 This function uses the reverse foreign key ``self.translations`` to get all translations of ``self`` 58 and filters them to the requested :class:`~cms.models.languages.language.Language` slug. 
59 60 :param language_slug: The slug of the desired :class:`~cms.models.languages.language.Language` 61 :type language_slug: str 62 63 :return: The POI translation in the requested :class:`~cms.models.languages.language.Language` or :obj:`None` 64 if no translation exists 65 :rtype: ~cms.models.pois.poi_translation.POITranslation 66 """ 67 return self.translations.filter(language__slug=language_slug).first() 68 69 def get_public_translation(self, language_slug): 70 """ 71 This function retrieves the newest public translation of a POI. 72 73 :param language_slug: The slug of the requested :class:`~cms.models.languages.language.Language` 74 :type language_slug: str 75 76 :return: The public translation of a POI 77 :rtype: ~cms.models.pois.poi_translation.POITranslation 78 """ 79 return self.translations.filter( 80 language__slug=language_slug, 81 status=status.PUBLIC, 82 ).first() 83 84 @property 85 def backend_translation(self): 86 """ 87 This function returns the translation of this POI in the current backend language. 88 89 :return: The backend translation of a POI 90 :rtype: ~cms.models.pois.poi_translation.POITranslation 91 """ 92 return self.translations.filter(language__slug=get_language()).first() 93 94 @property 95 def default_translation(self): 96 """ 97 This function returns the translation of this POI in the region's default language. 98 Since a POI can only be created by creating a translation in the default language, this is guaranteed to return 99 a POI translation. 100 101 :return: The default translation of a POI 102 :rtype: ~cms.models.pois.poi_translation.POITranslation 103 """ 104 return self.translations.filter(language=self.region.default_language).first() 105 106 @property 107 def best_translation(self): 108 """ 109 This function returns the translation of this POI in the current backend language and if it doesn't exist, it 110 provides a fallback to the translation in the region's default language. 111 112 :return: The "best" translation of a POI for displaying in the backend 113 :rtype: ~cms.models.pois.poi_translation.POITranslation 114 """ 115 return self.backend_translation or self.default_translation 116 117 def __str__(self): 118 """ 119 This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``POI object (id)``. 120 It is used in the Django admin backend and as label for ModelChoiceFields. 121 122 :return: A readable string representation of the POI 123 :rtype: str 124 """ 125 return self.best_translation.title 126 127 def __repr__(self): 128 """ 129 This overwrites the default Django ``__repr__()`` method which would return ``<POI: POI object (id)>``. 130 It is used for logging. 
131 132 :return: The canonical string representation of the POI 133 :rtype: str 134 """ 135 return f"<POI (id: {self.id}, region: {self.region.slug}, slug: {self.best_translation.slug})>" 136 137 class Meta: 138 #: The verbose name of the model 139 verbose_name = _("location") 140 #: The plural verbose name of the model 141 verbose_name_plural = _("locations") 142 #: The default permissions for this model 143 default_permissions = () 144 #: The custom permissions for this model 145 permissions = (("manage_pois", "Can manage points of interest"),) 146 ``` Path: `src/cms/forms/pois/poi_form.py` Content: ``` 1 import logging 2 3 from ...models import POI 4 from ..custom_model_form import CustomModelForm 5 from ..icon_widget import IconWidget 6 7 8 logger = logging.getLogger(__name__) 9 10 11 class POIForm(CustomModelForm): 12 """ 13 Form for creating and modifying POI objects 14 """ 15 16 class Meta: 17 """ 18 This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm` 19 for more information. 20 """ 21 22 #: The model of this :class:`django.forms.ModelForm` 23 model = POI 24 #: The fields of the model which should be handled by this form 25 fields = [ 26 "address", 27 "postcode", 28 "city", 29 "country", 30 "latitude", 31 "longitude", 32 "icon", 33 ] 34 #: The widgets which are used in this form 35 widgets = { 36 "icon": IconWidget(), 37 } 38 39 def __init__(self, data=None, files=None, instance=None): 40 """ 41 Initialize POI form 42 43 :param data: submitted POST data 44 :type data: dict 45 46 :param instance: This form's instance 47 :type instance: ~cms.models.pois.poi.POI 48 """ 49 50 # instantiate ModelForm 51 super().__init__(data=data, files=files, instance=instance) 52 53 # If form is disabled because the user has no permissions to edit the page, disable all form fields 54 if instance and instance.archived: 55 for _, field in self.fields.items(): 56 field.disabled = True 57 58 # pylint: disable=arguments-differ 59 def save(self, region=None): 60 """ 61 This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes 62 which are not directly determined by input fields. 63 64 :param region: The region of this form's POI instance 65 :type region: ~cms.models.regions.region.Region 66 67 :return: The saved POI object 68 :rtype: ~cms.models.pois.poi.POI 69 """ 70 71 poi = super().save(commit=False) 72 73 if not self.instance.id: 74 # only update these values when poi is created 75 poi.region = region 76 77 poi.save() 78 return poi 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cms/forms/pois/poi_form.py b/src/cms/forms/pois/poi_form.py --- a/src/cms/forms/pois/poi_form.py +++ b/src/cms/forms/pois/poi_form.py @@ -29,6 +29,7 @@ "country", "latitude", "longitude", + "location_not_on_map", "icon", ] #: The widgets which are used in this form diff --git a/src/cms/models/pois/poi.py b/src/cms/models/pois/poi.py --- a/src/cms/models/pois/poi.py +++ b/src/cms/models/pois/poi.py @@ -29,6 +29,11 @@ longitude = models.FloatField( verbose_name=_("longitude"), help_text=_("The longitude coordinate") ) + location_not_on_map = models.BooleanField( + default=False, + verbose_name=_("Do not show this location on map"), + help_text=_("Tick if you do not show this location on map"), + ) icon = models.ImageField( null=True, blank=True,
{"golden_diff": "diff --git a/src/cms/forms/pois/poi_form.py b/src/cms/forms/pois/poi_form.py\n--- a/src/cms/forms/pois/poi_form.py\n+++ b/src/cms/forms/pois/poi_form.py\n@@ -29,6 +29,7 @@\n \"country\",\n \"latitude\",\n \"longitude\",\n+ \"location_not_on_map\",\n \"icon\",\n ]\n #: The widgets which are used in this form\ndiff --git a/src/cms/models/pois/poi.py b/src/cms/models/pois/poi.py\n--- a/src/cms/models/pois/poi.py\n+++ b/src/cms/models/pois/poi.py\n@@ -29,6 +29,11 @@\n longitude = models.FloatField(\n verbose_name=_(\"longitude\"), help_text=_(\"The longitude coordinate\")\n )\n+ location_not_on_map = models.BooleanField(\n+ default=False,\n+ verbose_name=_(\"Do not show this location on map\"),\n+ help_text=_(\"Tick if you do not show this location on map\"),\n+ )\n icon = models.ImageField(\n null=True,\n blank=True,\n", "issue": "Improve POI list and form\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\n\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\nList:\r\n\r\n- Remove id and version columns\r\n- Sort alphabetically after title in region's default language\r\n\r\nForm:\r\n- Add field to not show the location on the map (used for event locations, e.g. multiple instances of the same address but different rooms etc)\r\n- Use the same dynamic slug/link update mechanism like for pages\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->\r\n\r\n\r\n### Additional Context\r\n<!-- Add any other information or screenshots about the feature request here. -->\r\n\r\n\n", "before_files": [{"content": "from django.db import models\nfrom django.utils.translation import get_language, ugettext_lazy as _\n\nfrom ..regions.region import Region, Language\nfrom ...constants import status\n\n\nclass POI(models.Model):\n \"\"\"\n Data model representing a point of interest (POI). 
It contains all relevant data about its exact position, including\n coordinates.\n \"\"\"\n\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"pois\",\n verbose_name=_(\"region\"),\n )\n address = models.CharField(\n max_length=250, verbose_name=_(\"street and house number\")\n )\n postcode = models.CharField(max_length=10, verbose_name=_(\"postal code\"))\n city = models.CharField(max_length=250, verbose_name=_(\"city\"))\n country = models.CharField(max_length=250, verbose_name=_(\"country\"))\n latitude = models.FloatField(\n verbose_name=_(\"latitude\"), help_text=_(\"The latitude coordinate\")\n )\n longitude = models.FloatField(\n verbose_name=_(\"longitude\"), help_text=_(\"The longitude coordinate\")\n )\n icon = models.ImageField(\n null=True,\n blank=True,\n upload_to=\"pois/%Y/%m/%d\",\n verbose_name=_(\"icon\"),\n )\n archived = models.BooleanField(\n default=False,\n verbose_name=_(\"archived\"),\n help_text=_(\"Whether or not the location is read-only and hidden in the API.\"),\n )\n\n @property\n def languages(self):\n \"\"\"\n This property returns a QuerySet of all :class:`~cms.models.languages.language.Language` objects, to which a POI\n translation exists.\n\n :return: QuerySet of all :class:`~cms.models.languages.language.Language` a POI is translated into\n :rtype: ~django.db.models.query.QuerySet [ ~cms.models.languages.language.Language ]\n \"\"\"\n return Language.objects.filter(poi_translations__poi=self)\n\n def get_translation(self, language_slug):\n \"\"\"\n This function uses the reverse foreign key ``self.translations`` to get all translations of ``self``\n and filters them to the requested :class:`~cms.models.languages.language.Language` slug.\n\n :param language_slug: The slug of the desired :class:`~cms.models.languages.language.Language`\n :type language_slug: str\n\n :return: The POI translation in the requested :class:`~cms.models.languages.language.Language` or :obj:`None`\n if no translation exists\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language__slug=language_slug).first()\n\n def get_public_translation(self, language_slug):\n \"\"\"\n This function retrieves the newest public translation of a POI.\n\n :param language_slug: The slug of the requested :class:`~cms.models.languages.language.Language`\n :type language_slug: str\n\n :return: The public translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(\n language__slug=language_slug,\n status=status.PUBLIC,\n ).first()\n\n @property\n def backend_translation(self):\n \"\"\"\n This function returns the translation of this POI in the current backend language.\n\n :return: The backend translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language__slug=get_language()).first()\n\n @property\n def default_translation(self):\n \"\"\"\n This function returns the translation of this POI in the region's default language.\n Since a POI can only be created by creating a translation in the default language, this is guaranteed to return\n a POI translation.\n\n :return: The default translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language=self.region.default_language).first()\n\n @property\n def best_translation(self):\n \"\"\"\n This function returns the translation of this POI in the current backend language and 
if it doesn't exist, it\n provides a fallback to the translation in the region's default language.\n\n :return: The \"best\" translation of a POI for displaying in the backend\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.backend_translation or self.default_translation\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``POI object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the POI\n :rtype: str\n \"\"\"\n return self.best_translation.title\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<POI: POI object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the POI\n :rtype: str\n \"\"\"\n return f\"<POI (id: {self.id}, region: {self.region.slug}, slug: {self.best_translation.slug})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"location\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"locations\")\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_pois\", \"Can manage points of interest\"),)\n", "path": "src/cms/models/pois/poi.py"}, {"content": "import logging\n\nfrom ...models import POI\nfrom ..custom_model_form import CustomModelForm\nfrom ..icon_widget import IconWidget\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass POIForm(CustomModelForm):\n \"\"\"\n Form for creating and modifying POI objects\n \"\"\"\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = POI\n #: The fields of the model which should be handled by this form\n fields = [\n \"address\",\n \"postcode\",\n \"city\",\n \"country\",\n \"latitude\",\n \"longitude\",\n \"icon\",\n ]\n #: The widgets which are used in this form\n widgets = {\n \"icon\": IconWidget(),\n }\n\n def __init__(self, data=None, files=None, instance=None):\n \"\"\"\n Initialize POI form\n\n :param data: submitted POST data\n :type data: dict\n\n :param instance: This form's instance\n :type instance: ~cms.models.pois.poi.POI\n \"\"\"\n\n # instantiate ModelForm\n super().__init__(data=data, files=files, instance=instance)\n\n # If form is disabled because the user has no permissions to edit the page, disable all form fields\n if instance and instance.archived:\n for _, field in self.fields.items():\n field.disabled = True\n\n # pylint: disable=arguments-differ\n def save(self, region=None):\n \"\"\"\n This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes\n which are not directly determined by input fields.\n\n :param region: The region of this form's POI instance\n :type region: ~cms.models.regions.region.Region\n\n :return: The saved POI object\n :rtype: ~cms.models.pois.poi.POI\n \"\"\"\n\n poi = super().save(commit=False)\n\n if not self.instance.id:\n # only update these values when poi is created\n poi.region = region\n\n poi.save()\n return poi\n", "path": "src/cms/forms/pois/poi_form.py"}], "after_files": [{"content": "from django.db import models\nfrom django.utils.translation import get_language, ugettext_lazy as _\n\nfrom ..regions.region import Region, 
Language\nfrom ...constants import status\n\n\nclass POI(models.Model):\n \"\"\"\n Data model representing a point of interest (POI). It contains all relevant data about its exact position, including\n coordinates.\n \"\"\"\n\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"pois\",\n verbose_name=_(\"region\"),\n )\n address = models.CharField(\n max_length=250, verbose_name=_(\"street and house number\")\n )\n postcode = models.CharField(max_length=10, verbose_name=_(\"postal code\"))\n city = models.CharField(max_length=250, verbose_name=_(\"city\"))\n country = models.CharField(max_length=250, verbose_name=_(\"country\"))\n latitude = models.FloatField(\n verbose_name=_(\"latitude\"), help_text=_(\"The latitude coordinate\")\n )\n longitude = models.FloatField(\n verbose_name=_(\"longitude\"), help_text=_(\"The longitude coordinate\")\n )\n location_not_on_map = models.BooleanField(\n default=False,\n verbose_name=_(\"Do not show this location on map\"),\n help_text=_(\"Tick if you do not show this location on map\"),\n )\n icon = models.ImageField(\n null=True,\n blank=True,\n upload_to=\"pois/%Y/%m/%d\",\n verbose_name=_(\"icon\"),\n )\n archived = models.BooleanField(\n default=False,\n verbose_name=_(\"archived\"),\n help_text=_(\"Whether or not the location is read-only and hidden in the API.\"),\n )\n\n @property\n def languages(self):\n \"\"\"\n This property returns a QuerySet of all :class:`~cms.models.languages.language.Language` objects, to which a POI\n translation exists.\n\n :return: QuerySet of all :class:`~cms.models.languages.language.Language` a POI is translated into\n :rtype: ~django.db.models.query.QuerySet [ ~cms.models.languages.language.Language ]\n \"\"\"\n return Language.objects.filter(poi_translations__poi=self)\n\n def get_translation(self, language_slug):\n \"\"\"\n This function uses the reverse foreign key ``self.translations`` to get all translations of ``self``\n and filters them to the requested :class:`~cms.models.languages.language.Language` slug.\n\n :param language_slug: The slug of the desired :class:`~cms.models.languages.language.Language`\n :type language_slug: str\n\n :return: The POI translation in the requested :class:`~cms.models.languages.language.Language` or :obj:`None`\n if no translation exists\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language__slug=language_slug).first()\n\n def get_public_translation(self, language_slug):\n \"\"\"\n This function retrieves the newest public translation of a POI.\n\n :param language_slug: The slug of the requested :class:`~cms.models.languages.language.Language`\n :type language_slug: str\n\n :return: The public translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(\n language__slug=language_slug,\n status=status.PUBLIC,\n ).first()\n\n @property\n def backend_translation(self):\n \"\"\"\n This function returns the translation of this POI in the current backend language.\n\n :return: The backend translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language__slug=get_language()).first()\n\n @property\n def default_translation(self):\n \"\"\"\n This function returns the translation of this POI in the region's default language.\n Since a POI can only be created by creating a translation in the default language, this is guaranteed to return\n a POI translation.\n\n :return: 
The default translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language=self.region.default_language).first()\n\n @property\n def best_translation(self):\n \"\"\"\n This function returns the translation of this POI in the current backend language and if it doesn't exist, it\n provides a fallback to the translation in the region's default language.\n\n :return: The \"best\" translation of a POI for displaying in the backend\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.backend_translation or self.default_translation\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``POI object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the POI\n :rtype: str\n \"\"\"\n return self.best_translation.title\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<POI: POI object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the POI\n :rtype: str\n \"\"\"\n return f\"<POI (id: {self.id}, region: {self.region.slug}, slug: {self.best_translation.slug})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"location\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"locations\")\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_pois\", \"Can manage points of interest\"),)\n", "path": "src/cms/models/pois/poi.py"}, {"content": "import logging\n\nfrom ...models import POI\nfrom ..custom_model_form import CustomModelForm\nfrom ..icon_widget import IconWidget\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass POIForm(CustomModelForm):\n \"\"\"\n Form for creating and modifying POI objects\n \"\"\"\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = POI\n #: The fields of the model which should be handled by this form\n fields = [\n \"address\",\n \"postcode\",\n \"city\",\n \"country\",\n \"latitude\",\n \"longitude\",\n \"location_not_on_map\",\n \"icon\",\n ]\n #: The widgets which are used in this form\n widgets = {\n \"icon\": IconWidget(),\n }\n\n def __init__(self, data=None, files=None, instance=None):\n \"\"\"\n Initialize POI form\n\n :param data: submitted POST data\n :type data: dict\n\n :param instance: This form's instance\n :type instance: ~cms.models.pois.poi.POI\n \"\"\"\n\n # instantiate ModelForm\n super().__init__(data=data, files=files, instance=instance)\n\n # If form is disabled because the user has no permissions to edit the page, disable all form fields\n if instance and instance.archived:\n for _, field in self.fields.items():\n field.disabled = True\n\n # pylint: disable=arguments-differ\n def save(self, region=None):\n \"\"\"\n This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes\n which are not directly determined by input fields.\n\n :param region: The region of this form's POI instance\n :type region: ~cms.models.regions.region.Region\n\n :return: The saved POI object\n :rtype: ~cms.models.pois.poi.POI\n \"\"\"\n\n poi = 
super().save(commit=False)\n\n if not self.instance.id:\n # only update these values when poi is created\n poi.region = region\n\n poi.save()\n return poi\n", "path": "src/cms/forms/pois/poi_form.py"}]}
2,621
236
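Side note on the record above: the `POIForm.__init__` shown in its `verification_info` disables every form field when the bound instance is archived. A minimal, standalone sketch of that Django pattern follows; the mixin name and the assumption of a boolean `archived` field are illustrative and not taken from the record itself.

```python
# Sketch of the "read-only when archived" pattern from POIForm.__init__ above.
# Assumes the model exposes a boolean `archived` flag; the mixin is meant to be
# combined with a ModelForm subclass that supplies its own Meta.
class ReadOnlyWhenArchivedMixin:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        instance = getattr(self, "instance", None)
        if instance is not None and getattr(instance, "archived", False):
            for field in self.fields.values():
                field.disabled = True

# Hypothetical usage:
# class POIForm(ReadOnlyWhenArchivedMixin, forms.ModelForm):
#     class Meta:
#         model = POI
#         fields = ["address", "postcode", "city"]
```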
gh_patches_debug_25586
rasdani/github-patches
git_diff
google-deepmind__optax-54
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Prevent creating unnecessary momentum variables Currently, `optax.sgd` and `optax.noisy_sgd` unconditionally create momentum variables for the parameters, since both rely on `trace`. For `optax.noisy_sgd`, this is unnecessary since `decay` is always `0`. For `optax.sgd`, this is unexpected since `momentum=0` by default (and can be wasteful for large models). `optax.noisy_sgd` should only require `_scale_by_learning_rate` (with a negation). `optax.sgd` could conditionally add `trace` if `momentum > 0`. Below are the lines of code I'm referring to: https://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L142-L148 https://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L105-L113 And here's where trace automatically creates it's state: https://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/transform.py#L212-L213 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optax/_src/alias.py` Content: ``` 1 # Lint as: python3 2 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 
15 # ============================================================================== 16 """Aliases for popular optimisers.""" 17 18 from typing import Union 19 20 import jax.numpy as jnp 21 from optax._src import combine 22 from optax._src import schedule 23 from optax._src import transform 24 25 26 GradientTransformation = transform.GradientTransformation 27 ScalarOrSchedule = Union[float, schedule.Schedule] 28 29 30 def _scale_by_learning_rate(learning_rate: ScalarOrSchedule): 31 if callable(learning_rate): 32 return transform.scale_by_schedule(lambda count: -learning_rate(count)) 33 return transform.scale(-learning_rate) 34 35 36 def adabelief(learning_rate: ScalarOrSchedule, 37 b1: float = 0.9, 38 b2: float = 0.999, 39 eps: float = 1e-8) -> GradientTransformation: 40 return combine.chain( 41 transform.scale_by_belief(b1=b1, b2=b2, eps=eps), 42 _scale_by_learning_rate(learning_rate), 43 ) 44 45 46 def adagrad( 47 learning_rate: ScalarOrSchedule, 48 initial_accumulator_value: float = 0.1, 49 eps: float = 1e-7) -> GradientTransformation: 50 return combine.chain( 51 transform.scale_by_rss( 52 initial_accumulator_value=initial_accumulator_value, eps=eps), 53 _scale_by_learning_rate(learning_rate), 54 ) 55 56 57 def adam(learning_rate: ScalarOrSchedule, 58 b1: float = 0.9, 59 b2: float = 0.999, 60 eps: float = 1e-8, 61 eps_root: float = 0.0) -> GradientTransformation: 62 return combine.chain( 63 transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root), 64 _scale_by_learning_rate(learning_rate), 65 ) 66 67 68 def adamw(learning_rate: ScalarOrSchedule, 69 b1: float = 0.9, 70 b2: float = 0.999, 71 eps: float = 1e-8, 72 eps_root: float = 0.0, 73 weight_decay: float = 1e-4) -> GradientTransformation: 74 return combine.chain( 75 transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root), 76 transform.additive_weight_decay(weight_decay), 77 _scale_by_learning_rate(learning_rate), 78 ) 79 80 81 def fromage(learning_rate: float, 82 min_norm: float = 1e-6) -> GradientTransformation: 83 mult = 1 / jnp.sqrt(1 + learning_rate ** 2) 84 return combine.chain( 85 transform.scale_by_trust_ratio(min_norm), 86 _scale_by_learning_rate(learning_rate * mult), 87 transform.add_decayed_weights((mult - 1)), 88 ) 89 90 91 def lamb(learning_rate: ScalarOrSchedule, 92 b1: float = 0.9, 93 b2: float = 0.999, 94 eps: float = 1e-6, 95 eps_root: float = 0.0, 96 weight_decay: float = 0.) 
-> GradientTransformation: 97 return combine.chain( 98 transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root), 99 transform.add_decayed_weights(weight_decay), 100 transform.scale_by_trust_ratio(), 101 _scale_by_learning_rate(learning_rate), 102 ) 103 104 105 def noisy_sgd(learning_rate: ScalarOrSchedule, 106 eta: float = 0.01, 107 gamma: float = 0.55, 108 seed: int = 0) -> GradientTransformation: 109 return combine.chain( 110 transform.trace(decay=0., nesterov=False), 111 _scale_by_learning_rate(learning_rate), 112 transform.add_noise(eta, gamma, seed), 113 ) 114 115 116 def radam(learning_rate: ScalarOrSchedule, 117 b1: float = 0.9, 118 b2: float = 0.999, 119 eps: float = 1e-8, 120 threshold: float = 5.0) -> GradientTransformation: 121 return combine.chain( 122 transform.scale_by_radam(b1=b1, b2=b2, eps=eps, threshold=threshold), 123 _scale_by_learning_rate(learning_rate), 124 ) 125 126 127 def rmsprop(learning_rate: ScalarOrSchedule, 128 decay: float = 0.9, 129 eps: float = 1e-8, 130 centered: bool = False) -> GradientTransformation: 131 if centered: 132 return combine.chain( 133 transform.scale_by_stddev(decay=decay, eps=eps), 134 _scale_by_learning_rate(learning_rate), 135 ) 136 return combine.chain( 137 transform.scale_by_rms(decay=decay, eps=eps), 138 _scale_by_learning_rate(learning_rate), 139 ) 140 141 142 def sgd(learning_rate: ScalarOrSchedule, 143 momentum: float = 0., 144 nesterov: bool = False) -> GradientTransformation: 145 return combine.chain( 146 transform.trace(decay=momentum, nesterov=nesterov), 147 _scale_by_learning_rate(learning_rate), 148 ) 149 150 151 def yogi(learning_rate: ScalarOrSchedule, 152 b1: float = 0.9, 153 b2: float = 0.999, 154 eps: float = 1e-3) -> GradientTransformation: 155 return combine.chain( 156 transform.scale_by_yogi(b1=b1, b2=b2, eps=eps), 157 _scale_by_learning_rate(learning_rate), 158 ) 159 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optax/_src/alias.py b/optax/_src/alias.py --- a/optax/_src/alias.py +++ b/optax/_src/alias.py @@ -15,7 +15,7 @@ # ============================================================================== """Aliases for popular optimisers.""" -from typing import Union +from typing import Union, Optional import jax.numpy as jnp from optax._src import combine @@ -107,7 +107,6 @@ gamma: float = 0.55, seed: int = 0) -> GradientTransformation: return combine.chain( - transform.trace(decay=0., nesterov=False), _scale_by_learning_rate(learning_rate), transform.add_noise(eta, gamma, seed), ) @@ -140,11 +139,12 @@ def sgd(learning_rate: ScalarOrSchedule, - momentum: float = 0., + momentum: Optional[float] = None, nesterov: bool = False) -> GradientTransformation: return combine.chain( - transform.trace(decay=momentum, nesterov=nesterov), - _scale_by_learning_rate(learning_rate), + (transform.trace(decay=momentum, nesterov=nesterov) + if momentum is not None else transform.identity()), + _scale_by_learning_rate(learning_rate) )
{"golden_diff": "diff --git a/optax/_src/alias.py b/optax/_src/alias.py\n--- a/optax/_src/alias.py\n+++ b/optax/_src/alias.py\n@@ -15,7 +15,7 @@\n # ==============================================================================\n \"\"\"Aliases for popular optimisers.\"\"\"\n \n-from typing import Union\n+from typing import Union, Optional\n \n import jax.numpy as jnp\n from optax._src import combine\n@@ -107,7 +107,6 @@\n gamma: float = 0.55,\n seed: int = 0) -> GradientTransformation:\n return combine.chain(\n- transform.trace(decay=0., nesterov=False),\n _scale_by_learning_rate(learning_rate),\n transform.add_noise(eta, gamma, seed),\n )\n@@ -140,11 +139,12 @@\n \n \n def sgd(learning_rate: ScalarOrSchedule,\n- momentum: float = 0.,\n+ momentum: Optional[float] = None,\n nesterov: bool = False) -> GradientTransformation:\n return combine.chain(\n- transform.trace(decay=momentum, nesterov=nesterov),\n- _scale_by_learning_rate(learning_rate),\n+ (transform.trace(decay=momentum, nesterov=nesterov)\n+ if momentum is not None else transform.identity()),\n+ _scale_by_learning_rate(learning_rate)\n )\n", "issue": "Prevent creating unnecessary momentum variables\nCurrently, `optax.sgd` and `optax.noisy_sgd` unconditionally create momentum variables for the parameters, since both rely on `trace`. For `optax.noisy_sgd`, this is unnecessary since `decay` is always `0`. For `optax.sgd`, this is unexpected since `momentum=0` by default (and can be wasteful for large models). \r\n\r\n`optax.noisy_sgd` should only require `_scale_by_learning_rate` (with a negation). `optax.sgd` could conditionally add `trace` if `momentum > 0`. \r\n\r\nBelow are the lines of code I'm referring to:\r\n\r\nhttps://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L142-L148\r\n\r\nhttps://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L105-L113\r\n\r\nAnd here's where trace automatically creates it's state:\r\n\r\nhttps://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/transform.py#L212-L213\n", "before_files": [{"content": "# Lint as: python3\n# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Aliases for popular optimisers.\"\"\"\n\nfrom typing import Union\n\nimport jax.numpy as jnp\nfrom optax._src import combine\nfrom optax._src import schedule\nfrom optax._src import transform\n\n\nGradientTransformation = transform.GradientTransformation\nScalarOrSchedule = Union[float, schedule.Schedule]\n\n\ndef _scale_by_learning_rate(learning_rate: ScalarOrSchedule):\n if callable(learning_rate):\n return transform.scale_by_schedule(lambda count: -learning_rate(count))\n return transform.scale(-learning_rate)\n\n\ndef adabelief(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_belief(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adagrad(\n learning_rate: ScalarOrSchedule,\n initial_accumulator_value: float = 0.1,\n eps: float = 1e-7) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_rss(\n initial_accumulator_value=initial_accumulator_value, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adam(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adamw(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n weight_decay: float = 1e-4) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.additive_weight_decay(weight_decay),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef fromage(learning_rate: float,\n min_norm: float = 1e-6) -> GradientTransformation:\n mult = 1 / jnp.sqrt(1 + learning_rate ** 2)\n return combine.chain(\n transform.scale_by_trust_ratio(min_norm),\n _scale_by_learning_rate(learning_rate * mult),\n transform.add_decayed_weights((mult - 1)),\n )\n\n\ndef lamb(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-6,\n eps_root: float = 0.0,\n weight_decay: float = 0.) 
-> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.add_decayed_weights(weight_decay),\n transform.scale_by_trust_ratio(),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef noisy_sgd(learning_rate: ScalarOrSchedule,\n eta: float = 0.01,\n gamma: float = 0.55,\n seed: int = 0) -> GradientTransformation:\n return combine.chain(\n transform.trace(decay=0., nesterov=False),\n _scale_by_learning_rate(learning_rate),\n transform.add_noise(eta, gamma, seed),\n )\n\n\ndef radam(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n threshold: float = 5.0) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_radam(b1=b1, b2=b2, eps=eps, threshold=threshold),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef rmsprop(learning_rate: ScalarOrSchedule,\n decay: float = 0.9,\n eps: float = 1e-8,\n centered: bool = False) -> GradientTransformation:\n if centered:\n return combine.chain(\n transform.scale_by_stddev(decay=decay, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n return combine.chain(\n transform.scale_by_rms(decay=decay, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef sgd(learning_rate: ScalarOrSchedule,\n momentum: float = 0.,\n nesterov: bool = False) -> GradientTransformation:\n return combine.chain(\n transform.trace(decay=momentum, nesterov=nesterov),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef yogi(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-3) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_yogi(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n", "path": "optax/_src/alias.py"}], "after_files": [{"content": "# Lint as: python3\n# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Aliases for popular optimisers.\"\"\"\n\nfrom typing import Union, Optional\n\nimport jax.numpy as jnp\nfrom optax._src import combine\nfrom optax._src import schedule\nfrom optax._src import transform\n\n\nGradientTransformation = transform.GradientTransformation\nScalarOrSchedule = Union[float, schedule.Schedule]\n\n\ndef _scale_by_learning_rate(learning_rate: ScalarOrSchedule):\n if callable(learning_rate):\n return transform.scale_by_schedule(lambda count: -learning_rate(count))\n return transform.scale(-learning_rate)\n\n\ndef adabelief(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_belief(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adagrad(\n learning_rate: ScalarOrSchedule,\n initial_accumulator_value: float = 0.1,\n eps: float = 1e-7) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_rss(\n initial_accumulator_value=initial_accumulator_value, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adam(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adamw(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n weight_decay: float = 1e-4) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.additive_weight_decay(weight_decay),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef fromage(learning_rate: float,\n min_norm: float = 1e-6) -> GradientTransformation:\n mult = 1 / jnp.sqrt(1 + learning_rate ** 2)\n return combine.chain(\n transform.scale_by_trust_ratio(min_norm),\n _scale_by_learning_rate(learning_rate * mult),\n transform.add_decayed_weights((mult - 1)),\n )\n\n\ndef lamb(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-6,\n eps_root: float = 0.0,\n weight_decay: float = 0.) 
-> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.add_decayed_weights(weight_decay),\n transform.scale_by_trust_ratio(),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef noisy_sgd(learning_rate: ScalarOrSchedule,\n eta: float = 0.01,\n gamma: float = 0.55,\n seed: int = 0) -> GradientTransformation:\n return combine.chain(\n _scale_by_learning_rate(learning_rate),\n transform.add_noise(eta, gamma, seed),\n )\n\n\ndef radam(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n threshold: float = 5.0) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_radam(b1=b1, b2=b2, eps=eps, threshold=threshold),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef rmsprop(learning_rate: ScalarOrSchedule,\n decay: float = 0.9,\n eps: float = 1e-8,\n centered: bool = False) -> GradientTransformation:\n if centered:\n return combine.chain(\n transform.scale_by_stddev(decay=decay, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n return combine.chain(\n transform.scale_by_rms(decay=decay, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef sgd(learning_rate: ScalarOrSchedule,\n momentum: Optional[float] = None,\n nesterov: bool = False) -> GradientTransformation:\n return combine.chain(\n (transform.trace(decay=momentum, nesterov=nesterov)\n if momentum is not None else transform.identity()),\n _scale_by_learning_rate(learning_rate)\n )\n\n\ndef yogi(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-3) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_yogi(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n", "path": "optax/_src/alias.py"}]}
2,315
304
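The golden diff above makes the `trace` transform conditional on `momentum`. A quick usage sketch of the behavioural difference, assuming an optax version that contains that patch (parameter shapes are arbitrary):

```python
# Sketch: after the patch, optax.sgd only allocates momentum ("trace") state
# when a momentum value is actually supplied.
import jax.numpy as jnp
import optax

params = {"w": jnp.zeros((1024, 1024))}

plain_sgd = optax.sgd(learning_rate=0.1)          # momentum defaults to None
plain_state = plain_sgd.init(params)              # no per-parameter momentum buffer

momentum_sgd = optax.sgd(learning_rate=0.1, momentum=0.9)
momentum_state = momentum_sgd.init(params)        # includes a trace buffer per parameter
```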
gh_patches_debug_24251
rasdani/github-patches
git_diff
nvaccess__nvda-7114
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- When the -c option is used when starting NVDA user configuration can not be saved Steps to reproduce: - Open a command prompt and cd to the nvda.exe directory. - start nvda with `nvda.exe -c "C:\myTemp" - open the nvda menu and select "save configuration" An error is shown "Could not save configuration - probably read only file system". Regression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `source/fileUtils.py` Content: ``` 1 #fileUtils.py 2 #A part of NonVisual Desktop Access (NVDA) 3 #Copyright (C) 2017 NV Access Limited, Bram Duvigneau 4 #This file is covered by the GNU General Public License. 5 #See the file COPYING for more details. 6 import os 7 import ctypes 8 from contextlib import contextmanager 9 from tempfile import NamedTemporaryFile 10 from logHandler import log 11 12 #: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten. 13 MOVEFILE_REPLACE_EXISTING = 1 14 15 @contextmanager 16 def FaultTolerantFile(name): 17 '''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the 18 file `name' when the context manager scope ends and the the context manager __exit__ is called. This 19 means writing out the complete file can be performed with less concern of corrupting the original file 20 if the process is interrupted by windows shutting down. 21 22 Usage: 23 with FaultTolerantFile("myFile.txt") as f: 24 f.write("This is a test") 25 26 This creates a temporary file, and the writes actually happen on this temp file. At the end of the 27 `with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt" 28 ''' 29 dirpath, filename = os.path.split(name) 30 with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f: 31 log.debug(f.name) 32 yield f 33 f.flush() 34 os.fsync(f) 35 f.close() 36 moveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING) 37 if moveFileResult == 0: 38 raise ctypes.WinError() 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/source/fileUtils.py b/source/fileUtils.py --- a/source/fileUtils.py +++ b/source/fileUtils.py @@ -18,6 +18,7 @@ file `name' when the context manager scope ends and the the context manager __exit__ is called. This means writing out the complete file can be performed with less concern of corrupting the original file if the process is interrupted by windows shutting down. + `name` must be unicode. Usage: with FaultTolerantFile("myFile.txt") as f: @@ -26,6 +27,8 @@ This creates a temporary file, and the writes actually happen on this temp file. At the end of the `with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt" ''' + if not isinstance(name, unicode): + raise TypeError("name must be unicode") dirpath, filename = os.path.split(name) with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f: log.debug(f.name)
{"golden_diff": "diff --git a/source/fileUtils.py b/source/fileUtils.py\n--- a/source/fileUtils.py\n+++ b/source/fileUtils.py\n@@ -18,6 +18,7 @@\n \tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n \tmeans writing out the complete file can be performed with less concern of corrupting the original file\n \tif the process is interrupted by windows shutting down.\n+\t`name` must be unicode.\n \n \tUsage:\n \t\twith FaultTolerantFile(\"myFile.txt\") as f:\n@@ -26,6 +27,8 @@\n \tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n \t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n \t'''\n+\tif not isinstance(name, unicode):\n+\t\traise TypeError(\"name must be unicode\")\n \tdirpath, filename = os.path.split(name)\n \twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n \t\tlog.debug(f.name)\n", "issue": "When the -c option is used when starting NVDA user configuration can not be saved\nSteps to reproduce:\r\n\r\n- Open a command prompt and cd to the nvda.exe directory.\r\n- start nvda with `nvda.exe -c \"C:\\myTemp\"\r\n- open the nvda menu and select \"save configuration\"\r\n\r\nAn error is shown \"Could not save configuration - probably read only file system\".\r\n\r\nRegression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode.\n", "before_files": [{"content": "#fileUtils.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2017 NV Access Limited, Bram Duvigneau\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\nimport os\nimport ctypes\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\nfrom logHandler import log\n\n#: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.\nMOVEFILE_REPLACE_EXISTING = 1\n\n@contextmanager\ndef FaultTolerantFile(name):\n\t'''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the \n\tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n\tmeans writing out the complete file can be performed with less concern of corrupting the original file\n\tif the process is interrupted by windows shutting down.\n\n\tUsage:\n\t\twith FaultTolerantFile(\"myFile.txt\") as f:\n\t\t\tf.write(\"This is a test\")\n\n\tThis creates a temporary file, and the writes actually happen on this temp file. 
At the end of the \n\t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n\t'''\n\tdirpath, filename = os.path.split(name)\n\twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n\t\tlog.debug(f.name)\n\t\tyield f\n\t\tf.flush()\n\t\tos.fsync(f)\n\t\tf.close()\n\t\tmoveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)\n\t\tif moveFileResult == 0:\n\t\t\traise ctypes.WinError()\n", "path": "source/fileUtils.py"}], "after_files": [{"content": "#fileUtils.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2017 NV Access Limited, Bram Duvigneau\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\nimport os\nimport ctypes\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\nfrom logHandler import log\n\n#: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.\nMOVEFILE_REPLACE_EXISTING = 1\n\n@contextmanager\ndef FaultTolerantFile(name):\n\t'''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the \n\tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n\tmeans writing out the complete file can be performed with less concern of corrupting the original file\n\tif the process is interrupted by windows shutting down.\n\t`name` must be unicode.\n\n\tUsage:\n\t\twith FaultTolerantFile(\"myFile.txt\") as f:\n\t\t\tf.write(\"This is a test\")\n\n\tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n\t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n\t'''\n\tif not isinstance(name, unicode):\n\t\traise TypeError(\"name must be unicode\")\n\tdirpath, filename = os.path.split(name)\n\twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n\t\tlog.debug(f.name)\n\t\tyield f\n\t\tf.flush()\n\t\tos.fsync(f)\n\t\tf.close()\n\t\tmoveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)\n\t\tif moveFileResult == 0:\n\t\t\traise ctypes.WinError()\n", "path": "source/fileUtils.py"}]}
845
253
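The NVDA patch above only adds a type guard; the caller-side fix for the `-c` case is to hand `FaultTolerantFile` a unicode path. A minimal Python 2 sketch of that conversion (function and variable names are illustrative, not from the NVDA source):

```python
# Python 2 sketch: decode a byte-string path (e.g. the ANSI value passed via
# -c on the command line) before it reaches FaultTolerantFile, which now
# raises TypeError for non-unicode names.
import locale

def ensure_unicode_path(path):
    if isinstance(path, bytes):  # bytes is str on Python 2
        return path.decode(locale.getpreferredencoding())
    return path

config_dir = ensure_unicode_path("C:\\myTemp")   # as passed with -c
# FaultTolerantFile(os.path.join(config_dir, u"nvda.ini")) would now be safe
```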
gh_patches_debug_25184
rasdani/github-patches
git_diff
getsentry__sentry-6688
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Search related events by event ID ![image](https://user-images.githubusercontent.com/2153/29005586-1dfdfbf6-7a93-11e7-9127-c82c0356b4c6.png) Right now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not individual event ID. This is useful when trying to locate a specific event. More useful now that sampling has been disabled by default. We should also add a column for event ID here. Refs #3147 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/api/endpoints/group_events.py` Content: ``` 1 from __future__ import absolute_import 2 3 import six 4 5 from sentry import tagstore 6 from sentry.api.base import DocSection, EnvironmentMixin 7 from sentry.api.bases import GroupEndpoint 8 from sentry.api.serializers import serialize 9 from sentry.api.paginator import DateTimePaginator 10 from sentry.models import Environment, Event, Group 11 from sentry.search.utils import parse_query 12 from sentry.utils.apidocs import scenario, attach_scenarios 13 from rest_framework.response import Response 14 from sentry.search.utils import InvalidQuery 15 16 17 @scenario('ListAvailableSamples') 18 def list_available_samples_scenario(runner): 19 group = Group.objects.filter(project=runner.default_project).first() 20 runner.request(method='GET', path='/issues/%s/events/' % group.id) 21 22 23 class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin): 24 doc_section = DocSection.EVENTS 25 26 @attach_scenarios([list_available_samples_scenario]) 27 def get(self, request, group): 28 """ 29 List an Issue's Events 30 `````````````````````` 31 32 This endpoint lists an issue's events. 33 34 :pparam string issue_id: the ID of the issue to retrieve. 35 :auth: required 36 """ 37 38 events = Event.objects.filter( 39 group_id=group.id, 40 ) 41 42 query = request.GET.get('query') 43 if query: 44 try: 45 query_kwargs = parse_query(group.project, query, request.user) 46 except InvalidQuery as exc: 47 return Response({'detail': six.text_type(exc)}, status=400) 48 49 if query_kwargs['query']: 50 events = events.filter( 51 message__icontains=query_kwargs['query'], 52 ) 53 54 if query_kwargs['tags']: 55 try: 56 environment_id = self._get_environment_id_from_request( 57 request, group.project.organization_id) 58 except Environment.DoesNotExist: 59 event_ids = [] 60 else: 61 event_ids = tagstore.get_group_event_ids( 62 group.project_id, group.id, environment_id, query_kwargs['tags']) 63 64 if event_ids: 65 events = events.filter( 66 id__in=event_ids, 67 ) 68 else: 69 events = events.none() 70 71 return self.paginate( 72 request=request, 73 queryset=events, 74 order_by='-datetime', 75 on_results=lambda x: serialize(x, request.user), 76 paginator_cls=DateTimePaginator, 77 ) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py --- a/src/sentry/api/endpoints/group_events.py +++ b/src/sentry/api/endpoints/group_events.py @@ -12,6 +12,7 @@ from sentry.utils.apidocs import scenario, attach_scenarios from rest_framework.response import Response from sentry.search.utils import InvalidQuery +from django.db.models import Q @scenario('ListAvailableSamples') @@ -40,6 +41,7 @@ ) query = request.GET.get('query') + if query: try: query_kwargs = parse_query(group.project, query, request.user) @@ -47,9 +49,12 @@ return Response({'detail': six.text_type(exc)}, status=400) if query_kwargs['query']: - events = events.filter( - message__icontains=query_kwargs['query'], - ) + q = Q(message__icontains=query_kwargs['query']) + + if len(query) == 32: + q |= Q(event_id__exact=query_kwargs['query']) + + events = events.filter(q) if query_kwargs['tags']: try:
{"golden_diff": "diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py\n--- a/src/sentry/api/endpoints/group_events.py\n+++ b/src/sentry/api/endpoints/group_events.py\n@@ -12,6 +12,7 @@\n from sentry.utils.apidocs import scenario, attach_scenarios\n from rest_framework.response import Response\n from sentry.search.utils import InvalidQuery\n+from django.db.models import Q\n \n \n @scenario('ListAvailableSamples')\n@@ -40,6 +41,7 @@\n )\n \n query = request.GET.get('query')\n+\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n@@ -47,9 +49,12 @@\n return Response({'detail': six.text_type(exc)}, status=400)\n \n if query_kwargs['query']:\n- events = events.filter(\n- message__icontains=query_kwargs['query'],\n- )\n+ q = Q(message__icontains=query_kwargs['query'])\n+\n+ if len(query) == 32:\n+ q |= Q(event_id__exact=query_kwargs['query'])\n+\n+ events = events.filter(q)\n \n if query_kwargs['tags']:\n try:\n", "issue": "Search related events by event ID\n\r\n![image](https://user-images.githubusercontent.com/2153/29005586-1dfdfbf6-7a93-11e7-9127-c82c0356b4c6.png)\r\n\r\nRight now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not individual event ID. This is useful when trying to locate a specific event. More useful now that sampling has been disabled by default.\r\n\r\nWe should also add a column for event ID here.\r\n\r\nRefs #3147\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom sentry import tagstore\nfrom sentry.api.base import DocSection, EnvironmentMixin\nfrom sentry.api.bases import GroupEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.paginator import DateTimePaginator\nfrom sentry.models import Environment, Event, Group\nfrom sentry.search.utils import parse_query\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom rest_framework.response import Response\nfrom sentry.search.utils import InvalidQuery\n\n\n@scenario('ListAvailableSamples')\ndef list_available_samples_scenario(runner):\n group = Group.objects.filter(project=runner.default_project).first()\n runner.request(method='GET', path='/issues/%s/events/' % group.id)\n\n\nclass GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):\n doc_section = DocSection.EVENTS\n\n @attach_scenarios([list_available_samples_scenario])\n def get(self, request, group):\n \"\"\"\n List an Issue's Events\n ``````````````````````\n\n This endpoint lists an issue's events.\n\n :pparam string issue_id: the ID of the issue to retrieve.\n :auth: required\n \"\"\"\n\n events = Event.objects.filter(\n group_id=group.id,\n )\n\n query = request.GET.get('query')\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n except InvalidQuery as exc:\n return Response({'detail': six.text_type(exc)}, status=400)\n\n if query_kwargs['query']:\n events = events.filter(\n message__icontains=query_kwargs['query'],\n )\n\n if query_kwargs['tags']:\n try:\n environment_id = self._get_environment_id_from_request(\n request, group.project.organization_id)\n except Environment.DoesNotExist:\n event_ids = []\n else:\n event_ids = tagstore.get_group_event_ids(\n group.project_id, group.id, environment_id, query_kwargs['tags'])\n\n if event_ids:\n events = events.filter(\n id__in=event_ids,\n )\n else:\n events = events.none()\n\n return self.paginate(\n request=request,\n queryset=events,\n 
order_by='-datetime',\n on_results=lambda x: serialize(x, request.user),\n paginator_cls=DateTimePaginator,\n )\n", "path": "src/sentry/api/endpoints/group_events.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom sentry import tagstore\nfrom sentry.api.base import DocSection, EnvironmentMixin\nfrom sentry.api.bases import GroupEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.paginator import DateTimePaginator\nfrom sentry.models import Environment, Event, Group\nfrom sentry.search.utils import parse_query\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom rest_framework.response import Response\nfrom sentry.search.utils import InvalidQuery\nfrom django.db.models import Q\n\n\n@scenario('ListAvailableSamples')\ndef list_available_samples_scenario(runner):\n group = Group.objects.filter(project=runner.default_project).first()\n runner.request(method='GET', path='/issues/%s/events/' % group.id)\n\n\nclass GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):\n doc_section = DocSection.EVENTS\n\n @attach_scenarios([list_available_samples_scenario])\n def get(self, request, group):\n \"\"\"\n List an Issue's Events\n ``````````````````````\n\n This endpoint lists an issue's events.\n\n :pparam string issue_id: the ID of the issue to retrieve.\n :auth: required\n \"\"\"\n\n events = Event.objects.filter(\n group_id=group.id,\n )\n\n query = request.GET.get('query')\n\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n except InvalidQuery as exc:\n return Response({'detail': six.text_type(exc)}, status=400)\n\n if query_kwargs['query']:\n q = Q(message__icontains=query_kwargs['query'])\n\n if len(query) == 32:\n q |= Q(event_id__exact=query_kwargs['query'])\n\n events = events.filter(q)\n\n if query_kwargs['tags']:\n try:\n environment_id = self._get_environment_id_from_request(\n request, group.project.organization_id)\n except Environment.DoesNotExist:\n event_ids = []\n else:\n event_ids = tagstore.get_group_event_ids(\n group.project_id, group.id, environment_id, query_kwargs['tags'])\n\n if event_ids:\n events = events.filter(\n id__in=event_ids,\n )\n else:\n events = events.none()\n\n return self.paginate(\n request=request,\n queryset=events,\n order_by='-datetime',\n on_results=lambda x: serialize(x, request.user),\n paginator_cls=DateTimePaginator,\n )\n", "path": "src/sentry/api/endpoints/group_events.py"}]}
1,057
269
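The Django ORM pattern introduced in the Sentry diff above, pulled out on its own for clarity. Field names follow the snippet; `events` stands for any queryset whose model has `message` and `event_id` columns.

```python
# Sketch of the combined message / event-ID filter added in the diff above.
from django.db.models import Q

def filter_group_events(events, query):
    q = Q(message__icontains=query)
    if len(query) == 32:                 # hex event IDs are exactly 32 characters
        q |= Q(event_id__exact=query)
    return events.filter(q)
```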
gh_patches_debug_38866
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1480
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make sure Resources follow semantic conventions From [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 This package implements `OpenTelemetry Resources 17 <https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#resource-sdk>`_: 18 19 *A Resource is an immutable representation of the entity producing 20 telemetry. For example, a process producing telemetry that is running in 21 a container on Kubernetes has a Pod name, it is in a namespace and 22 possibly is part of a Deployment which also has a name. All three of 23 these attributes can be included in the Resource.* 24 25 Resource objects are created with `Resource.create`, which accepts attributes 26 (key-values). Resource attributes can also be passed at process invocation in 27 the :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should 28 register your resource with the `opentelemetry.sdk.metrics.MeterProvider` and 29 `opentelemetry.sdk.trace.TracerProvider` by passing them into their 30 constructors. The `Resource` passed to a provider is available to the 31 exporter, which can send on this information as it sees fit. 32 33 .. code-block:: python 34 35 metrics.set_meter_provider( 36 MeterProvider( 37 resource=Resource.create({ 38 "service.name": "shoppingcart", 39 "service.instance.id": "instance-12", 40 }), 41 ), 42 ) 43 print(metrics.get_meter_provider().resource.attributes) 44 45 {'telemetry.sdk.language': 'python', 46 'telemetry.sdk.name': 'opentelemetry', 47 'telemetry.sdk.version': '0.13.dev0', 48 'service.name': 'shoppingcart', 49 'service.instance.id': 'instance-12'} 50 51 Note that the OpenTelemetry project documents certain `"standard attributes" 52 <https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md>`_ 53 that have prescribed semantic meanings, for example ``service.name`` in the 54 above example. 55 56 .. envvar:: OTEL_RESOURCE_ATTRIBUTES 57 58 The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource 59 attributes to be passed to the SDK at process invocation. The attributes from 60 :envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to 61 `Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower* 62 priority. Attributes should be in the format ``key1=value1,key2=value2``. 
63 Additional details are available `in the specification 64 <https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_. 65 66 .. code-block:: console 67 68 $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcard,will_be_overridden=foo" python - <<EOF 69 import pprint 70 from opentelemetry.sdk.resources import Resource 71 pprint.pprint(Resource.create({"will_be_overridden": "bar"}).attributes) 72 EOF 73 {'service.name': 'shoppingcard', 74 'telemetry.sdk.language': 'python', 75 'telemetry.sdk.name': 'opentelemetry', 76 'telemetry.sdk.version': '0.13.dev0', 77 'will_be_overridden': 'bar'} 78 """ 79 80 import abc 81 import concurrent.futures 82 import logging 83 import os 84 import typing 85 from json import dumps 86 87 import pkg_resources 88 89 LabelValue = typing.Union[str, bool, int, float] 90 Attributes = typing.Dict[str, LabelValue] 91 logger = logging.getLogger(__name__) 92 93 94 TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language" 95 TELEMETRY_SDK_NAME = "telemetry.sdk.name" 96 TELEMETRY_SDK_VERSION = "telemetry.sdk.version" 97 98 OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution( 99 "opentelemetry-sdk" 100 ).version 101 OTEL_RESOURCE_ATTRIBUTES = "OTEL_RESOURCE_ATTRIBUTES" 102 103 104 class Resource: 105 def __init__(self, attributes: Attributes): 106 self._attributes = attributes.copy() 107 108 @staticmethod 109 def create(attributes: typing.Optional[Attributes] = None) -> "Resource": 110 if not attributes: 111 resource = _DEFAULT_RESOURCE 112 else: 113 resource = _DEFAULT_RESOURCE.merge(Resource(attributes)) 114 return resource.merge(OTELResourceDetector().detect()) 115 116 @staticmethod 117 def create_empty() -> "Resource": 118 return _EMPTY_RESOURCE 119 120 @property 121 def attributes(self) -> Attributes: 122 return self._attributes.copy() 123 124 def merge(self, other: "Resource") -> "Resource": 125 merged_attributes = self.attributes 126 # pylint: disable=protected-access 127 for key, value in other._attributes.items(): 128 if key not in merged_attributes or merged_attributes[key] == "": 129 merged_attributes[key] = value 130 return Resource(merged_attributes) 131 132 def __eq__(self, other: object) -> bool: 133 if not isinstance(other, Resource): 134 return False 135 return self._attributes == other._attributes 136 137 def __hash__(self): 138 return hash(dumps(self._attributes, sort_keys=True)) 139 140 141 _EMPTY_RESOURCE = Resource({}) 142 _DEFAULT_RESOURCE = Resource( 143 { 144 TELEMETRY_SDK_LANGUAGE: "python", 145 TELEMETRY_SDK_NAME: "opentelemetry", 146 TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION, 147 } 148 ) 149 150 151 class ResourceDetector(abc.ABC): 152 def __init__(self, raise_on_error=False): 153 self.raise_on_error = raise_on_error 154 155 @abc.abstractmethod 156 def detect(self) -> "Resource": 157 raise NotImplementedError() 158 159 160 class OTELResourceDetector(ResourceDetector): 161 # pylint: disable=no-self-use 162 def detect(self) -> "Resource": 163 env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES) 164 env_resource_map = {} 165 if env_resources_items: 166 env_resource_map = { 167 key.strip(): value.strip() 168 for key, value in ( 169 item.split("=") for item in env_resources_items.split(",") 170 ) 171 } 172 return Resource(env_resource_map) 173 174 175 def get_aggregated_resources( 176 detectors: typing.List["ResourceDetector"], 177 initial_resource: typing.Optional[Resource] = None, 178 timeout=5, 179 ) -> "Resource": 180 """ 
Retrieves resources from detectors in the order that they were passed 181 182 :param detectors: List of resources in order of priority 183 :param initial_resource: Static resource. This has highest priority 184 :param timeout: Number of seconds to wait for each detector to return 185 :return: 186 """ 187 final_resource = initial_resource or _EMPTY_RESOURCE 188 detectors = [OTELResourceDetector()] + detectors 189 190 with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: 191 futures = [executor.submit(detector.detect) for detector in detectors] 192 for detector_ind, future in enumerate(futures): 193 detector = detectors[detector_ind] 194 try: 195 detected_resources = future.result(timeout=timeout) 196 # pylint: disable=broad-except 197 except Exception as ex: 198 if detector.raise_on_error: 199 raise ex 200 logger.warning( 201 "Exception %s in detector %s, ignoring", ex, detector 202 ) 203 detected_resources = _EMPTY_RESOURCE 204 finally: 205 final_resource = final_resource.merge(detected_resources) 206 return final_resource 207 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py --- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py @@ -91,9 +91,62 @@ logger = logging.getLogger(__name__) -TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language" +CLOUD_PROVIDER = "cloud.provider" +CLOUD_ACCOUNT_ID = "cloud.account.id" +CLOUD_REGION = "cloud.region" +CLOUD_ZONE = "cloud.zone" +CONTAINER_NAME = "container.name" +CONTAINER_ID = "container.id" +CONTAINER_IMAGE_NAME = "container.image.name" +CONTAINER_IMAGE_TAG = "container.image.tag" +DEPLOYMENT_ENVIRONMENT = "deployment.environment" +FAAS_NAME = "faas.name" +FAAS_ID = "faas.id" +FAAS_VERSION = "faas.version" +FAAS_INSTANCE = "faas.instance" +HOST_NAME = "host.name" +HOST_TYPE = "host.type" +HOST_IMAGE_NAME = "host.image.name" +HOST_IMAGE_ID = "host.image.id" +HOST_IMAGE_VERSION = "host.image.version" +KUBERNETES_CLUSTER_NAME = "k8s.cluster.name" +KUBERNETES_NAMESPACE_NAME = "k8s.namespace.name" +KUBERNETES_POD_UID = "k8s.pod.uid" +KUBERNETES_POD_NAME = "k8s.pod.name" +KUBERNETES_CONTAINER_NAME = "k8s.container.name" +KUBERNETES_REPLICA_SET_UID = "k8s.replicaset.uid" +KUBERNETES_REPLICA_SET_NAME = "k8s.replicaset.name" +KUBERNETES_DEPLOYMENT_UID = "k8s.deployment.uid" +KUBERNETES_DEPLOYMENT_NAME = "k8s.deployment.name" +KUBERNETES_STATEFUL_SET_UID = "k8s.statefulset.uid" +KUBERNETES_STATEFUL_SET_NAME = "k8s.statefulset.name" +KUBERNETES_DAEMON_SET_UID = "k8s.daemonset.uid" +KUBERNETES_DAEMON_SET_NAME = "k8s.daemonset.name" +KUBERNETES_JOB_UID = "k8s.job.uid" +KUBERNETES_JOB_NAME = "k8s.job.name" +KUBERNETES_CRON_JOB_UID = "k8s.cronjob.uid" +KUBERNETES_CRON_JOB_NAME = "k8s.cronjob.name" +OS_TYPE = "os.type" +OS_DESCRIPTION = "os.description" +PROCESS_PID = "process.pid" +PROCESS_EXECUTABLE_NAME = "process.executable.name" +PROCESS_EXECUTABLE_PATH = "process.executable.path" +PROCESS_COMMAND = "process.command" +PROCESS_COMMAND_LINE = "process.command_line" +PROCESS_COMMAND_ARGS = "process.command_args" +PROCESS_OWNER = "process.owner" +PROCESS_RUNTIME_NAME = "process.runtime.name" +PROCESS_RUNTIME_VERSION = "process.runtime.version" +PROCESS_RUNTIME_DESCRIPTION = "process.runtime.description" +SERVICE_NAME = "service.name" +SERVICE_NAMESPACE = "service.namespace" +SERVICE_INSTANCE_ID = "service.instance.id" +SERVICE_VERSION = "service.version" TELEMETRY_SDK_NAME = "telemetry.sdk.name" TELEMETRY_SDK_VERSION = "telemetry.sdk.version" +TELEMETRY_AUTO_VERSION = "telemetry.auto.version" +TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language" + OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution( "opentelemetry-sdk" @@ -111,7 +164,18 @@ resource = _DEFAULT_RESOURCE else: resource = _DEFAULT_RESOURCE.merge(Resource(attributes)) - return resource.merge(OTELResourceDetector().detect()) + resource = resource.merge(OTELResourceDetector().detect()) + if not resource.attributes.get(SERVICE_NAME, None): + default_service_name = "unknown_service" + process_executable_name = resource.attributes.get( + PROCESS_EXECUTABLE_NAME, None + ) + if process_executable_name: + default_service_name += ":" + process_executable_name + resource = resource.merge( + Resource({SERVICE_NAME: default_service_name}) + ) + return resource @staticmethod def create_empty() -> "Resource":
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n@@ -91,9 +91,62 @@\n logger = logging.getLogger(__name__)\n \n \n-TELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n+CLOUD_PROVIDER = \"cloud.provider\"\n+CLOUD_ACCOUNT_ID = \"cloud.account.id\"\n+CLOUD_REGION = \"cloud.region\"\n+CLOUD_ZONE = \"cloud.zone\"\n+CONTAINER_NAME = \"container.name\"\n+CONTAINER_ID = \"container.id\"\n+CONTAINER_IMAGE_NAME = \"container.image.name\"\n+CONTAINER_IMAGE_TAG = \"container.image.tag\"\n+DEPLOYMENT_ENVIRONMENT = \"deployment.environment\"\n+FAAS_NAME = \"faas.name\"\n+FAAS_ID = \"faas.id\"\n+FAAS_VERSION = \"faas.version\"\n+FAAS_INSTANCE = \"faas.instance\"\n+HOST_NAME = \"host.name\"\n+HOST_TYPE = \"host.type\"\n+HOST_IMAGE_NAME = \"host.image.name\"\n+HOST_IMAGE_ID = \"host.image.id\"\n+HOST_IMAGE_VERSION = \"host.image.version\"\n+KUBERNETES_CLUSTER_NAME = \"k8s.cluster.name\"\n+KUBERNETES_NAMESPACE_NAME = \"k8s.namespace.name\"\n+KUBERNETES_POD_UID = \"k8s.pod.uid\"\n+KUBERNETES_POD_NAME = \"k8s.pod.name\"\n+KUBERNETES_CONTAINER_NAME = \"k8s.container.name\"\n+KUBERNETES_REPLICA_SET_UID = \"k8s.replicaset.uid\"\n+KUBERNETES_REPLICA_SET_NAME = \"k8s.replicaset.name\"\n+KUBERNETES_DEPLOYMENT_UID = \"k8s.deployment.uid\"\n+KUBERNETES_DEPLOYMENT_NAME = \"k8s.deployment.name\"\n+KUBERNETES_STATEFUL_SET_UID = \"k8s.statefulset.uid\"\n+KUBERNETES_STATEFUL_SET_NAME = \"k8s.statefulset.name\"\n+KUBERNETES_DAEMON_SET_UID = \"k8s.daemonset.uid\"\n+KUBERNETES_DAEMON_SET_NAME = \"k8s.daemonset.name\"\n+KUBERNETES_JOB_UID = \"k8s.job.uid\"\n+KUBERNETES_JOB_NAME = \"k8s.job.name\"\n+KUBERNETES_CRON_JOB_UID = \"k8s.cronjob.uid\"\n+KUBERNETES_CRON_JOB_NAME = \"k8s.cronjob.name\"\n+OS_TYPE = \"os.type\"\n+OS_DESCRIPTION = \"os.description\"\n+PROCESS_PID = \"process.pid\"\n+PROCESS_EXECUTABLE_NAME = \"process.executable.name\"\n+PROCESS_EXECUTABLE_PATH = \"process.executable.path\"\n+PROCESS_COMMAND = \"process.command\"\n+PROCESS_COMMAND_LINE = \"process.command_line\"\n+PROCESS_COMMAND_ARGS = \"process.command_args\"\n+PROCESS_OWNER = \"process.owner\"\n+PROCESS_RUNTIME_NAME = \"process.runtime.name\"\n+PROCESS_RUNTIME_VERSION = \"process.runtime.version\"\n+PROCESS_RUNTIME_DESCRIPTION = \"process.runtime.description\"\n+SERVICE_NAME = \"service.name\"\n+SERVICE_NAMESPACE = \"service.namespace\"\n+SERVICE_INSTANCE_ID = \"service.instance.id\"\n+SERVICE_VERSION = \"service.version\"\n TELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\n TELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\n+TELEMETRY_AUTO_VERSION = \"telemetry.auto.version\"\n+TELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n+\n \n OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n@@ -111,7 +164,18 @@\n resource = _DEFAULT_RESOURCE\n else:\n resource = _DEFAULT_RESOURCE.merge(Resource(attributes))\n- return resource.merge(OTELResourceDetector().detect())\n+ resource = resource.merge(OTELResourceDetector().detect())\n+ if not resource.attributes.get(SERVICE_NAME, None):\n+ default_service_name = \"unknown_service\"\n+ process_executable_name = resource.attributes.get(\n+ PROCESS_EXECUTABLE_NAME, None\n+ )\n+ if process_executable_name:\n+ default_service_name += \":\" + process_executable_name\n+ resource = resource.merge(\n+ Resource({SERVICE_NAME: default_service_name})\n+ 
)\n+ return resource\n \n @staticmethod\n def create_empty() -> \"Resource\":\n", "issue": "Make sure Resources follow semantic conventions\nFrom [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md).\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package implements `OpenTelemetry Resources\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#resource-sdk>`_:\n\n *A Resource is an immutable representation of the entity producing\n telemetry. For example, a process producing telemetry that is running in\n a container on Kubernetes has a Pod name, it is in a namespace and\n possibly is part of a Deployment which also has a name. All three of\n these attributes can be included in the Resource.*\n\nResource objects are created with `Resource.create`, which accepts attributes\n(key-values). Resource attributes can also be passed at process invocation in\nthe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should\nregister your resource with the `opentelemetry.sdk.metrics.MeterProvider` and\n`opentelemetry.sdk.trace.TracerProvider` by passing them into their\nconstructors. The `Resource` passed to a provider is available to the\nexporter, which can send on this information as it sees fit.\n\n.. code-block:: python\n\n metrics.set_meter_provider(\n MeterProvider(\n resource=Resource.create({\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }),\n ),\n )\n print(metrics.get_meter_provider().resource.attributes)\n\n {'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'service.name': 'shoppingcart',\n 'service.instance.id': 'instance-12'}\n\nNote that the OpenTelemetry project documents certain `\"standard attributes\"\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md>`_\nthat have prescribed semantic meanings, for example ``service.name`` in the\nabove example.\n\n.. envvar:: OTEL_RESOURCE_ATTRIBUTES\n\nThe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource\nattributes to be passed to the SDK at process invocation. The attributes from\n:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to\n`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*\npriority. Attributes should be in the format ``key1=value1,key2=value2``.\nAdditional details are available `in the specification\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.\n\n.. 
code-block:: console\n\n $ OTEL_RESOURCE_ATTRIBUTES=\"service.name=shoppingcard,will_be_overridden=foo\" python - <<EOF\n import pprint\n from opentelemetry.sdk.resources import Resource\n pprint.pprint(Resource.create({\"will_be_overridden\": \"bar\"}).attributes)\n EOF\n {'service.name': 'shoppingcard',\n 'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'will_be_overridden': 'bar'}\n \"\"\"\n\nimport abc\nimport concurrent.futures\nimport logging\nimport os\nimport typing\nfrom json import dumps\n\nimport pkg_resources\n\nLabelValue = typing.Union[str, bool, int, float]\nAttributes = typing.Dict[str, LabelValue]\nlogger = logging.getLogger(__name__)\n\n\nTELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\nTELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\nTELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\n\nOPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n).version\nOTEL_RESOURCE_ATTRIBUTES = \"OTEL_RESOURCE_ATTRIBUTES\"\n\n\nclass Resource:\n def __init__(self, attributes: Attributes):\n self._attributes = attributes.copy()\n\n @staticmethod\n def create(attributes: typing.Optional[Attributes] = None) -> \"Resource\":\n if not attributes:\n resource = _DEFAULT_RESOURCE\n else:\n resource = _DEFAULT_RESOURCE.merge(Resource(attributes))\n return resource.merge(OTELResourceDetector().detect())\n\n @staticmethod\n def create_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n\n @property\n def attributes(self) -> Attributes:\n return self._attributes.copy()\n\n def merge(self, other: \"Resource\") -> \"Resource\":\n merged_attributes = self.attributes\n # pylint: disable=protected-access\n for key, value in other._attributes.items():\n if key not in merged_attributes or merged_attributes[key] == \"\":\n merged_attributes[key] = value\n return Resource(merged_attributes)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Resource):\n return False\n return self._attributes == other._attributes\n\n def __hash__(self):\n return hash(dumps(self._attributes, sort_keys=True))\n\n\n_EMPTY_RESOURCE = Resource({})\n_DEFAULT_RESOURCE = Resource(\n {\n TELEMETRY_SDK_LANGUAGE: \"python\",\n TELEMETRY_SDK_NAME: \"opentelemetry\",\n TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,\n }\n)\n\n\nclass ResourceDetector(abc.ABC):\n def __init__(self, raise_on_error=False):\n self.raise_on_error = raise_on_error\n\n @abc.abstractmethod\n def detect(self) -> \"Resource\":\n raise NotImplementedError()\n\n\nclass OTELResourceDetector(ResourceDetector):\n # pylint: disable=no-self-use\n def detect(self) -> \"Resource\":\n env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)\n env_resource_map = {}\n if env_resources_items:\n env_resource_map = {\n key.strip(): value.strip()\n for key, value in (\n item.split(\"=\") for item in env_resources_items.split(\",\")\n )\n }\n return Resource(env_resource_map)\n\n\ndef get_aggregated_resources(\n detectors: typing.List[\"ResourceDetector\"],\n initial_resource: typing.Optional[Resource] = None,\n timeout=5,\n) -> \"Resource\":\n \"\"\" Retrieves resources from detectors in the order that they were passed\n\n :param detectors: List of resources in order of priority\n :param initial_resource: Static resource. 
This has highest priority\n :param timeout: Number of seconds to wait for each detector to return\n :return:\n \"\"\"\n final_resource = initial_resource or _EMPTY_RESOURCE\n detectors = [OTELResourceDetector()] + detectors\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n futures = [executor.submit(detector.detect) for detector in detectors]\n for detector_ind, future in enumerate(futures):\n detector = detectors[detector_ind]\n try:\n detected_resources = future.result(timeout=timeout)\n # pylint: disable=broad-except\n except Exception as ex:\n if detector.raise_on_error:\n raise ex\n logger.warning(\n \"Exception %s in detector %s, ignoring\", ex, detector\n )\n detected_resources = _EMPTY_RESOURCE\n finally:\n final_resource = final_resource.merge(detected_resources)\n return final_resource\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package implements `OpenTelemetry Resources\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#resource-sdk>`_:\n\n *A Resource is an immutable representation of the entity producing\n telemetry. For example, a process producing telemetry that is running in\n a container on Kubernetes has a Pod name, it is in a namespace and\n possibly is part of a Deployment which also has a name. All three of\n these attributes can be included in the Resource.*\n\nResource objects are created with `Resource.create`, which accepts attributes\n(key-values). Resource attributes can also be passed at process invocation in\nthe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should\nregister your resource with the `opentelemetry.sdk.metrics.MeterProvider` and\n`opentelemetry.sdk.trace.TracerProvider` by passing them into their\nconstructors. The `Resource` passed to a provider is available to the\nexporter, which can send on this information as it sees fit.\n\n.. code-block:: python\n\n metrics.set_meter_provider(\n MeterProvider(\n resource=Resource.create({\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }),\n ),\n )\n print(metrics.get_meter_provider().resource.attributes)\n\n {'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'service.name': 'shoppingcart',\n 'service.instance.id': 'instance-12'}\n\nNote that the OpenTelemetry project documents certain `\"standard attributes\"\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md>`_\nthat have prescribed semantic meanings, for example ``service.name`` in the\nabove example.\n\n.. envvar:: OTEL_RESOURCE_ATTRIBUTES\n\nThe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource\nattributes to be passed to the SDK at process invocation. 
The attributes from\n:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to\n`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*\npriority. Attributes should be in the format ``key1=value1,key2=value2``.\nAdditional details are available `in the specification\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.\n\n.. code-block:: console\n\n $ OTEL_RESOURCE_ATTRIBUTES=\"service.name=shoppingcard,will_be_overridden=foo\" python - <<EOF\n import pprint\n from opentelemetry.sdk.resources import Resource\n pprint.pprint(Resource.create({\"will_be_overridden\": \"bar\"}).attributes)\n EOF\n {'service.name': 'shoppingcard',\n 'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'will_be_overridden': 'bar'}\n \"\"\"\n\nimport abc\nimport concurrent.futures\nimport logging\nimport os\nimport typing\nfrom json import dumps\n\nimport pkg_resources\n\nLabelValue = typing.Union[str, bool, int, float]\nAttributes = typing.Dict[str, LabelValue]\nlogger = logging.getLogger(__name__)\n\n\nCLOUD_PROVIDER = \"cloud.provider\"\nCLOUD_ACCOUNT_ID = \"cloud.account.id\"\nCLOUD_REGION = \"cloud.region\"\nCLOUD_ZONE = \"cloud.zone\"\nCONTAINER_NAME = \"container.name\"\nCONTAINER_ID = \"container.id\"\nCONTAINER_IMAGE_NAME = \"container.image.name\"\nCONTAINER_IMAGE_TAG = \"container.image.tag\"\nDEPLOYMENT_ENVIRONMENT = \"deployment.environment\"\nFAAS_NAME = \"faas.name\"\nFAAS_ID = \"faas.id\"\nFAAS_VERSION = \"faas.version\"\nFAAS_INSTANCE = \"faas.instance\"\nHOST_NAME = \"host.name\"\nHOST_TYPE = \"host.type\"\nHOST_IMAGE_NAME = \"host.image.name\"\nHOST_IMAGE_ID = \"host.image.id\"\nHOST_IMAGE_VERSION = \"host.image.version\"\nKUBERNETES_CLUSTER_NAME = \"k8s.cluster.name\"\nKUBERNETES_NAMESPACE_NAME = \"k8s.namespace.name\"\nKUBERNETES_POD_UID = \"k8s.pod.uid\"\nKUBERNETES_POD_NAME = \"k8s.pod.name\"\nKUBERNETES_CONTAINER_NAME = \"k8s.container.name\"\nKUBERNETES_REPLICA_SET_UID = \"k8s.replicaset.uid\"\nKUBERNETES_REPLICA_SET_NAME = \"k8s.replicaset.name\"\nKUBERNETES_DEPLOYMENT_UID = \"k8s.deployment.uid\"\nKUBERNETES_DEPLOYMENT_NAME = \"k8s.deployment.name\"\nKUBERNETES_STATEFUL_SET_UID = \"k8s.statefulset.uid\"\nKUBERNETES_STATEFUL_SET_NAME = \"k8s.statefulset.name\"\nKUBERNETES_DAEMON_SET_UID = \"k8s.daemonset.uid\"\nKUBERNETES_DAEMON_SET_NAME = \"k8s.daemonset.name\"\nKUBERNETES_JOB_UID = \"k8s.job.uid\"\nKUBERNETES_JOB_NAME = \"k8s.job.name\"\nKUBERNETES_CRON_JOB_UID = \"k8s.cronjob.uid\"\nKUBERNETES_CRON_JOB_NAME = \"k8s.cronjob.name\"\nOS_TYPE = \"os.type\"\nOS_DESCRIPTION = \"os.description\"\nPROCESS_PID = \"process.pid\"\nPROCESS_EXECUTABLE_NAME = \"process.executable.name\"\nPROCESS_EXECUTABLE_PATH = \"process.executable.path\"\nPROCESS_COMMAND = \"process.command\"\nPROCESS_COMMAND_LINE = \"process.command_line\"\nPROCESS_COMMAND_ARGS = \"process.command_args\"\nPROCESS_OWNER = \"process.owner\"\nPROCESS_RUNTIME_NAME = \"process.runtime.name\"\nPROCESS_RUNTIME_VERSION = \"process.runtime.version\"\nPROCESS_RUNTIME_DESCRIPTION = \"process.runtime.description\"\nSERVICE_NAME = \"service.name\"\nSERVICE_NAMESPACE = \"service.namespace\"\nSERVICE_INSTANCE_ID = \"service.instance.id\"\nSERVICE_VERSION = \"service.version\"\nTELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\nTELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\nTELEMETRY_AUTO_VERSION = 
\"telemetry.auto.version\"\nTELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n\n\nOPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n).version\nOTEL_RESOURCE_ATTRIBUTES = \"OTEL_RESOURCE_ATTRIBUTES\"\n\n\nclass Resource:\n def __init__(self, attributes: Attributes):\n self._attributes = attributes.copy()\n\n @staticmethod\n def create(attributes: typing.Optional[Attributes] = None) -> \"Resource\":\n if not attributes:\n resource = _DEFAULT_RESOURCE\n else:\n resource = _DEFAULT_RESOURCE.merge(Resource(attributes))\n resource = resource.merge(OTELResourceDetector().detect())\n if not resource.attributes.get(SERVICE_NAME, None):\n default_service_name = \"unknown_service\"\n process_executable_name = resource.attributes.get(\n PROCESS_EXECUTABLE_NAME, None\n )\n if process_executable_name:\n default_service_name += \":\" + process_executable_name\n resource = resource.merge(\n Resource({SERVICE_NAME: default_service_name})\n )\n return resource\n\n @staticmethod\n def create_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n\n @property\n def attributes(self) -> Attributes:\n return self._attributes.copy()\n\n def merge(self, other: \"Resource\") -> \"Resource\":\n merged_attributes = self.attributes\n # pylint: disable=protected-access\n for key, value in other._attributes.items():\n if key not in merged_attributes or merged_attributes[key] == \"\":\n merged_attributes[key] = value\n return Resource(merged_attributes)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Resource):\n return False\n return self._attributes == other._attributes\n\n def __hash__(self):\n return hash(dumps(self._attributes, sort_keys=True))\n\n\n_EMPTY_RESOURCE = Resource({})\n_DEFAULT_RESOURCE = Resource(\n {\n TELEMETRY_SDK_LANGUAGE: \"python\",\n TELEMETRY_SDK_NAME: \"opentelemetry\",\n TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,\n }\n)\n\n\nclass ResourceDetector(abc.ABC):\n def __init__(self, raise_on_error=False):\n self.raise_on_error = raise_on_error\n\n @abc.abstractmethod\n def detect(self) -> \"Resource\":\n raise NotImplementedError()\n\n\nclass OTELResourceDetector(ResourceDetector):\n # pylint: disable=no-self-use\n def detect(self) -> \"Resource\":\n env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)\n env_resource_map = {}\n if env_resources_items:\n env_resource_map = {\n key.strip(): value.strip()\n for key, value in (\n item.split(\"=\") for item in env_resources_items.split(\",\")\n )\n }\n return Resource(env_resource_map)\n\n\ndef get_aggregated_resources(\n detectors: typing.List[\"ResourceDetector\"],\n initial_resource: typing.Optional[Resource] = None,\n timeout=5,\n) -> \"Resource\":\n \"\"\" Retrieves resources from detectors in the order that they were passed\n\n :param detectors: List of resources in order of priority\n :param initial_resource: Static resource. 
This has highest priority\n :param timeout: Number of seconds to wait for each detector to return\n :return:\n \"\"\"\n final_resource = initial_resource or _EMPTY_RESOURCE\n detectors = [OTELResourceDetector()] + detectors\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n futures = [executor.submit(detector.detect) for detector in detectors]\n for detector_ind, future in enumerate(futures):\n detector = detectors[detector_ind]\n try:\n detected_resources = future.result(timeout=timeout)\n # pylint: disable=broad-except\n except Exception as ex:\n if detector.raise_on_error:\n raise ex\n logger.warning(\n \"Exception %s in detector %s, ignoring\", ex, detector\n )\n detected_resources = _EMPTY_RESOURCE\n finally:\n final_resource = final_resource.merge(detected_resources)\n return final_resource\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py"}]}
2,459
983
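The patch serialized in the record above adds the semantic-convention attribute names and, more importantly, a fallback in `Resource.create`: when no `service.name` is present after merging, it synthesizes one as `unknown_service`, optionally suffixed with the process executable name. Below is a minimal sketch of just that fallback, using plain dictionaries in place of the SDK's `Resource` objects; the attribute values are invented for illustration.

```python
SERVICE_NAME = "service.name"
PROCESS_EXECUTABLE_NAME = "process.executable.name"


def apply_default_service_name(attributes):
    # Mirror the fallback the patch adds to Resource.create: only fill in
    # service.name when the merged attributes do not already provide one.
    if not attributes.get(SERVICE_NAME):
        default_service_name = "unknown_service"
        exe = attributes.get(PROCESS_EXECUTABLE_NAME)
        if exe:
            default_service_name += ":" + exe
        attributes = {**attributes, SERVICE_NAME: default_service_name}
    return attributes


print(apply_default_service_name({}))
# -> {'service.name': 'unknown_service'}
print(apply_default_service_name({PROCESS_EXECUTABLE_NAME: "python3"}))
# -> {'process.executable.name': 'python3', 'service.name': 'unknown_service:python3'}
print(apply_default_service_name({SERVICE_NAME: "shoppingcart"}))
# -> {'service.name': 'shoppingcart'}
```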
gh_patches_debug_43222
rasdani/github-patches
git_diff
hydroshare__hydroshare-5067
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- add a select for "subject areas" **Describe the feature you'd like and what it will do** Currently, subject areas is free text. Should be a typeahead -- noninvasive. @martinseul suggested that we should populate a list similar to https://has.arizona.edu/research-focus-areas Related to https://github.com/hydroshare/hydroshare/issues/4733 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `theme/migrations/0023_alter_userprofile_subject_areas.py` Content: ``` 1 # Generated by Django 3.2.15 on 2023-04-11 18:41 2 3 import django.contrib.postgres.fields 4 from django.db import migrations, models 5 6 7 def migrate_csv_subject_areas(apps, schema_editor): 8 SubjectArea = apps.get_model('hs_dictionary.SubjectArea') 9 UserProfile = apps.get_model('theme.UserProfile') 10 # Attempt to match existing SAs from profiles 11 profiles_with_sa = UserProfile.objects \ 12 .exclude(subject_areas__isnull=True) \ 13 .exclude(subject_areas='') 14 15 subject_area_objects = SubjectArea.objects.all() 16 17 for profile in profiles_with_sa: 18 old_subject_areas = profile.subject_areas.split(',') 19 old_subject_areas = [s for s in old_subject_areas] 20 print('*' * 100) 21 print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}') 22 new_subj_areas = [] 23 for subject in old_subject_areas: 24 print(f"Searching for a match with '{subject}'") 25 match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()] 26 if match: 27 new_subj_areas.append(match[0].name) 28 if match[0].name == subject: 29 print(f'- Exact match with pre-existing subject area: {subject}') 30 else: 31 print(f'- Near match with pre-existing subject area: {subject}') 32 else: 33 if subject.strip() == subject: 34 print(f"- Unmatched subject area '{subject}' will remain unaltered") 35 new_subj_areas.append(subject) 36 else: 37 print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped") 38 new_subj_areas.append(subject.strip()) 39 40 sas = ','.join(new_subj_areas) 41 print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}') 42 profile.subject_areas = f'{{{sas}}}' 43 profile.save() 44 45 profiles_without_sa = UserProfile.objects \ 46 .filter(subject_areas='') 47 for profile in profiles_without_sa: 48 print(f'Updating {profile} from "" to {{}}') 49 profile.subject_areas = '{}' 50 profile.save() 51 52 53 class Migration(migrations.Migration): 54 55 dependencies = [ 56 ('theme', '0022_alter_userprofile_subject_areas'), 57 ] 58 59 operations = [ 60 migrations.RunSQL("UPDATE theme_userprofile set subject_areas = NULL " 61 "where theme_userprofile.subject_areas like '';"), 62 migrations.RunPython(migrate_csv_subject_areas), 63 migrations.AlterField( 64 model_name='userprofile', 65 name='subject_areas', 66 field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. "Water Management." Free text entry or select from the suggestions', null=True, size=None), 67 ), 68 ] 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py --- a/theme/migrations/0023_alter_userprofile_subject_areas.py +++ b/theme/migrations/0023_alter_userprofile_subject_areas.py @@ -2,12 +2,25 @@ import django.contrib.postgres.fields from django.db import migrations, models +from django.db.utils import DataError +from django.core.management import call_command +import re def migrate_csv_subject_areas(apps, schema_editor): + def strip_for_dict(string=""): + # Check if the string consists of only spaces and braces + res, _ = re.subn('{|}', '', string) + if res.strip() == "": + return "" + # replace invalid braces and quotes + string = string.replace("{", "[").replace("}", "]").replace("\"", "\'") + return string.strip() + call_command('create_subject_areas_dict') SubjectArea = apps.get_model('hs_dictionary.SubjectArea') UserProfile = apps.get_model('theme.UserProfile') # Attempt to match existing SAs from profiles + errors = [] profiles_with_sa = UserProfile.objects \ .exclude(subject_areas__isnull=True) \ .exclude(subject_areas='') @@ -21,8 +34,16 @@ print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}') new_subj_areas = [] for subject in old_subject_areas: + if subject == '': + # There is a trailing comma that we need to remove + continue + stripped_subject = strip_for_dict(subject) + if stripped_subject == '': + # The subject contained only invalid chars + print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped") + continue print(f"Searching for a match with '{subject}'") - match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()] + match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()] if match: new_subj_areas.append(match[0].name) if match[0].name == subject: @@ -30,24 +51,33 @@ else: print(f'- Near match with pre-existing subject area: {subject}') else: - if subject.strip() == subject: + if stripped_subject == subject: print(f"- Unmatched subject area '{subject}' will remain unaltered") new_subj_areas.append(subject) else: - print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped") - new_subj_areas.append(subject.strip()) + print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped") + new_subj_areas.append(stripped_subject) sas = ','.join(new_subj_areas) - print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}') + message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}' + print(message) profile.subject_areas = f'{{{sas}}}' + try: + profile.save() + except DataError as e: + errors.append(f'Error saving profile: {e}' + message) + + profiles_without_sa = UserProfile.objects.filter(subject_areas='') + for profile in profiles_without_sa: + print(f'Updating {profile} from "" to {{}}') + profile.subject_areas = '{}' profile.save() - profiles_without_sa = UserProfile.objects \ - .filter(subject_areas='') - for profile in profiles_without_sa: - print(f'Updating {profile} from "" to {{}}') - profile.subject_areas = '{}' - profile.save() + print("Done updating Subject Areas.") + if errors: + print("Errors during update:") + for error in errors: + print(error) class Migration(migrations.Migration):
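The core of this patch is the `strip_for_dict` helper, which normalizes the legacy comma-separated subject-area strings before they are matched against the new dictionary and written back as a Postgres array literal. Here is a small standalone sketch of that normalization; the sample legacy value is invented for illustration, and the real migration additionally calls `create_subject_areas_dict` and catches `DataError` on save.

```python
import re


def strip_for_dict(string=""):
    # Same normalisation as the migration's helper: drop values that are only
    # braces/whitespace, then replace braces and double quotes that would
    # break a Postgres array literal.
    res, _ = re.subn('{|}', '', string)
    if res.strip() == "":
        return ""
    string = string.replace("{", "[").replace("}", "]").replace('"', "'")
    return string.strip()


legacy = 'Hydrology, {Water Management} , "GIS",'   # made-up legacy CSV value
cleaned = []
for subject in legacy.split(','):
    normalised = strip_for_dict(subject)
    if normalised:
        cleaned.append(normalised)

print(cleaned)                        # ['Hydrology', '[Water Management]', "'GIS'"]
print('{' + ','.join(cleaned) + '}')  # array literal written back to subject_areas
```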
{"golden_diff": "diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py\n--- a/theme/migrations/0023_alter_userprofile_subject_areas.py\n+++ b/theme/migrations/0023_alter_userprofile_subject_areas.py\n@@ -2,12 +2,25 @@\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n+from django.db.utils import DataError\n+from django.core.management import call_command\n+import re\n \n \n def migrate_csv_subject_areas(apps, schema_editor):\n+ def strip_for_dict(string=\"\"):\n+ # Check if the string consists of only spaces and braces\n+ res, _ = re.subn('{|}', '', string)\n+ if res.strip() == \"\":\n+ return \"\"\n+ # replace invalid braces and quotes\n+ string = string.replace(\"{\", \"[\").replace(\"}\", \"]\").replace(\"\\\"\", \"\\'\")\n+ return string.strip()\n+ call_command('create_subject_areas_dict')\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n+ errors = []\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n@@ -21,8 +34,16 @@\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n+ if subject == '':\n+ # There is a trailing comma that we need to remove\n+ continue\n+ stripped_subject = strip_for_dict(subject)\n+ if stripped_subject == '':\n+ # The subject contained only invalid chars\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ continue\n print(f\"Searching for a match with '{subject}'\")\n- match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n+ match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n@@ -30,24 +51,33 @@\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n- if subject.strip() == subject:\n+ if stripped_subject == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n- print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n- new_subj_areas.append(subject.strip())\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ new_subj_areas.append(stripped_subject)\n \n sas = ','.join(new_subj_areas)\n- print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n+ message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'\n+ print(message)\n profile.subject_areas = f'{{{sas}}}'\n+ try:\n+ profile.save()\n+ except DataError as e:\n+ errors.append(f'Error saving profile: {e}' + message)\n+\n+ profiles_without_sa = UserProfile.objects.filter(subject_areas='')\n+ for profile in profiles_without_sa:\n+ print(f'Updating {profile} from \"\" to {{}}')\n+ profile.subject_areas = '{}'\n profile.save()\n \n- profiles_without_sa = UserProfile.objects \\\n- .filter(subject_areas='')\n- for profile in profiles_without_sa:\n- print(f'Updating {profile} from \"\" to {{}}')\n- profile.subject_areas = '{}'\n- profile.save()\n+ print(\"Done updating Subject Areas.\")\n+ if errors:\n+ print(\"Errors during update:\")\n+ for error in errors:\n+ print(error)\n \n \n class 
Migration(migrations.Migration):\n", "issue": "add a select for \"subject areas\"\n**Describe the feature you'd like and what it will do**\r\nCurrently, subject areas is free text.\r\nShould be a typeahead -- noninvasive.\r\n@martinseul suggested that we should populate a list similar to\r\nhttps://has.arizona.edu/research-focus-areas\r\n\r\n\r\nRelated to https://github.com/hydroshare/hydroshare/issues/4733\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.15 on 2023-04-11 18:41\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\ndef migrate_csv_subject_areas(apps, schema_editor):\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n\n subject_area_objects = SubjectArea.objects.all()\n\n for profile in profiles_with_sa:\n old_subject_areas = profile.subject_areas.split(',')\n old_subject_areas = [s for s in old_subject_areas]\n print('*' * 100)\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n print(f\"Searching for a match with '{subject}'\")\n match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n print(f'- Exact match with pre-existing subject area: {subject}')\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n if subject.strip() == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n new_subj_areas.append(subject.strip())\n\n sas = ','.join(new_subj_areas)\n print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n profile.subject_areas = f'{{{sas}}}'\n profile.save()\n\n profiles_without_sa = UserProfile.objects \\\n .filter(subject_areas='')\n for profile in profiles_without_sa:\n print(f'Updating {profile} from \"\" to {{}}')\n profile.subject_areas = '{}'\n profile.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theme', '0022_alter_userprofile_subject_areas'),\n ]\n\n operations = [\n migrations.RunSQL(\"UPDATE theme_userprofile set subject_areas = NULL \"\n \"where theme_userprofile.subject_areas like '';\"),\n migrations.RunPython(migrate_csv_subject_areas),\n migrations.AlterField(\n model_name='userprofile',\n name='subject_areas',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. 
\"Water Management.\" Free text entry or select from the suggestions', null=True, size=None),\n ),\n ]\n", "path": "theme/migrations/0023_alter_userprofile_subject_areas.py"}], "after_files": [{"content": "# Generated by Django 3.2.15 on 2023-04-11 18:41\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\nfrom django.db.utils import DataError\nfrom django.core.management import call_command\nimport re\n\n\ndef migrate_csv_subject_areas(apps, schema_editor):\n def strip_for_dict(string=\"\"):\n # Check if the string consists of only spaces and braces\n res, _ = re.subn('{|}', '', string)\n if res.strip() == \"\":\n return \"\"\n # replace invalid braces and quotes\n string = string.replace(\"{\", \"[\").replace(\"}\", \"]\").replace(\"\\\"\", \"\\'\")\n return string.strip()\n call_command('create_subject_areas_dict')\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n errors = []\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n\n subject_area_objects = SubjectArea.objects.all()\n\n for profile in profiles_with_sa:\n old_subject_areas = profile.subject_areas.split(',')\n old_subject_areas = [s for s in old_subject_areas]\n print('*' * 100)\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n if subject == '':\n # There is a trailing comma that we need to remove\n continue\n stripped_subject = strip_for_dict(subject)\n if stripped_subject == '':\n # The subject contained only invalid chars\n print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n continue\n print(f\"Searching for a match with '{subject}'\")\n match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n print(f'- Exact match with pre-existing subject area: {subject}')\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n if stripped_subject == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n new_subj_areas.append(stripped_subject)\n\n sas = ','.join(new_subj_areas)\n message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'\n print(message)\n profile.subject_areas = f'{{{sas}}}'\n try:\n profile.save()\n except DataError as e:\n errors.append(f'Error saving profile: {e}' + message)\n\n profiles_without_sa = UserProfile.objects.filter(subject_areas='')\n for profile in profiles_without_sa:\n print(f'Updating {profile} from \"\" to {{}}')\n profile.subject_areas = '{}'\n profile.save()\n\n print(\"Done updating Subject Areas.\")\n if errors:\n print(\"Errors during update:\")\n for error in errors:\n print(error)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theme', '0022_alter_userprofile_subject_areas'),\n ]\n\n operations = [\n migrations.RunSQL(\"UPDATE theme_userprofile set subject_areas = NULL \"\n \"where theme_userprofile.subject_areas like '';\"),\n migrations.RunPython(migrate_csv_subject_areas),\n migrations.AlterField(\n model_name='userprofile',\n name='subject_areas',\n 
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. \"Water Management.\" Free text entry or select from the suggestions', null=True, size=None),\n ),\n ]\n", "path": "theme/migrations/0023_alter_userprofile_subject_areas.py"}]}
1,107
897
gh_patches_debug_14475
rasdani/github-patches
git_diff
pre-commit__pre-commit-2686
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ParserError exception raised for invalid configuration ### search you tried in the issue tracker ParserError and unicode ### describe your issue I executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message: ```` $ pre-commit autoupdate An unexpected error has occurred: ParserError: while parsing a block mapping in "<unicode string>", line 1, column 1 did not find expected key in "<unicode string>", line 7, column 1 Check the log at /home/carsten/.cache/pre-commit/pre-commit.log ```` This is an expected error and I would expect an error message like `Your configuration file "..." is wrongly formatted at <pos>. Please review the format of the content.'. Thank you, Carsten ### pre-commit --version pre-commit 2.21.0 ### .pre-commit-config.yaml ```yaml repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - id: check-executables-have-shebangs - repo: https://github.com/shellcheck-py/shellcheck-py rev: v0.9.0.2 hooks: - id: shellcheck ``` ### ~/.cache/pre-commit/pre-commit.log (if present) ### version information ``` pre-commit version: 2.21.0 git --version: git version 2.35.3 sys.version: 3.10.8 (main, Oct 28 2022, 17:28:32) [GCC] sys.executable: /home/carsten/virtualenv/bin/python3.10 os.name: posix sys.platform: linux ``` ### error information ``` An unexpected error has occurred: ParserError: while parsing a block mapping in "<unicode string>", line 1, column 1 did not find expected key in "<unicode string>", line 7, column 1 ``` ``` Traceback (most recent call last): File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py", line 73, in error_handler yield File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py", line 355, in main return autoupdate( File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py", line 154, in autoupdate migrate_config(config_file, quiet=True) File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 47, in migrate_config contents = _migrate_map(contents) File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 16, in _migrate_map if isinstance(yaml_load(contents), list): File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py", line 81, in load return loader.get_single_data() File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py", line 49, in get_single_data node = self.get_single_node() File "yaml/_yaml.pyx", line 673, in yaml._yaml.CParser.get_single_node File "yaml/_yaml.pyx", line 687, in yaml._yaml.CParser._compose_document File "yaml/_yaml.pyx", line 731, in yaml._yaml.CParser._compose_node File "yaml/_yaml.pyx", line 847, in yaml._yaml.CParser._compose_mapping_node File "yaml/_yaml.pyx", line 860, in yaml._yaml.CParser._parse_next_event yaml.parser.ParserError: while parsing a block mapping in "<unicode string>", line 1, column 1 did not find expected key in "<unicode string>", line 7, column 1 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pre_commit/commands/migrate_config.py` Content: ``` 1 from __future__ import annotations 2 3 import re 4 import textwrap 5 6 import yaml 7 8 from pre_commit.yaml import yaml_load 9 10 11 def _is_header_line(line: str) -> bool: 12 return line.startswith(('#', '---')) or not line.strip() 13 14 15 def _migrate_map(contents: str) -> str: 16 if isinstance(yaml_load(contents), list): 17 # Find the first non-header line 18 lines = contents.splitlines(True) 19 i = 0 20 # Only loop on non empty configuration file 21 while i < len(lines) and _is_header_line(lines[i]): 22 i += 1 23 24 header = ''.join(lines[:i]) 25 rest = ''.join(lines[i:]) 26 27 # If they are using the "default" flow style of yaml, this operation 28 # will yield a valid configuration 29 try: 30 trial_contents = f'{header}repos:\n{rest}' 31 yaml_load(trial_contents) 32 contents = trial_contents 33 except yaml.YAMLError: 34 contents = f'{header}repos:\n{textwrap.indent(rest, " " * 4)}' 35 36 return contents 37 38 39 def _migrate_sha_to_rev(contents: str) -> str: 40 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents) 41 42 43 def migrate_config(config_file: str, quiet: bool = False) -> int: 44 with open(config_file) as f: 45 orig_contents = contents = f.read() 46 47 contents = _migrate_map(contents) 48 contents = _migrate_sha_to_rev(contents) 49 50 if contents != orig_contents: 51 with open(config_file, 'w') as f: 52 f.write(contents) 53 54 print('Configuration has been migrated.') 55 elif not quiet: 56 print('Configuration is already migrated.') 57 return 0 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py --- a/pre_commit/commands/migrate_config.py +++ b/pre_commit/commands/migrate_config.py @@ -3,8 +3,10 @@ import re import textwrap +import cfgv import yaml +from pre_commit.clientlib import InvalidConfigError from pre_commit.yaml import yaml_load @@ -44,6 +46,13 @@ with open(config_file) as f: orig_contents = contents = f.read() + with cfgv.reraise_as(InvalidConfigError): + with cfgv.validate_context(f'File {config_file}'): + try: + yaml_load(orig_contents) + except Exception as e: + raise cfgv.ValidationError(str(e)) + contents = _migrate_map(contents) contents = _migrate_sha_to_rev(contents)
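The patch itself leans on `cfgv.reraise_as` and `cfgv.validate_context` together with `pre_commit.clientlib.InvalidConfigError`; the sketch below is a simplified stand-in built on PyYAML alone, just to show the shape of the behaviour change: a malformed config surfaces as a labelled configuration error instead of an unhandled `ParserError` traceback. `InvalidConfigError` here is a local placeholder class, and the sample config reproduces the mis-indented second `- repo` entry from the report.

```python
import yaml


class InvalidConfigError(ValueError):
    """Local stand-in for pre_commit.clientlib.InvalidConfigError."""


BAD_CONFIG = """\
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-executables-have-shebangs

- repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.2
    hooks:
      - id: shellcheck
"""


def load_config(contents, filename='.pre-commit-config.yaml'):
    # Validate the YAML up front and wrap parser failures in a
    # configuration error that names the offending file.
    try:
        return yaml.safe_load(contents)
    except yaml.YAMLError as e:
        raise InvalidConfigError(f'=====> File {filename}\n{e}') from e


try:
    load_config(BAD_CONFIG)
except InvalidConfigError as e:
    print(e)   # "=====> File .pre-commit-config.yaml" followed by the parser message
```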
{"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -3,8 +3,10 @@\n import re\n import textwrap\n \n+import cfgv\n import yaml\n \n+from pre_commit.clientlib import InvalidConfigError\n from pre_commit.yaml import yaml_load\n \n \n@@ -44,6 +46,13 @@\n with open(config_file) as f:\n orig_contents = contents = f.read()\n \n+ with cfgv.reraise_as(InvalidConfigError):\n+ with cfgv.validate_context(f'File {config_file}'):\n+ try:\n+ yaml_load(orig_contents)\n+ except Exception as e:\n+ raise cfgv.ValidationError(str(e))\n+\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n", "issue": "ParserError exception raised for invalid configuration\n### search you tried in the issue tracker\r\n\r\nParserError and unicode\r\n\r\n### describe your issue\r\n\r\nI executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message:\r\n\r\n````\r\n$ pre-commit autoupdate\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\nCheck the log at /home/carsten/.cache/pre-commit/pre-commit.log\r\n````\r\n\r\nThis is an expected error and I would expect an error message like `Your configuration file \"...\" is wrongly formatted at <pos>. Please review the format of the content.'.\r\n\r\nThank you,\r\nCarsten\r\n\r\n### pre-commit --version\r\n\r\npre-commit 2.21.0\r\n\r\n### .pre-commit-config.yaml\r\n\r\n```yaml\r\nrepos:\r\n - repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: v4.4.0\r\n hooks:\r\n - id: check-executables-have-shebangs\r\n\r\n- repo: https://github.com/shellcheck-py/shellcheck-py\r\n rev: v0.9.0.2\r\n hooks:\r\n - id: shellcheck\r\n```\r\n\r\n\r\n### ~/.cache/pre-commit/pre-commit.log (if present)\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.21.0\r\ngit --version: git version 2.35.3\r\nsys.version:\r\n 3.10.8 (main, Oct 28 2022, 17:28:32) [GCC]\r\nsys.executable: /home/carsten/virtualenv/bin/python3.10\r\nos.name: posix\r\nsys.platform: linux\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py\", line 73, in error_handler\r\n yield\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py\", line 355, in main\r\n return autoupdate(\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py\", line 154, in autoupdate\r\n migrate_config(config_file, quiet=True)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 47, in migrate_config\r\n contents = _migrate_map(contents)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 16, in _migrate_map\r\n if isinstance(yaml_load(contents), list):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py\", line 81, in load\r\n return loader.get_single_data()\r\n File 
\"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py\", line 49, in get_single_data\r\n node = self.get_single_node()\r\n File \"yaml/_yaml.pyx\", line 673, in yaml._yaml.CParser.get_single_node\r\n File \"yaml/_yaml.pyx\", line 687, in yaml._yaml.CParser._compose_document\r\n File \"yaml/_yaml.pyx\", line 731, in yaml._yaml.CParser._compose_node\r\n File \"yaml/_yaml.pyx\", line 847, in yaml._yaml.CParser._compose_mapping_node\r\n File \"yaml/_yaml.pyx\", line 860, in yaml._yaml.CParser._parse_next_event\r\nyaml.parser.ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport yaml\n\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport cfgv\nimport yaml\n\nfrom pre_commit.clientlib import InvalidConfigError\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n with cfgv.reraise_as(InvalidConfigError):\n with 
cfgv.validate_context(f'File {config_file}'):\n try:\n yaml_load(orig_contents)\n except Exception as e:\n raise cfgv.ValidationError(str(e))\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]}
1,703
199
gh_patches_debug_22028
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-910
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Master pod should not be created when docker image failed to push Docker image failed to push: ``` ===== Pushing Docker Image ===== {'status': 'The push refers to repository [reg.docker.alibaba-inc.com/yuan.tang/elasticdl]'} {'status': 'Preparing', 'progressDetail': {}, 'id': 'a29e21fbecd7'} ... {'status': 'Waiting', 'progressDetail': {}, 'id': '2ee61facf36a'} {'status': 'Waiting', 'progressDetail': {}, 'id': '05ea944dce26'} {'errorDetail': {'message': 'denied: requested access to the resource is denied'}, 'error': 'denied: requested access to the resource is denied'} ``` But the master pod is created. We should not start the master pod in this case since it cannot find the image anyways. Otherwise users would have to delete the pod themselves before submitting a new elasticdl job. ``` Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 1m default-scheduler Successfully assigned elasticdl-test-mnist-terry-master to ckas389w00k Normal Pulling 1m kubelet, ckas389w00k pulling image "reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f" Warning Failed 1m kubelet, ckas389w00k Failed to pull image "reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f": [rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown, rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown] Warning Failed 1m kubelet, ckas389w00k Error: ErrImagePull Warning MissingClusterDNS 1m (x12 over 1m) kubelet, ckas389w00k pod: "elasticdl-test-mnist-terry-master_kubemaker(c14c88d4-a4c0-11e9-bf35-02420ba6e969)". kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to "Default" policy. Normal BackOff 1m (x5 over 1m) kubelet, ckas389w00k Back-off pulling image "reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f" Warning Failed 1m (x5 over 1m) kubelet, ckas389w00k Error: ImagePullBackOff ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticdl/python/elasticdl/image_builder.py` Content: ``` 1 import os 2 import shutil 3 import tempfile 4 import uuid 5 from urllib.parse import urlparse 6 7 import docker 8 9 10 def build_and_push_docker_image( 11 model_zoo, 12 docker_image_prefix, 13 base_image="", 14 extra_pypi="", 15 cluster_spec="", 16 ): 17 """Build and push a Docker image containing ElasticDL and the model 18 zoo. The parameter model_zoo could be a local directory or an URL. 19 In the later case, we do git clone. 20 21 The basename of the Docker image is auto-generated and is globally 22 unique. The full name is docker_image_prefix + "/" + basename. 23 24 The fullname of the Docker image is docker_image_prefix + "/" + 25 basename. Unless prefix is None or "", _push_docker_image is called 26 after _build_docker_image. 27 28 Returns the full Docker image name. So the caller can docker rmi 29 fullname later. 30 31 """ 32 with tempfile.TemporaryDirectory() as ctx_dir: 33 # Copy ElasticDL Python source tree into the context directory. 34 elasticdl = _find_elasticdl_root() 35 shutil.copytree( 36 elasticdl, os.path.join(ctx_dir, os.path.basename(elasticdl)) 37 ) 38 39 # Copy model zoo source tree into the context directory. 
40 shutil.copytree( 41 model_zoo, os.path.join(ctx_dir, os.path.basename(model_zoo)) 42 ) 43 44 # Copy cluster specification file into the context directory. 45 if cluster_spec: 46 shutil.copy( 47 cluster_spec, 48 os.path.join(ctx_dir, os.path.basename(cluster_spec)), 49 ) 50 51 # Create the Dockerfile. 52 with tempfile.NamedTemporaryFile(mode="w+", delete=False) as df: 53 df.write( 54 _create_dockerfile( 55 os.path.basename(elasticdl), 56 os.path.basename(model_zoo), 57 os.path.basename(cluster_spec), 58 base_image, 59 extra_pypi, 60 ) 61 ) 62 63 image_name = _generate_unique_image_name(docker_image_prefix) 64 client = docker.APIClient(base_url="unix://var/run/docker.sock") 65 _build_docker_image(client, ctx_dir, df.name, image_name) 66 67 if docker_image_prefix: 68 _push_docker_image(client, image_name) 69 70 return image_name 71 72 73 def _find_elasticdl_root(): 74 return os.path.abspath( 75 os.path.join(os.path.dirname(__file__), "../../../") 76 ) 77 78 79 def _create_dockerfile( 80 elasticdl, model_zoo, cluster_spec="", base_image="", extra_pypi_index="" 81 ): 82 LOCAL_ZOO = """ 83 FROM {BASE_IMAGE} as base 84 ENV PYTHONPATH=/:/model_zoo 85 COPY {ELASTIC_DL}/elasticdl /elasticdl 86 RUN pip install -r elasticdl/requirements.txt \ 87 --extra-index-url="${EXTRA_PYPI_INDEX}" 88 RUN make -f elasticdl/Makefile 89 # TODO: Need to restructure examples directory to make it conform to model_zoo 90 # convention 91 COPY {MODEL_ZOO} /model_zoo/{MODEL_ZOO} 92 ARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt 93 RUN if [ -f $REQS ]; then \ 94 pip install -r $REQS --extra-index-url="${EXTRA_PYPI_INDEX}"; \ 95 fi 96 """ 97 REMOTE_ZOO = """ 98 FROM {BASE_IMAGE} as base 99 ENV PYTHONPATH=/:/model_zoo 100 COPY {ELASTIC_DL}/elasticdl /elasticdl 101 RUN pip install -r elasticdl/requirements.txt \ 102 --extra-index-url="${EXTRA_PYPI_INDEX}" 103 RUN make -f elasticdl/Makefile 104 RUN apt-get update && apt-get install -y git 105 RUN git clone --recursive {MODEL_ZOO} /model_zoo 106 ARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt 107 RUN if [ -f $REQS ]; then \ 108 pip install -r $REQS --extra-index-url="${EXTRA_PYPI_INDEX}"; \ 109 fi 110 """ 111 pr = urlparse(model_zoo) 112 if not pr.path: 113 raise RuntimeError("model_zoo {} has no path".format(model_zoo)) 114 if pr.scheme in ["file", ""]: 115 tmpl = LOCAL_ZOO 116 model_zoo = pr.path # Remove the "file://" prefix if any. 
117 else: 118 tmpl = REMOTE_ZOO 119 120 if cluster_spec: 121 tmpl = """ 122 %s 123 COPY %s /cluster_spec/%s 124 """ % ( 125 tmpl, 126 cluster_spec, 127 cluster_spec, 128 ) 129 130 return tmpl.format( 131 BASE_IMAGE=base_image 132 if base_image 133 else "tensorflow/tensorflow:2.0.0b1-py3", 134 ELASTIC_DL=elasticdl, 135 MODEL_ZOO=model_zoo, 136 EXTRA_PYPI_INDEX=extra_pypi_index, 137 ) 138 139 140 def _generate_unique_image_name(prefix): 141 return os.path.join( 142 prefix if prefix else "", "elasticdl:" + uuid.uuid4().hex 143 ) 144 145 146 def _build_docker_image(client, ctx_dir, dockerfile, image_name): 147 print("===== Building Docker Image =====") 148 for line in client.build( 149 dockerfile=dockerfile, 150 path=ctx_dir, 151 rm=True, 152 tag=image_name, 153 decode=True, 154 ): 155 error = line.get("error", None) 156 if error: 157 raise RuntimeError("Docker image build: " + error) 158 text = line.get("stream", None) 159 if text: 160 print(text) 161 162 163 def _push_docker_image(client, image_name): 164 print("===== Pushing Docker Image =====") 165 for line in client.push(image_name, stream=True, decode=True): 166 print(line) 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticdl/python/elasticdl/image_builder.py b/elasticdl/python/elasticdl/image_builder.py --- a/elasticdl/python/elasticdl/image_builder.py +++ b/elasticdl/python/elasticdl/image_builder.py @@ -143,6 +143,15 @@ ) +def _print_docker_progress(line): + error = line.get("error", None) + if error: + raise RuntimeError("Docker image build: " + error) + text = line.get("stream", None) + if text: + print(text) + + def _build_docker_image(client, ctx_dir, dockerfile, image_name): print("===== Building Docker Image =====") for line in client.build( @@ -152,15 +161,10 @@ tag=image_name, decode=True, ): - error = line.get("error", None) - if error: - raise RuntimeError("Docker image build: " + error) - text = line.get("stream", None) - if text: - print(text) + _print_docker_progress(line) def _push_docker_image(client, image_name): print("===== Pushing Docker Image =====") for line in client.push(image_name, stream=True, decode=True): - print(line) + _print_docker_progress(line)
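The fix routes push output through the same error-checking helper already used for builds, so a failed push raises before the caller goes on to create the master pod. Below is a self-contained sketch of that behaviour, fed with hand-written status lines instead of a live Docker daemon; the sample error echoes the "denied" message from the report. Note that the helper in the patch only prints the `stream` key, while this sketch also falls back to `status` so push progress is visible.

```python
def print_docker_progress(line):
    # Raise as soon as the daemon reports an error instead of printing it
    # and carrying on; this is what prevents the master pod from being
    # created against an image that was never pushed.
    error = line.get("error", None)
    if error:
        raise RuntimeError("Docker image push: " + error)
    text = line.get("stream", None) or line.get("status", None)
    if text:
        print(text)


# Hand-written stand-ins for client.push(image, stream=True, decode=True):
fake_push_output = [
    {"status": "Preparing", "progressDetail": {}, "id": "a29e21fbecd7"},
    {
        "errorDetail": {"message": "denied: requested access to the resource is denied"},
        "error": "denied: requested access to the resource is denied",
    },
]

try:
    for line in fake_push_output:
        print_docker_progress(line)
except RuntimeError as exc:
    print("Image push failed, aborting before master pod creation:", exc)
```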
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/image_builder.py b/elasticdl/python/elasticdl/image_builder.py\n--- a/elasticdl/python/elasticdl/image_builder.py\n+++ b/elasticdl/python/elasticdl/image_builder.py\n@@ -143,6 +143,15 @@\n )\n \n \n+def _print_docker_progress(line):\n+ error = line.get(\"error\", None)\n+ if error:\n+ raise RuntimeError(\"Docker image build: \" + error)\n+ text = line.get(\"stream\", None)\n+ if text:\n+ print(text)\n+\n+\n def _build_docker_image(client, ctx_dir, dockerfile, image_name):\n print(\"===== Building Docker Image =====\")\n for line in client.build(\n@@ -152,15 +161,10 @@\n tag=image_name,\n decode=True,\n ):\n- error = line.get(\"error\", None)\n- if error:\n- raise RuntimeError(\"Docker image build: \" + error)\n- text = line.get(\"stream\", None)\n- if text:\n- print(text)\n+ _print_docker_progress(line)\n \n \n def _push_docker_image(client, image_name):\n print(\"===== Pushing Docker Image =====\")\n for line in client.push(image_name, stream=True, decode=True):\n- print(line)\n+ _print_docker_progress(line)\n", "issue": "Master pod should not be created when docker image failed to push\nDocker image failed to push:\r\n```\r\n===== Pushing Docker Image =====\r\n{'status': 'The push refers to repository [reg.docker.alibaba-inc.com/yuan.tang/elasticdl]'}\r\n{'status': 'Preparing', 'progressDetail': {}, 'id': 'a29e21fbecd7'}\r\n...\r\n{'status': 'Waiting', 'progressDetail': {}, 'id': '2ee61facf36a'}\r\n{'status': 'Waiting', 'progressDetail': {}, 'id': '05ea944dce26'}\r\n{'errorDetail': {'message': 'denied: requested access to the resource is denied'}, 'error': 'denied: requested access to the resource is denied'}\r\n```\r\nBut the master pod is created. We should not start the master pod in this case since it cannot find the image anyways. Otherwise users would have to delete the pod themselves before submitting a new elasticdl job.\r\n```\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal Scheduled 1m default-scheduler Successfully assigned elasticdl-test-mnist-terry-master to ckas389w00k\r\n Normal Pulling 1m kubelet, ckas389w00k pulling image \"reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f\"\r\n Warning Failed 1m kubelet, ckas389w00k Failed to pull image \"reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f\": [rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown, rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown]\r\n Warning Failed 1m kubelet, ckas389w00k Error: ErrImagePull\r\n Warning MissingClusterDNS 1m (x12 over 1m) kubelet, ckas389w00k pod: \"elasticdl-test-mnist-terry-master_kubemaker(c14c88d4-a4c0-11e9-bf35-02420ba6e969)\". kubelet does not have ClusterDNS IP configured and cannot create Pod using \"ClusterFirst\" policy. 
Falling back to \"Default\" policy.\r\n Normal BackOff 1m (x5 over 1m) kubelet, ckas389w00k Back-off pulling image \"reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f\"\r\n Warning Failed 1m (x5 over 1m) kubelet, ckas389w00k Error: ImagePullBackOff\r\n```\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\nimport uuid\nfrom urllib.parse import urlparse\n\nimport docker\n\n\ndef build_and_push_docker_image(\n model_zoo,\n docker_image_prefix,\n base_image=\"\",\n extra_pypi=\"\",\n cluster_spec=\"\",\n):\n \"\"\"Build and push a Docker image containing ElasticDL and the model\nzoo. The parameter model_zoo could be a local directory or an URL.\nIn the later case, we do git clone.\n\n The basename of the Docker image is auto-generated and is globally\nunique. The full name is docker_image_prefix + \"/\" + basename.\n\n The fullname of the Docker image is docker_image_prefix + \"/\" +\nbasename. Unless prefix is None or \"\", _push_docker_image is called\nafter _build_docker_image.\n\n Returns the full Docker image name. So the caller can docker rmi\n fullname later.\n\n \"\"\"\n with tempfile.TemporaryDirectory() as ctx_dir:\n # Copy ElasticDL Python source tree into the context directory.\n elasticdl = _find_elasticdl_root()\n shutil.copytree(\n elasticdl, os.path.join(ctx_dir, os.path.basename(elasticdl))\n )\n\n # Copy model zoo source tree into the context directory.\n shutil.copytree(\n model_zoo, os.path.join(ctx_dir, os.path.basename(model_zoo))\n )\n\n # Copy cluster specification file into the context directory.\n if cluster_spec:\n shutil.copy(\n cluster_spec,\n os.path.join(ctx_dir, os.path.basename(cluster_spec)),\n )\n\n # Create the Dockerfile.\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False) as df:\n df.write(\n _create_dockerfile(\n os.path.basename(elasticdl),\n os.path.basename(model_zoo),\n os.path.basename(cluster_spec),\n base_image,\n extra_pypi,\n )\n )\n\n image_name = _generate_unique_image_name(docker_image_prefix)\n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n _build_docker_image(client, ctx_dir, df.name, image_name)\n\n if docker_image_prefix:\n _push_docker_image(client, image_name)\n\n return image_name\n\n\ndef _find_elasticdl_root():\n return os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"../../../\")\n )\n\n\ndef _create_dockerfile(\n elasticdl, model_zoo, cluster_spec=\"\", base_image=\"\", extra_pypi_index=\"\"\n):\n LOCAL_ZOO = \"\"\"\nFROM {BASE_IMAGE} as base\nENV PYTHONPATH=/:/model_zoo\nCOPY {ELASTIC_DL}/elasticdl /elasticdl\nRUN pip install -r elasticdl/requirements.txt \\\n --extra-index-url=\"${EXTRA_PYPI_INDEX}\"\nRUN make -f elasticdl/Makefile\n# TODO: Need to restructure examples directory to make it conform to model_zoo\n# convention\nCOPY {MODEL_ZOO} /model_zoo/{MODEL_ZOO}\nARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt\nRUN if [ -f $REQS ]; then \\\n pip install -r $REQS --extra-index-url=\"${EXTRA_PYPI_INDEX}\"; \\\n fi\n\"\"\"\n REMOTE_ZOO = \"\"\"\nFROM {BASE_IMAGE} as base\nENV PYTHONPATH=/:/model_zoo\nCOPY {ELASTIC_DL}/elasticdl /elasticdl\nRUN pip install -r elasticdl/requirements.txt \\\n --extra-index-url=\"${EXTRA_PYPI_INDEX}\"\nRUN make -f elasticdl/Makefile\nRUN apt-get update && apt-get install -y git\nRUN git clone --recursive {MODEL_ZOO} /model_zoo\nARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt\nRUN if [ -f $REQS ]; then \\\n pip install -r $REQS --extra-index-url=\"${EXTRA_PYPI_INDEX}\"; \\\n fi\n\"\"\"\n pr = 
urlparse(model_zoo)\n if not pr.path:\n raise RuntimeError(\"model_zoo {} has no path\".format(model_zoo))\n if pr.scheme in [\"file\", \"\"]:\n tmpl = LOCAL_ZOO\n model_zoo = pr.path # Remove the \"file://\" prefix if any.\n else:\n tmpl = REMOTE_ZOO\n\n if cluster_spec:\n tmpl = \"\"\"\n%s\nCOPY %s /cluster_spec/%s\n\"\"\" % (\n tmpl,\n cluster_spec,\n cluster_spec,\n )\n\n return tmpl.format(\n BASE_IMAGE=base_image\n if base_image\n else \"tensorflow/tensorflow:2.0.0b1-py3\",\n ELASTIC_DL=elasticdl,\n MODEL_ZOO=model_zoo,\n EXTRA_PYPI_INDEX=extra_pypi_index,\n )\n\n\ndef _generate_unique_image_name(prefix):\n return os.path.join(\n prefix if prefix else \"\", \"elasticdl:\" + uuid.uuid4().hex\n )\n\n\ndef _build_docker_image(client, ctx_dir, dockerfile, image_name):\n print(\"===== Building Docker Image =====\")\n for line in client.build(\n dockerfile=dockerfile,\n path=ctx_dir,\n rm=True,\n tag=image_name,\n decode=True,\n ):\n error = line.get(\"error\", None)\n if error:\n raise RuntimeError(\"Docker image build: \" + error)\n text = line.get(\"stream\", None)\n if text:\n print(text)\n\n\ndef _push_docker_image(client, image_name):\n print(\"===== Pushing Docker Image =====\")\n for line in client.push(image_name, stream=True, decode=True):\n print(line)\n", "path": "elasticdl/python/elasticdl/image_builder.py"}], "after_files": [{"content": "import os\nimport shutil\nimport tempfile\nimport uuid\nfrom urllib.parse import urlparse\n\nimport docker\n\n\ndef build_and_push_docker_image(\n model_zoo,\n docker_image_prefix,\n base_image=\"\",\n extra_pypi=\"\",\n cluster_spec=\"\",\n):\n \"\"\"Build and push a Docker image containing ElasticDL and the model\nzoo. The parameter model_zoo could be a local directory or an URL.\nIn the later case, we do git clone.\n\n The basename of the Docker image is auto-generated and is globally\nunique. The full name is docker_image_prefix + \"/\" + basename.\n\n The fullname of the Docker image is docker_image_prefix + \"/\" +\nbasename. Unless prefix is None or \"\", _push_docker_image is called\nafter _build_docker_image.\n\n Returns the full Docker image name. 
So the caller can docker rmi\n fullname later.\n\n \"\"\"\n with tempfile.TemporaryDirectory() as ctx_dir:\n # Copy ElasticDL Python source tree into the context directory.\n elasticdl = _find_elasticdl_root()\n shutil.copytree(\n elasticdl, os.path.join(ctx_dir, os.path.basename(elasticdl))\n )\n\n # Copy model zoo source tree into the context directory.\n shutil.copytree(\n model_zoo, os.path.join(ctx_dir, os.path.basename(model_zoo))\n )\n\n # Copy cluster specification file into the context directory.\n if cluster_spec:\n shutil.copy(\n cluster_spec,\n os.path.join(ctx_dir, os.path.basename(cluster_spec)),\n )\n\n # Create the Dockerfile.\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False) as df:\n df.write(\n _create_dockerfile(\n os.path.basename(elasticdl),\n os.path.basename(model_zoo),\n os.path.basename(cluster_spec),\n base_image,\n extra_pypi,\n )\n )\n\n image_name = _generate_unique_image_name(docker_image_prefix)\n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n _build_docker_image(client, ctx_dir, df.name, image_name)\n\n if docker_image_prefix:\n _push_docker_image(client, image_name)\n\n return image_name\n\n\ndef _find_elasticdl_root():\n return os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"../../../\")\n )\n\n\ndef _create_dockerfile(\n elasticdl, model_zoo, cluster_spec=\"\", base_image=\"\", extra_pypi_index=\"\"\n):\n LOCAL_ZOO = \"\"\"\nFROM {BASE_IMAGE} as base\nENV PYTHONPATH=/:/model_zoo\nCOPY {ELASTIC_DL}/elasticdl /elasticdl\nRUN pip install -r elasticdl/requirements.txt \\\n --extra-index-url=\"${EXTRA_PYPI_INDEX}\"\nRUN make -f elasticdl/Makefile\n# TODO: Need to restructure examples directory to make it conform to model_zoo\n# convention\nCOPY {MODEL_ZOO} /model_zoo/{MODEL_ZOO}\nARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt\nRUN if [ -f $REQS ]; then \\\n pip install -r $REQS --extra-index-url=\"${EXTRA_PYPI_INDEX}\"; \\\n fi\n\"\"\"\n REMOTE_ZOO = \"\"\"\nFROM {BASE_IMAGE} as base\nENV PYTHONPATH=/:/model_zoo\nCOPY {ELASTIC_DL}/elasticdl /elasticdl\nRUN pip install -r elasticdl/requirements.txt \\\n --extra-index-url=\"${EXTRA_PYPI_INDEX}\"\nRUN make -f elasticdl/Makefile\nRUN apt-get update && apt-get install -y git\nRUN git clone --recursive {MODEL_ZOO} /model_zoo\nARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt\nRUN if [ -f $REQS ]; then \\\n pip install -r $REQS --extra-index-url=\"${EXTRA_PYPI_INDEX}\"; \\\n fi\n\"\"\"\n pr = urlparse(model_zoo)\n if not pr.path:\n raise RuntimeError(\"model_zoo {} has no path\".format(model_zoo))\n if pr.scheme in [\"file\", \"\"]:\n tmpl = LOCAL_ZOO\n model_zoo = pr.path # Remove the \"file://\" prefix if any.\n else:\n tmpl = REMOTE_ZOO\n\n if cluster_spec:\n tmpl = \"\"\"\n%s\nCOPY %s /cluster_spec/%s\n\"\"\" % (\n tmpl,\n cluster_spec,\n cluster_spec,\n )\n\n return tmpl.format(\n BASE_IMAGE=base_image\n if base_image\n else \"tensorflow/tensorflow:2.0.0b1-py3\",\n ELASTIC_DL=elasticdl,\n MODEL_ZOO=model_zoo,\n EXTRA_PYPI_INDEX=extra_pypi_index,\n )\n\n\ndef _generate_unique_image_name(prefix):\n return os.path.join(\n prefix if prefix else \"\", \"elasticdl:\" + uuid.uuid4().hex\n )\n\n\ndef _print_docker_progress(line):\n error = line.get(\"error\", None)\n if error:\n raise RuntimeError(\"Docker image build: \" + error)\n text = line.get(\"stream\", None)\n if text:\n print(text)\n\n\ndef _build_docker_image(client, ctx_dir, dockerfile, image_name):\n print(\"===== Building Docker Image =====\")\n for line in client.build(\n dockerfile=dockerfile,\n 
path=ctx_dir,\n rm=True,\n tag=image_name,\n decode=True,\n ):\n _print_docker_progress(line)\n\n\ndef _push_docker_image(client, image_name):\n print(\"===== Pushing Docker Image =====\")\n for line in client.push(image_name, stream=True, decode=True):\n _print_docker_progress(line)\n", "path": "elasticdl/python/elasticdl/image_builder.py"}]}
2564
303
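The ElasticDL patch above routes both `client.build()` and `client.push()` output through a shared `_print_docker_progress` helper, so a decoded line carrying an `error` entry raises instead of being printed and ignored. The sketch below is illustrative only: it reproduces that helper outside the plugin and feeds it status dictionaries modelled on the push output quoted in the issue log, so no Docker daemon is needed to run it.

```python
def print_docker_progress(line):
    """Raise on an 'error' entry, otherwise print any 'stream' text (as in the patch)."""
    error = line.get("error")
    if error:
        raise RuntimeError("Docker image build: " + error)
    text = line.get("stream")
    if text:
        print(text)


# Decoded lines modelled on the failed-push output shown in the issue.
sample_push_output = [
    {"status": "Preparing", "progressDetail": {}, "id": "a29e21fbecd7"},
    {
        "errorDetail": {"message": "denied: requested access to the resource is denied"},
        "error": "denied: requested access to the resource is denied",
    },
]

try:
    for line in sample_push_output:
        print_docker_progress(line)
except RuntimeError as exc:
    # With the patch, the caller sees this exception and never gets as far as
    # creating the master pod.
    print("push aborted:", exc)
```

Before the patch, the second dictionary would simply have been printed by `_push_docker_image`, and job submission would have continued to create a master pod that can never pull the image.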
gh_patches_debug_11999
rasdani/github-patches
git_diff
chainer__chainer-249
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add type check to LeakyReLU function Related to #123 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer/functions/leaky_relu.py` Content: ``` 1 from chainer import cuda 2 from chainer import function 3 4 5 def _kern(): 6 return cuda.elementwise( 7 'float* y, const float* cond, const float* x, float slope', 8 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu') 9 10 11 class LeakyReLU(function.Function): 12 13 """Leaky rectifier unit.""" 14 15 def __init__(self, slope=0.2): 16 self.slope = slope 17 18 def forward_cpu(self, x): 19 y = x[0].copy() 20 y[x[0] < 0] *= self.slope 21 return y, 22 23 def forward_gpu(self, x): 24 y = cuda.empty_like(x[0]) 25 _kern()(y, x[0], x[0], self.slope) 26 return y, 27 28 def backward_cpu(self, x, gy): 29 gx = gy[0].copy() 30 gx[x[0] < 0] *= self.slope 31 return gx, 32 33 def backward_gpu(self, x, gy): 34 gx = cuda.empty_like(x[0]) 35 _kern()(gx, x[0], gy[0], self.slope) 36 return gx, 37 38 39 def leaky_relu(x, slope=0.2): 40 """Leaky Rectified Linear Unit function. 41 42 This function is expressed as :math:`f(x) = \max(x, ax)`, where :math:`a` 43 is a configurable slope value. 44 45 Args: 46 x (~chainer.Variable): Input variable. 47 slope (float): Slope value :math:`a`. 48 49 Returns: 50 ~chainer.Variable: Output variable. 51 52 """ 53 return LeakyReLU(slope)(x) 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py --- a/chainer/functions/leaky_relu.py +++ b/chainer/functions/leaky_relu.py @@ -1,5 +1,8 @@ +import numpy + from chainer import cuda from chainer import function +from chainer.utils import type_check def _kern(): @@ -15,6 +18,13 @@ def __init__(self, slope=0.2): self.slope = slope + def check_type_forward(self, in_types): + type_check.expect(in_types.size() == 1) + x_type, = in_types + type_check.expect( + x_type.dtype == numpy.float32, + ) + def forward_cpu(self, x): y = x[0].copy() y[x[0] < 0] *= self.slope
{"golden_diff": "diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py\n--- a/chainer/functions/leaky_relu.py\n+++ b/chainer/functions/leaky_relu.py\n@@ -1,5 +1,8 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n+from chainer.utils import type_check\n \n \n def _kern():\n@@ -15,6 +18,13 @@\n def __init__(self, slope=0.2):\n self.slope = slope\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(in_types.size() == 1)\n+ x_type, = in_types\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ )\n+\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n", "issue": "Add type check to LeakyReLU function\nRelated to #123 \n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\n\n\ndef _kern():\n return cuda.elementwise(\n 'float* y, const float* cond, const float* x, float slope',\n 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')\n\n\nclass LeakyReLU(function.Function):\n\n \"\"\"Leaky rectifier unit.\"\"\"\n\n def __init__(self, slope=0.2):\n self.slope = slope\n\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n return y,\n\n def forward_gpu(self, x):\n y = cuda.empty_like(x[0])\n _kern()(y, x[0], x[0], self.slope)\n return y,\n\n def backward_cpu(self, x, gy):\n gx = gy[0].copy()\n gx[x[0] < 0] *= self.slope\n return gx,\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n _kern()(gx, x[0], gy[0], self.slope)\n return gx,\n\n\ndef leaky_relu(x, slope=0.2):\n \"\"\"Leaky Rectified Linear Unit function.\n\n This function is expressed as :math:`f(x) = \\max(x, ax)`, where :math:`a`\n is a configurable slope value.\n\n Args:\n x (~chainer.Variable): Input variable.\n slope (float): Slope value :math:`a`.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return LeakyReLU(slope)(x)\n", "path": "chainer/functions/leaky_relu.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _kern():\n return cuda.elementwise(\n 'float* y, const float* cond, const float* x, float slope',\n 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')\n\n\nclass LeakyReLU(function.Function):\n\n \"\"\"Leaky rectifier unit.\"\"\"\n\n def __init__(self, slope=0.2):\n self.slope = slope\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(\n x_type.dtype == numpy.float32,\n )\n\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n return y,\n\n def forward_gpu(self, x):\n y = cuda.empty_like(x[0])\n _kern()(y, x[0], x[0], self.slope)\n return y,\n\n def backward_cpu(self, x, gy):\n gx = gy[0].copy()\n gx[x[0] < 0] *= self.slope\n return gx,\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n _kern()(gx, x[0], gy[0], self.slope)\n return gx,\n\n\ndef leaky_relu(x, slope=0.2):\n \"\"\"Leaky Rectified Linear Unit function.\n\n This function is expressed as :math:`f(x) = \\max(x, ax)`, where :math:`a`\n is a configurable slope value.\n\n Args:\n x (~chainer.Variable): Input variable.\n slope (float): Slope value :math:`a`.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return LeakyReLU(slope)(x)\n", "path": "chainer/functions/leaky_relu.py"}]}
759
205
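The Chainer patch above adds `check_type_forward` so `LeakyReLU` rejects non-float32 inputs before the forward pass runs. As a rough, dependency-free illustration of what that guard buys, the NumPy-only sketch below mirrors `forward_cpu` from the record together with the dtype expectation that `type_check.expect` enforces; it is a stand-in, not Chainer code.

```python
import numpy


def leaky_relu_forward(x, slope=0.2):
    # Mirrors forward_cpu() plus the dtype expectation added by check_type_forward().
    if x.dtype != numpy.float32:
        raise TypeError("expected a float32 array, got {}".format(x.dtype))
    y = x.copy()
    y[x < 0] *= slope
    return y


print(leaky_relu_forward(numpy.array([-1.0, 2.0], dtype=numpy.float32)))  # [-0.2  2. ]

try:
    leaky_relu_forward(numpy.array([-1.0, 2.0]))  # float64 by default, so it is rejected
except TypeError as exc:
    print("rejected:", exc)
```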
gh_patches_debug_1427
rasdani/github-patches
git_diff
saleor__saleor-340
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Move py.test config to tox.ini Pytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #! /usr/bin/env python 2 from setuptools import setup, find_packages 3 from setuptools.command.test import test as TestCommand 4 import os 5 import sys 6 7 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings') 8 9 10 class PyTest(TestCommand): 11 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] 12 13 def initialize_options(self): 14 TestCommand.initialize_options(self) 15 self.pytest_args = [] 16 17 def finalize_options(self): 18 TestCommand.finalize_options(self) 19 self.test_args = [] 20 self.test_suite = True 21 22 def run_tests(self): 23 #import here, cause outside the eggs aren't loaded 24 import pytest 25 errno = pytest.main(self.pytest_args) 26 sys.exit(errno) 27 28 29 setup( 30 name='saleor', 31 author='Mirumee Software', 32 author_email='[email protected]', 33 description="A fork'n'play e-commerce in Django", 34 license='BSD', 35 version='0.1.0a0', 36 url='http://getsaleor.com/', 37 packages=find_packages(), 38 include_package_data=True, 39 install_requires=[ 40 'Babel>=1.3,<1.4a0', 41 'BabelDjango>=0.2,<0.3a0', 42 'Django>=1.8', 43 'dj_database_url>=0.3.0', 44 'django-emailit>=0.2.2', 45 'django-materializecss-form==0.0.64', 46 'django-model-utils>=2.0.0,<2.1a0', 47 'django-mptt>=0.7.1', 48 'django-offsite-storage>=0.0.5', 49 'django-payments>=0.7.0,<0.8a0', 50 'django-prices>=0.4.0,<0.5a0', 51 'djangorestframework>=3.1,<3.2a0', 52 'django-selectable==0.8.0', 53 'django-versatileimagefield>=1.0.1,<1.1a0', 54 'fake-factory>=0.3.2', 55 'google-measurement-protocol>=0.1.2,<0.2a0', 56 'jsonfield>=1.0.3', 57 'Markdown>=2.4', 58 'prices>=0.5,<0.6a0', 59 'requests>=1.2.0', 60 'satchless>=1.1.2,<1.2a0', 61 'unidecode' 62 ], 63 extras_require={ 64 'PaaS': [ 65 'whitenoise==1.0.6', 66 'gunicorn==19.2.1', 67 'psycopg2==2.6']}, 68 cmdclass={ 69 'test': PyTest}, 70 entry_points={ 71 'console_scripts': ['saleor = saleor:manage']}, 72 tests_require=[ 73 'mock==1.0.1', 74 'purl>=0.4.1', 75 'pytest', 76 'pytest-django']) 77 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -70,7 +70,7 @@ entry_points={ 'console_scripts': ['saleor = saleor:manage']}, tests_require=[ - 'mock==1.0.1', + 'mock==1.3.0', 'purl>=0.4.1', 'pytest', 'pytest-django'])
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,7 +70,7 @@\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n- 'mock==1.0.1',\n+ 'mock==1.3.0',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "issue": "Move py.test config to tox.ini\nPytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.\n\n", "before_files": [{"content": "#! /usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.0.1',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.3.0',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "path": "setup.py"}]}
1104
97
gh_patches_debug_42340
rasdani/github-patches
git_diff
streamlink__streamlink-5529
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- plugins.nicolive: plugin does not support stream metadata ### Checklist - [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose) - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version latest ### Description Current behaviour: Author folder is not created nor is the title grabbed properly. All streams have titles on the site so not sure whats going wrong here. Current output: -20230825200547.ts (right in the recordings folder, no author folder, no title) Expected behaviour/output: Create Author folder and filename consisting of title-timestuff.ts ### Debug log ```text streamlink --output "~/recordings/{author}/{title}-{time:%Y%m%d%H%M%S}.ts" https://live.nicovideo.jp/watch/co1992508 best ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/nicolive.py` Content: ``` 1 """ 2 $description Japanese live-streaming and video hosting social platform. 3 $url live.nicovideo.jp 4 $type live, vod 5 $account Required by some streams 6 $notes Timeshift is supported 7 """ 8 9 import logging 10 import re 11 from threading import Event 12 from urllib.parse import urljoin 13 14 from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher 15 from streamlink.plugin.api import useragents, validate 16 from streamlink.plugin.api.websocket import WebsocketClient 17 from streamlink.stream.hls import HLSStream, HLSStreamReader 18 from streamlink.utils.parse import parse_json 19 from streamlink.utils.times import hours_minutes_seconds 20 from streamlink.utils.url import update_qsd 21 22 23 log = logging.getLogger(__name__) 24 25 26 class NicoLiveWsClient(WebsocketClient): 27 STREAM_OPENED_TIMEOUT = 6 28 29 ready: Event 30 opened: Event 31 hls_stream_url: str 32 33 def __init__(self, *args, **kwargs): 34 super().__init__(*args, **kwargs) 35 self.opened = Event() 36 self.ready = Event() 37 38 def on_open(self, wsapp): 39 super().on_open(wsapp) 40 self.send_playerversion() 41 self.send_getpermit() 42 43 def on_message(self, wsapp, data: str): 44 log.debug(f"Received: {data}") 45 message = parse_json(data) 46 msgtype = message.get("type") 47 msgdata = message.get("data", {}) 48 49 if msgtype == "ping": 50 self.send_pong() 51 52 elif msgtype == "stream" and msgdata.get("protocol") == "hls" and msgdata.get("uri"): 53 self.hls_stream_url = msgdata.get("uri") 54 self.ready.set() 55 if self.opened.wait(self.STREAM_OPENED_TIMEOUT): 56 log.debug("Stream opened, keeping websocket connection alive") 57 else: 58 log.info("Closing websocket connection") 59 self.close() 60 61 elif msgtype == "disconnect": 62 reason = msgdata.get("reason", "Unknown reason") 63 log.info(f"Received disconnect message: {reason}") 64 self.close() 65 66 def send_playerversion(self): 67 self.send_json({ 68 "type": "startWatching", 69 "data": { 70 "stream": { 71 "quality": "abr", 72 "protocol": "hls", 73 
"latency": "high", 74 "chasePlay": False, 75 }, 76 "room": { 77 "protocol": "webSocket", 78 "commentable": True, 79 }, 80 "reconnect": False, 81 }, 82 }) 83 84 def send_getpermit(self): 85 self.send_json({ 86 "type": "getAkashic", 87 "data": { 88 "chasePlay": False, 89 }, 90 }) 91 92 def send_pong(self): 93 self.send_json({"type": "pong"}) 94 self.send_json({"type": "keepSeat"}) 95 96 97 class NicoLiveHLSStreamReader(HLSStreamReader): 98 stream: "NicoLiveHLSStream" 99 100 def open(self): 101 self.stream.wsclient.opened.set() 102 super().open() 103 104 def close(self): 105 super().close() 106 self.stream.wsclient.close() 107 108 109 class NicoLiveHLSStream(HLSStream): 110 __reader__ = NicoLiveHLSStreamReader 111 wsclient: NicoLiveWsClient 112 113 def set_wsclient(self, wsclient: NicoLiveWsClient): 114 self.wsclient = wsclient 115 116 117 @pluginmatcher(re.compile( 118 r"https?://(?P<domain>live\d*\.nicovideo\.jp)/watch/(lv|co)\d+", 119 )) 120 @pluginargument( 121 "email", 122 sensitive=True, 123 argument_name="niconico-email", 124 metavar="EMAIL", 125 help="The email or phone number associated with your Niconico account", 126 ) 127 @pluginargument( 128 "password", 129 sensitive=True, 130 argument_name="niconico-password", 131 metavar="PASSWORD", 132 help="The password of your Niconico account", 133 ) 134 @pluginargument( 135 "user-session", 136 sensitive=True, 137 argument_name="niconico-user-session", 138 metavar="VALUE", 139 help=""" 140 Value of the user-session token. 141 142 Can be used as an alternative to providing a password. 143 """, 144 ) 145 @pluginargument( 146 "purge-credentials", 147 argument_name="niconico-purge-credentials", 148 action="store_true", 149 help="Purge cached Niconico credentials to initiate a new session and reauthenticate.", 150 ) 151 @pluginargument( 152 "timeshift-offset", 153 type=hours_minutes_seconds, 154 argument_name="niconico-timeshift-offset", 155 metavar="[[XX:]XX:]XX | [XXh][XXm][XXs]", 156 help=""" 157 Amount of time to skip from the beginning of a stream. 158 159 Default is 0. 160 """, 161 ) 162 class NicoLive(Plugin): 163 STREAM_READY_TIMEOUT = 6 164 LOGIN_URL = "https://account.nicovideo.jp/login/redirector" 165 LOGIN_URL_PARAMS = { 166 "show_button_twitter": 1, 167 "show_button_facebook": 1, 168 "next_url": "/", 169 } 170 171 wsclient: NicoLiveWsClient 172 173 def _get_streams(self): 174 if self.get_option("purge_credentials"): 175 self.clear_cookies() 176 log.info("All credentials were successfully removed") 177 178 self.session.http.headers.update({ 179 "User-Agent": useragents.CHROME, 180 }) 181 182 self.niconico_web_login() 183 184 wss_api_url = self.get_wss_api_url() 185 if not wss_api_url: 186 log.error( 187 "Failed to get wss_api_url. 
" 188 + "Please check if the URL is correct, and make sure your account has access to the video.", 189 ) 190 return 191 192 self.wsclient = NicoLiveWsClient(self.session, wss_api_url) 193 self.wsclient.start() 194 195 hls_stream_url = self._get_hls_stream_url() 196 if not hls_stream_url: 197 return 198 199 offset = self.get_option("timeshift-offset") 200 if offset and "timeshift" in wss_api_url: 201 hls_stream_url = update_qsd(hls_stream_url, {"start": offset}) 202 203 for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items(): 204 stream.set_wsclient(self.wsclient) 205 yield quality, stream 206 207 def _get_hls_stream_url(self): 208 log.debug(f"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...") 209 if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive(): 210 log.error("Waiting for permit timed out.") 211 self.wsclient.close() 212 return 213 214 return self.wsclient.hls_stream_url 215 216 def get_wss_api_url(self): 217 try: 218 data = self.session.http.get(self.url, schema=validate.Schema( 219 validate.parse_html(), 220 validate.xml_find(".//script[@id='embedded-data'][@data-props]"), 221 validate.get("data-props"), 222 validate.parse_json(), 223 {"site": { 224 "relive": { 225 "webSocketUrl": validate.url(scheme="wss"), 226 }, 227 validate.optional("frontendId"): int, 228 }}, 229 validate.get("site"), 230 validate.union_get(("relive", "webSocketUrl"), "frontendId"), 231 )) 232 except PluginError: 233 return 234 235 wss_api_url, frontend_id = data 236 if frontend_id is not None: 237 wss_api_url = update_qsd(wss_api_url, {"frontend_id": frontend_id}) 238 239 return wss_api_url 240 241 def niconico_web_login(self): 242 user_session = self.get_option("user-session") 243 email = self.get_option("email") 244 password = self.get_option("password") 245 246 if user_session is not None: 247 log.info("Logging in via provided user session cookie") 248 self.session.http.cookies.set( 249 "user_session", 250 user_session, 251 path="/", 252 domain="nicovideo.jp", 253 ) 254 self.save_cookies() 255 256 elif self.session.http.cookies.get("user_session"): 257 log.info("Logging in via cached user session cookie") 258 259 elif email is not None and password is not None: 260 log.info("Logging in via provided email and password") 261 root = self.session.http.post( 262 self.LOGIN_URL, 263 data={"mail_tel": email, "password": password}, 264 params=self.LOGIN_URL_PARAMS, 265 schema=validate.Schema(validate.parse_html()), 266 ) 267 268 if self.session.http.cookies.get("user_session"): 269 log.info("Logged in.") 270 self.save_cookies() 271 return 272 273 input_with_value = {} 274 for elem in root.xpath(".//form[@action]//input"): 275 if elem.attrib.get("value"): 276 input_with_value[elem.attrib.get("name")] = elem.attrib.get("value") 277 elif elem.attrib.get("id") == "oneTimePw": 278 maxlength = int(elem.attrib.get("maxlength")) 279 oneTimePw = self.input_ask("Enter the 6 digit number included in email") 280 if len(oneTimePw) > maxlength: 281 log.error("invalid user input") 282 return 283 input_with_value[elem.attrib.get("name")] = oneTimePw 284 else: 285 log.debug(f"unknown input: {elem.attrib.get('name')}") 286 287 root = self.session.http.post( 288 urljoin("https://account.nicovideo.jp", root.xpath("string(.//form[@action]/@action)")), 289 data=input_with_value, 290 schema=validate.Schema(validate.parse_html()), 291 ) 292 log.debug(f"Cookies: {self.session.http.cookies.get_dict()}") 293 if 
self.session.http.cookies.get("user_session") is None: 294 error = root.xpath("string(//div[@class='formError']/div/text())") 295 log.warning(f"Login failed: {error or 'unknown reason'}") 296 else: 297 log.info("Logged in.") 298 self.save_cookies() 299 300 301 __plugin__ = NicoLive 302 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/nicolive.py b/src/streamlink/plugins/nicolive.py --- a/src/streamlink/plugins/nicolive.py +++ b/src/streamlink/plugins/nicolive.py @@ -2,6 +2,9 @@ $description Japanese live-streaming and video hosting social platform. $url live.nicovideo.jp $type live, vod +$metadata id +$metadata author +$metadata title $account Required by some streams $notes Timeshift is supported """ @@ -11,7 +14,7 @@ from threading import Event from urllib.parse import urljoin -from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher +from streamlink.plugin import Plugin, pluginargument, pluginmatcher from streamlink.plugin.api import useragents, validate from streamlink.plugin.api.websocket import WebsocketClient from streamlink.stream.hls import HLSStream, HLSStreamReader @@ -181,7 +184,9 @@ self.niconico_web_login() - wss_api_url = self.get_wss_api_url() + data = self.get_data() + + wss_api_url = self.find_wss_api_url(data) if not wss_api_url: log.error( "Failed to get wss_api_url. " @@ -189,6 +194,8 @@ ) return + self.id, self.author, self.title = self.find_metadata(data) + self.wsclient = NicoLiveWsClient(self.session, wss_api_url) self.wsclient.start() @@ -213,26 +220,56 @@ return self.wsclient.hls_stream_url - def get_wss_api_url(self): - try: - data = self.session.http.get(self.url, schema=validate.Schema( - validate.parse_html(), - validate.xml_find(".//script[@id='embedded-data'][@data-props]"), - validate.get("data-props"), - validate.parse_json(), - {"site": { + def get_data(self): + return self.session.http.get(self.url, schema=validate.Schema( + validate.parse_html(), + validate.xml_find(".//script[@id='embedded-data'][@data-props]"), + validate.get("data-props"), + validate.parse_json(), + )) + + @staticmethod + def find_metadata(data): + schema = validate.Schema( + { + "program": { + "nicoliveProgramId": str, + "supplier": {"name": str}, + "title": str, + }, + }, + validate.get("program"), + validate.union_get( + "nicoliveProgramId", + ("supplier", "name"), + "title", + ), + ) + + return schema.validate(data) + + @staticmethod + def find_wss_api_url(data): + schema = validate.Schema( + { + "site": { "relive": { - "webSocketUrl": validate.url(scheme="wss"), + "webSocketUrl": validate.any( + validate.url(scheme="wss"), + "", + ), }, validate.optional("frontendId"): int, - }}, - validate.get("site"), - validate.union_get(("relive", "webSocketUrl"), "frontendId"), - )) - except PluginError: + }, + }, + validate.get("site"), + validate.union_get(("relive", "webSocketUrl"), "frontendId"), + ) + + wss_api_url, frontend_id = schema.validate(data) + if not wss_api_url: return - wss_api_url, frontend_id = data if frontend_id is not None: wss_api_url = update_qsd(wss_api_url, {"frontend_id": frontend_id})
{"golden_diff": "diff --git a/src/streamlink/plugins/nicolive.py b/src/streamlink/plugins/nicolive.py\n--- a/src/streamlink/plugins/nicolive.py\n+++ b/src/streamlink/plugins/nicolive.py\n@@ -2,6 +2,9 @@\n $description Japanese live-streaming and video hosting social platform.\n $url live.nicovideo.jp\n $type live, vod\n+$metadata id\n+$metadata author\n+$metadata title\n $account Required by some streams\n $notes Timeshift is supported\n \"\"\"\n@@ -11,7 +14,7 @@\n from threading import Event\n from urllib.parse import urljoin\n \n-from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher\n+from streamlink.plugin import Plugin, pluginargument, pluginmatcher\n from streamlink.plugin.api import useragents, validate\n from streamlink.plugin.api.websocket import WebsocketClient\n from streamlink.stream.hls import HLSStream, HLSStreamReader\n@@ -181,7 +184,9 @@\n \n self.niconico_web_login()\n \n- wss_api_url = self.get_wss_api_url()\n+ data = self.get_data()\n+\n+ wss_api_url = self.find_wss_api_url(data)\n if not wss_api_url:\n log.error(\n \"Failed to get wss_api_url. \"\n@@ -189,6 +194,8 @@\n )\n return\n \n+ self.id, self.author, self.title = self.find_metadata(data)\n+\n self.wsclient = NicoLiveWsClient(self.session, wss_api_url)\n self.wsclient.start()\n \n@@ -213,26 +220,56 @@\n \n return self.wsclient.hls_stream_url\n \n- def get_wss_api_url(self):\n- try:\n- data = self.session.http.get(self.url, schema=validate.Schema(\n- validate.parse_html(),\n- validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n- validate.get(\"data-props\"),\n- validate.parse_json(),\n- {\"site\": {\n+ def get_data(self):\n+ return self.session.http.get(self.url, schema=validate.Schema(\n+ validate.parse_html(),\n+ validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n+ validate.get(\"data-props\"),\n+ validate.parse_json(),\n+ ))\n+\n+ @staticmethod\n+ def find_metadata(data):\n+ schema = validate.Schema(\n+ {\n+ \"program\": {\n+ \"nicoliveProgramId\": str,\n+ \"supplier\": {\"name\": str},\n+ \"title\": str,\n+ },\n+ },\n+ validate.get(\"program\"),\n+ validate.union_get(\n+ \"nicoliveProgramId\",\n+ (\"supplier\", \"name\"),\n+ \"title\",\n+ ),\n+ )\n+\n+ return schema.validate(data)\n+\n+ @staticmethod\n+ def find_wss_api_url(data):\n+ schema = validate.Schema(\n+ {\n+ \"site\": {\n \"relive\": {\n- \"webSocketUrl\": validate.url(scheme=\"wss\"),\n+ \"webSocketUrl\": validate.any(\n+ validate.url(scheme=\"wss\"),\n+ \"\",\n+ ),\n },\n validate.optional(\"frontendId\"): int,\n- }},\n- validate.get(\"site\"),\n- validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\"),\n- ))\n- except PluginError:\n+ },\n+ },\n+ validate.get(\"site\"),\n+ validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\"),\n+ )\n+\n+ wss_api_url, frontend_id = schema.validate(data)\n+ if not wss_api_url:\n return\n \n- wss_api_url, frontend_id = data\n if frontend_id is not None:\n wss_api_url = update_qsd(wss_api_url, {\"frontend_id\": frontend_id})\n", "issue": "plugins.nicolive: plugin does not support stream metadata\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin 
issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nlatest\n\n### Description\n\nCurrent behaviour: Author folder is not created nor is the title grabbed properly. All streams have titles on the site so not sure whats going wrong here.\r\n\r\nCurrent output: -20230825200547.ts (right in the recordings folder, no author folder, no title)\r\n\r\nExpected behaviour/output: Create Author folder and filename consisting of title-timestuff.ts\r\n\r\n\n\n### Debug log\n\n```text\nstreamlink --output \"~/recordings/{author}/{title}-{time:%Y%m%d%H%M%S}.ts\" https://live.nicovideo.jp/watch/co1992508 best\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Japanese live-streaming and video hosting social platform.\n$url live.nicovideo.jp\n$type live, vod\n$account Required by some streams\n$notes Timeshift is supported\n\"\"\"\n\nimport logging\nimport re\nfrom threading import Event\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.hls import HLSStream, HLSStreamReader\nfrom streamlink.utils.parse import parse_json\nfrom streamlink.utils.times import hours_minutes_seconds\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\nclass NicoLiveWsClient(WebsocketClient):\n STREAM_OPENED_TIMEOUT = 6\n\n ready: Event\n opened: Event\n hls_stream_url: str\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.opened = Event()\n self.ready = Event()\n\n def on_open(self, wsapp):\n super().on_open(wsapp)\n self.send_playerversion()\n self.send_getpermit()\n\n def on_message(self, wsapp, data: str):\n log.debug(f\"Received: {data}\")\n message = parse_json(data)\n msgtype = message.get(\"type\")\n msgdata = message.get(\"data\", {})\n\n if msgtype == \"ping\":\n self.send_pong()\n\n elif msgtype == \"stream\" and msgdata.get(\"protocol\") == \"hls\" and msgdata.get(\"uri\"):\n self.hls_stream_url = msgdata.get(\"uri\")\n self.ready.set()\n if self.opened.wait(self.STREAM_OPENED_TIMEOUT):\n log.debug(\"Stream opened, keeping websocket connection alive\")\n else:\n log.info(\"Closing websocket connection\")\n self.close()\n\n elif msgtype == \"disconnect\":\n reason = msgdata.get(\"reason\", \"Unknown reason\")\n log.info(f\"Received disconnect message: {reason}\")\n self.close()\n\n def send_playerversion(self):\n self.send_json({\n \"type\": \"startWatching\",\n \"data\": {\n \"stream\": {\n \"quality\": \"abr\",\n \"protocol\": \"hls\",\n \"latency\": \"high\",\n \"chasePlay\": False,\n },\n \"room\": {\n \"protocol\": \"webSocket\",\n \"commentable\": True,\n },\n \"reconnect\": False,\n },\n })\n\n def send_getpermit(self):\n self.send_json({\n \"type\": \"getAkashic\",\n \"data\": {\n \"chasePlay\": False,\n },\n })\n\n def send_pong(self):\n self.send_json({\"type\": \"pong\"})\n self.send_json({\"type\": \"keepSeat\"})\n\n\nclass NicoLiveHLSStreamReader(HLSStreamReader):\n stream: \"NicoLiveHLSStream\"\n\n def open(self):\n self.stream.wsclient.opened.set()\n super().open()\n\n def close(self):\n super().close()\n self.stream.wsclient.close()\n\n\nclass NicoLiveHLSStream(HLSStream):\n __reader__ = NicoLiveHLSStreamReader\n wsclient: 
NicoLiveWsClient\n\n def set_wsclient(self, wsclient: NicoLiveWsClient):\n self.wsclient = wsclient\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?P<domain>live\\d*\\.nicovideo\\.jp)/watch/(lv|co)\\d+\",\n))\n@pluginargument(\n \"email\",\n sensitive=True,\n argument_name=\"niconico-email\",\n metavar=\"EMAIL\",\n help=\"The email or phone number associated with your Niconico account\",\n)\n@pluginargument(\n \"password\",\n sensitive=True,\n argument_name=\"niconico-password\",\n metavar=\"PASSWORD\",\n help=\"The password of your Niconico account\",\n)\n@pluginargument(\n \"user-session\",\n sensitive=True,\n argument_name=\"niconico-user-session\",\n metavar=\"VALUE\",\n help=\"\"\"\n Value of the user-session token.\n\n Can be used as an alternative to providing a password.\n \"\"\",\n)\n@pluginargument(\n \"purge-credentials\",\n argument_name=\"niconico-purge-credentials\",\n action=\"store_true\",\n help=\"Purge cached Niconico credentials to initiate a new session and reauthenticate.\",\n)\n@pluginargument(\n \"timeshift-offset\",\n type=hours_minutes_seconds,\n argument_name=\"niconico-timeshift-offset\",\n metavar=\"[[XX:]XX:]XX | [XXh][XXm][XXs]\",\n help=\"\"\"\n Amount of time to skip from the beginning of a stream.\n\n Default is 0.\n \"\"\",\n)\nclass NicoLive(Plugin):\n STREAM_READY_TIMEOUT = 6\n LOGIN_URL = \"https://account.nicovideo.jp/login/redirector\"\n LOGIN_URL_PARAMS = {\n \"show_button_twitter\": 1,\n \"show_button_facebook\": 1,\n \"next_url\": \"/\",\n }\n\n wsclient: NicoLiveWsClient\n\n def _get_streams(self):\n if self.get_option(\"purge_credentials\"):\n self.clear_cookies()\n log.info(\"All credentials were successfully removed\")\n\n self.session.http.headers.update({\n \"User-Agent\": useragents.CHROME,\n })\n\n self.niconico_web_login()\n\n wss_api_url = self.get_wss_api_url()\n if not wss_api_url:\n log.error(\n \"Failed to get wss_api_url. 
\"\n + \"Please check if the URL is correct, and make sure your account has access to the video.\",\n )\n return\n\n self.wsclient = NicoLiveWsClient(self.session, wss_api_url)\n self.wsclient.start()\n\n hls_stream_url = self._get_hls_stream_url()\n if not hls_stream_url:\n return\n\n offset = self.get_option(\"timeshift-offset\")\n if offset and \"timeshift\" in wss_api_url:\n hls_stream_url = update_qsd(hls_stream_url, {\"start\": offset})\n\n for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n stream.set_wsclient(self.wsclient)\n yield quality, stream\n\n def _get_hls_stream_url(self):\n log.debug(f\"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...\")\n if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive():\n log.error(\"Waiting for permit timed out.\")\n self.wsclient.close()\n return\n\n return self.wsclient.hls_stream_url\n\n def get_wss_api_url(self):\n try:\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n validate.get(\"data-props\"),\n validate.parse_json(),\n {\"site\": {\n \"relive\": {\n \"webSocketUrl\": validate.url(scheme=\"wss\"),\n },\n validate.optional(\"frontendId\"): int,\n }},\n validate.get(\"site\"),\n validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\"),\n ))\n except PluginError:\n return\n\n wss_api_url, frontend_id = data\n if frontend_id is not None:\n wss_api_url = update_qsd(wss_api_url, {\"frontend_id\": frontend_id})\n\n return wss_api_url\n\n def niconico_web_login(self):\n user_session = self.get_option(\"user-session\")\n email = self.get_option(\"email\")\n password = self.get_option(\"password\")\n\n if user_session is not None:\n log.info(\"Logging in via provided user session cookie\")\n self.session.http.cookies.set(\n \"user_session\",\n user_session,\n path=\"/\",\n domain=\"nicovideo.jp\",\n )\n self.save_cookies()\n\n elif self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logging in via cached user session cookie\")\n\n elif email is not None and password is not None:\n log.info(\"Logging in via provided email and password\")\n root = self.session.http.post(\n self.LOGIN_URL,\n data={\"mail_tel\": email, \"password\": password},\n params=self.LOGIN_URL_PARAMS,\n schema=validate.Schema(validate.parse_html()),\n )\n\n if self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logged in.\")\n self.save_cookies()\n return\n\n input_with_value = {}\n for elem in root.xpath(\".//form[@action]//input\"):\n if elem.attrib.get(\"value\"):\n input_with_value[elem.attrib.get(\"name\")] = elem.attrib.get(\"value\")\n elif elem.attrib.get(\"id\") == \"oneTimePw\":\n maxlength = int(elem.attrib.get(\"maxlength\"))\n oneTimePw = self.input_ask(\"Enter the 6 digit number included in email\")\n if len(oneTimePw) > maxlength:\n log.error(\"invalid user input\")\n return\n input_with_value[elem.attrib.get(\"name\")] = oneTimePw\n else:\n log.debug(f\"unknown input: {elem.attrib.get('name')}\")\n\n root = self.session.http.post(\n urljoin(\"https://account.nicovideo.jp\", root.xpath(\"string(.//form[@action]/@action)\")),\n data=input_with_value,\n schema=validate.Schema(validate.parse_html()),\n )\n log.debug(f\"Cookies: {self.session.http.cookies.get_dict()}\")\n if self.session.http.cookies.get(\"user_session\") is None:\n error = root.xpath(\"string(//div[@class='formError']/div/text())\")\n 
log.warning(f\"Login failed: {error or 'unknown reason'}\")\n else:\n log.info(\"Logged in.\")\n self.save_cookies()\n\n\n__plugin__ = NicoLive\n", "path": "src/streamlink/plugins/nicolive.py"}], "after_files": [{"content": "\"\"\"\n$description Japanese live-streaming and video hosting social platform.\n$url live.nicovideo.jp\n$type live, vod\n$metadata id\n$metadata author\n$metadata title\n$account Required by some streams\n$notes Timeshift is supported\n\"\"\"\n\nimport logging\nimport re\nfrom threading import Event\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, pluginargument, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.hls import HLSStream, HLSStreamReader\nfrom streamlink.utils.parse import parse_json\nfrom streamlink.utils.times import hours_minutes_seconds\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\nclass NicoLiveWsClient(WebsocketClient):\n STREAM_OPENED_TIMEOUT = 6\n\n ready: Event\n opened: Event\n hls_stream_url: str\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.opened = Event()\n self.ready = Event()\n\n def on_open(self, wsapp):\n super().on_open(wsapp)\n self.send_playerversion()\n self.send_getpermit()\n\n def on_message(self, wsapp, data: str):\n log.debug(f\"Received: {data}\")\n message = parse_json(data)\n msgtype = message.get(\"type\")\n msgdata = message.get(\"data\", {})\n\n if msgtype == \"ping\":\n self.send_pong()\n\n elif msgtype == \"stream\" and msgdata.get(\"protocol\") == \"hls\" and msgdata.get(\"uri\"):\n self.hls_stream_url = msgdata.get(\"uri\")\n self.ready.set()\n if self.opened.wait(self.STREAM_OPENED_TIMEOUT):\n log.debug(\"Stream opened, keeping websocket connection alive\")\n else:\n log.info(\"Closing websocket connection\")\n self.close()\n\n elif msgtype == \"disconnect\":\n reason = msgdata.get(\"reason\", \"Unknown reason\")\n log.info(f\"Received disconnect message: {reason}\")\n self.close()\n\n def send_playerversion(self):\n self.send_json({\n \"type\": \"startWatching\",\n \"data\": {\n \"stream\": {\n \"quality\": \"abr\",\n \"protocol\": \"hls\",\n \"latency\": \"high\",\n \"chasePlay\": False,\n },\n \"room\": {\n \"protocol\": \"webSocket\",\n \"commentable\": True,\n },\n \"reconnect\": False,\n },\n })\n\n def send_getpermit(self):\n self.send_json({\n \"type\": \"getAkashic\",\n \"data\": {\n \"chasePlay\": False,\n },\n })\n\n def send_pong(self):\n self.send_json({\"type\": \"pong\"})\n self.send_json({\"type\": \"keepSeat\"})\n\n\nclass NicoLiveHLSStreamReader(HLSStreamReader):\n stream: \"NicoLiveHLSStream\"\n\n def open(self):\n self.stream.wsclient.opened.set()\n super().open()\n\n def close(self):\n super().close()\n self.stream.wsclient.close()\n\n\nclass NicoLiveHLSStream(HLSStream):\n __reader__ = NicoLiveHLSStreamReader\n wsclient: NicoLiveWsClient\n\n def set_wsclient(self, wsclient: NicoLiveWsClient):\n self.wsclient = wsclient\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?P<domain>live\\d*\\.nicovideo\\.jp)/watch/(lv|co)\\d+\",\n))\n@pluginargument(\n \"email\",\n sensitive=True,\n argument_name=\"niconico-email\",\n metavar=\"EMAIL\",\n help=\"The email or phone number associated with your Niconico account\",\n)\n@pluginargument(\n \"password\",\n sensitive=True,\n argument_name=\"niconico-password\",\n metavar=\"PASSWORD\",\n help=\"The password of your Niconico 
account\",\n)\n@pluginargument(\n \"user-session\",\n sensitive=True,\n argument_name=\"niconico-user-session\",\n metavar=\"VALUE\",\n help=\"\"\"\n Value of the user-session token.\n\n Can be used as an alternative to providing a password.\n \"\"\",\n)\n@pluginargument(\n \"purge-credentials\",\n argument_name=\"niconico-purge-credentials\",\n action=\"store_true\",\n help=\"Purge cached Niconico credentials to initiate a new session and reauthenticate.\",\n)\n@pluginargument(\n \"timeshift-offset\",\n type=hours_minutes_seconds,\n argument_name=\"niconico-timeshift-offset\",\n metavar=\"[[XX:]XX:]XX | [XXh][XXm][XXs]\",\n help=\"\"\"\n Amount of time to skip from the beginning of a stream.\n\n Default is 0.\n \"\"\",\n)\nclass NicoLive(Plugin):\n STREAM_READY_TIMEOUT = 6\n LOGIN_URL = \"https://account.nicovideo.jp/login/redirector\"\n LOGIN_URL_PARAMS = {\n \"show_button_twitter\": 1,\n \"show_button_facebook\": 1,\n \"next_url\": \"/\",\n }\n\n wsclient: NicoLiveWsClient\n\n def _get_streams(self):\n if self.get_option(\"purge_credentials\"):\n self.clear_cookies()\n log.info(\"All credentials were successfully removed\")\n\n self.session.http.headers.update({\n \"User-Agent\": useragents.CHROME,\n })\n\n self.niconico_web_login()\n\n data = self.get_data()\n\n wss_api_url = self.find_wss_api_url(data)\n if not wss_api_url:\n log.error(\n \"Failed to get wss_api_url. \"\n + \"Please check if the URL is correct, and make sure your account has access to the video.\",\n )\n return\n\n self.id, self.author, self.title = self.find_metadata(data)\n\n self.wsclient = NicoLiveWsClient(self.session, wss_api_url)\n self.wsclient.start()\n\n hls_stream_url = self._get_hls_stream_url()\n if not hls_stream_url:\n return\n\n offset = self.get_option(\"timeshift-offset\")\n if offset and \"timeshift\" in wss_api_url:\n hls_stream_url = update_qsd(hls_stream_url, {\"start\": offset})\n\n for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n stream.set_wsclient(self.wsclient)\n yield quality, stream\n\n def _get_hls_stream_url(self):\n log.debug(f\"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...\")\n if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive():\n log.error(\"Waiting for permit timed out.\")\n self.wsclient.close()\n return\n\n return self.wsclient.hls_stream_url\n\n def get_data(self):\n return self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n validate.get(\"data-props\"),\n validate.parse_json(),\n ))\n\n @staticmethod\n def find_metadata(data):\n schema = validate.Schema(\n {\n \"program\": {\n \"nicoliveProgramId\": str,\n \"supplier\": {\"name\": str},\n \"title\": str,\n },\n },\n validate.get(\"program\"),\n validate.union_get(\n \"nicoliveProgramId\",\n (\"supplier\", \"name\"),\n \"title\",\n ),\n )\n\n return schema.validate(data)\n\n @staticmethod\n def find_wss_api_url(data):\n schema = validate.Schema(\n {\n \"site\": {\n \"relive\": {\n \"webSocketUrl\": validate.any(\n validate.url(scheme=\"wss\"),\n \"\",\n ),\n },\n validate.optional(\"frontendId\"): int,\n },\n },\n validate.get(\"site\"),\n validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\"),\n )\n\n wss_api_url, frontend_id = schema.validate(data)\n if not wss_api_url:\n return\n\n if frontend_id is not None:\n wss_api_url = update_qsd(wss_api_url, {\"frontend_id\": frontend_id})\n\n return 
wss_api_url\n\n def niconico_web_login(self):\n user_session = self.get_option(\"user-session\")\n email = self.get_option(\"email\")\n password = self.get_option(\"password\")\n\n if user_session is not None:\n log.info(\"Logging in via provided user session cookie\")\n self.session.http.cookies.set(\n \"user_session\",\n user_session,\n path=\"/\",\n domain=\"nicovideo.jp\",\n )\n self.save_cookies()\n\n elif self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logging in via cached user session cookie\")\n\n elif email is not None and password is not None:\n log.info(\"Logging in via provided email and password\")\n root = self.session.http.post(\n self.LOGIN_URL,\n data={\"mail_tel\": email, \"password\": password},\n params=self.LOGIN_URL_PARAMS,\n schema=validate.Schema(validate.parse_html()),\n )\n\n if self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logged in.\")\n self.save_cookies()\n return\n\n input_with_value = {}\n for elem in root.xpath(\".//form[@action]//input\"):\n if elem.attrib.get(\"value\"):\n input_with_value[elem.attrib.get(\"name\")] = elem.attrib.get(\"value\")\n elif elem.attrib.get(\"id\") == \"oneTimePw\":\n maxlength = int(elem.attrib.get(\"maxlength\"))\n oneTimePw = self.input_ask(\"Enter the 6 digit number included in email\")\n if len(oneTimePw) > maxlength:\n log.error(\"invalid user input\")\n return\n input_with_value[elem.attrib.get(\"name\")] = oneTimePw\n else:\n log.debug(f\"unknown input: {elem.attrib.get('name')}\")\n\n root = self.session.http.post(\n urljoin(\"https://account.nicovideo.jp\", root.xpath(\"string(.//form[@action]/@action)\")),\n data=input_with_value,\n schema=validate.Schema(validate.parse_html()),\n )\n log.debug(f\"Cookies: {self.session.http.cookies.get_dict()}\")\n if self.session.http.cookies.get(\"user_session\") is None:\n error = root.xpath(\"string(//div[@class='formError']/div/text())\")\n log.warning(f\"Login failed: {error or 'unknown reason'}\")\n else:\n log.info(\"Logged in.\")\n self.save_cookies()\n\n\n__plugin__ = NicoLive\n", "path": "src/streamlink/plugins/nicolive.py"}]}
num_tokens: 3,522
num_tokens_diff: 854

problem_id: gh_patches_debug_837
source: rasdani/github-patches
task_type: git_diff
in_source_id: optuna__optuna-1088
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [RFC] CI for examples/visualization It'll be better if we can validate that all the visualization examples run without failures with CI. I guess https://github.com/computationalmodelling/nbval/ might be useful. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import os 2 import sys 3 4 import pkg_resources 5 from setuptools import find_packages 6 from setuptools import setup 7 8 from typing import Dict 9 from typing import List 10 from typing import Optional 11 12 13 def get_version() -> str: 14 15 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py") 16 with open(version_filepath) as f: 17 for line in f: 18 if line.startswith("__version__"): 19 return line.strip().split()[-1][1:-1] 20 assert False 21 22 23 def get_long_description() -> str: 24 25 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md") 26 with open(readme_filepath) as f: 27 return f.read() 28 29 30 def get_install_requires() -> List[str]: 31 32 return [ 33 "alembic", 34 "cliff", 35 "cmaes>=0.3.2", 36 "colorlog", 37 "joblib", 38 "numpy", 39 "scipy!=1.4.0", 40 "sqlalchemy>=1.1.0", 41 "tqdm", 42 ] 43 44 45 def get_tests_require() -> List[str]: 46 47 return get_extras_require()["testing"] 48 49 50 def get_extras_require() -> Dict[str, List[str]]: 51 52 requirements = { 53 "checking": ["black", "hacking", "mypy",], 54 "codecov": ["codecov", "pytest-cov",], 55 "doctest": ["cma", "pandas", "plotly>=4.0.0", "scikit-learn>=0.19.0", "scikit-optimize",], 56 "document": ["sphinx", "sphinx_rtd_theme",], 57 "example": [ 58 "catboost", 59 "chainer", 60 "lightgbm", 61 "mlflow", 62 "mpi4py", 63 "mxnet", 64 "pytorch-ignite", 65 "scikit-image", 66 "scikit-learn", 67 "torch", 68 "torchvision>=0.5.0", 69 "xgboost", 70 ] 71 + (["allennlp", "fastai<2"] if (3, 5) < sys.version_info[:2] < (3, 8) else []) 72 + ( 73 [ 74 "dask[dataframe]", 75 "dask-ml", 76 "keras", 77 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue 78 # https://github.com/optuna/optuna/issues/997. 79 "pytorch-lightning<0.7.0", 80 "tensorflow>=2.0.0", 81 "tensorflow-datasets", 82 ] 83 if sys.version_info[:2] < (3, 8) 84 else [] 85 ), 86 "testing": [ 87 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue 88 # https://github.com/optuna/optuna/issues/1000. 89 "bokeh<2.0.0", 90 "chainer>=5.0.0", 91 "cma", 92 "fanova", 93 "lightgbm", 94 "mock", 95 "mpi4py", 96 "mxnet", 97 "pandas", 98 "plotly>=4.0.0", 99 "pytest", 100 "pytorch-ignite", 101 "scikit-learn>=0.19.0", 102 "scikit-optimize", 103 "torch", 104 "torchvision>=0.5.0", 105 "xgboost", 106 ] 107 + (["fastai<2"] if (3, 5) < sys.version_info[:2] < (3, 8) else []) 108 + ( 109 [ 110 "keras", 111 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue 112 # https://github.com/optuna/optuna/issues/997. 
113 "pytorch-lightning<0.7.0", 114 "tensorflow", 115 "tensorflow-datasets", 116 ] 117 if sys.version_info[:2] < (3, 8) 118 else [] 119 ), 120 } 121 122 return requirements 123 124 125 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]: 126 127 for pkg in pkgs: 128 try: 129 return pkg_resources.get_distribution(pkg) 130 except pkg_resources.DistributionNotFound: 131 pass 132 return None 133 134 135 pfnopt_pkg = find_any_distribution(["pfnopt"]) 136 if pfnopt_pkg is not None: 137 msg = ( 138 "We detected that PFNOpt is installed in your environment.\n" 139 "PFNOpt has been renamed Optuna. Please uninstall the old\n" 140 "PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`)." 141 ) 142 print(msg) 143 exit(1) 144 145 setup( 146 name="optuna", 147 version=get_version(), 148 description="A hyperparameter optimization framework", 149 long_description=get_long_description(), 150 long_description_content_type="text/markdown", 151 author="Takuya Akiba", 152 author_email="[email protected]", 153 url="https://optuna.org/", 154 packages=find_packages(), 155 package_data={ 156 "optuna": [ 157 "storages/rdb/alembic.ini", 158 "storages/rdb/alembic/*.*", 159 "storages/rdb/alembic/versions/*.*", 160 ] 161 }, 162 install_requires=get_install_requires(), 163 tests_require=get_tests_require(), 164 extras_require=get_extras_require(), 165 entry_points={"console_scripts": ["optuna = optuna.cli:main"]}, 166 ) 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,6 +61,7 @@ "mlflow", "mpi4py", "mxnet", + "nbval", "pytorch-ignite", "scikit-image", "scikit-learn",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,7 @@\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n+ \"nbval\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n", "issue": "[RFC] CI for examples/visualization\nIt'll be better if we can validate that all the visualization examples run without failures with CI.\r\n\r\nI guess https://github.com/computationalmodelling/nbval/ might be useful.\n", "before_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.3.2\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\",],\n \"codecov\": [\"codecov\", \"pytest-cov\",],\n \"doctest\": [\"cma\", \"pandas\", \"plotly>=4.0.0\", \"scikit-learn>=0.19.0\", \"scikit-optimize\",],\n \"document\": [\"sphinx\", \"sphinx_rtd_theme\",],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"allennlp\", \"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"dask[dataframe]\",\n \"dask-ml\",\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fanova\",\n \"lightgbm\",\n \"mock\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"pytorch-ignite\",\n \"scikit-learn>=0.19.0\",\n \"scikit-optimize\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution([\"pfnopt\"])\nif pfnopt_pkg is not None:\n msg = (\n \"We 
detected that PFNOpt is installed in your environment.\\n\"\n \"PFNOpt has been renamed Optuna. Please uninstall the old\\n\"\n \"PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).\"\n )\n print(msg)\n exit(1)\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/rdb/alembic.ini\",\n \"storages/rdb/alembic/*.*\",\n \"storages/rdb/alembic/versions/*.*\",\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\"console_scripts\": [\"optuna = optuna.cli:main\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\",],\n \"codecov\": [\"codecov\", \"pytest-cov\",],\n \"doctest\": [\"cma\", \"pandas\", \"plotly>=4.0.0\", \"scikit-learn>=0.19.0\", \"scikit-optimize\",],\n \"document\": [\"sphinx\", \"sphinx_rtd_theme\",],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"nbval\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"allennlp\", \"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"dask[dataframe]\",\n \"dask-ml\",\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fanova\",\n \"lightgbm\",\n \"mock\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"pytorch-ignite\",\n \"scikit-learn>=0.19.0\",\n \"scikit-optimize\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 
\"pytorch-lightning<0.7.0\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution([\"pfnopt\"])\nif pfnopt_pkg is not None:\n msg = (\n \"We detected that PFNOpt is installed in your environment.\\n\"\n \"PFNOpt has been renamed Optuna. Please uninstall the old\\n\"\n \"PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).\"\n )\n print(msg)\n exit(1)\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/rdb/alembic.ini\",\n \"storages/rdb/alembic/*.*\",\n \"storages/rdb/alembic/versions/*.*\",\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\"console_scripts\": [\"optuna = optuna.cli:main\"]},\n)\n", "path": "setup.py"}]}
num_tokens: 1,901
num_tokens_diff: 74

problem_id: gh_patches_debug_44014
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypi__warehouse-627
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enable Read Only Transactions (via factory wrapper) If a route isn't going to modify data, then we can use a read only deferrable transaction so that we don't need to take SIRead locks and we skip some of the overhead of having a serializable transaction. Closes #623 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/db.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import functools 14 15 import alembic.config 16 import sqlalchemy 17 import venusian 18 import zope.sqlalchemy 19 20 from sqlalchemy import event 21 from sqlalchemy.dialects.postgresql import UUID 22 from sqlalchemy.ext.declarative import declarative_base 23 from sqlalchemy.orm import sessionmaker 24 25 from warehouse.utils.attrs import make_repr 26 27 28 __all__ = ["includeme", "metadata", "ModelBase"] 29 30 31 class ModelBase: 32 33 def __repr__(self): 34 self.__repr__ = make_repr(*self.__table__.columns.keys(), _self=self) 35 return self.__repr__() 36 37 38 # The Global metadata object. 39 metadata = sqlalchemy.MetaData() 40 41 42 # Base class for models using declarative syntax 43 ModelBase = declarative_base(cls=ModelBase, metadata=metadata) 44 45 46 class Model(ModelBase): 47 48 __abstract__ = True 49 50 id = sqlalchemy.Column( 51 UUID(as_uuid=True), 52 primary_key=True, 53 server_default=sqlalchemy.text("gen_random_uuid()"), 54 ) 55 56 # Create our session class here, this will stay stateless as we'll bind the 57 # engine to each new state we create instead of binding it to the session 58 # class. 59 Session = sessionmaker() 60 61 62 def listens_for(target, identifier, *args, **kwargs): 63 def deco(wrapped): 64 def callback(scanner, _name, wrapped): 65 wrapped = functools.partial(wrapped, scanner.config) 66 event.listen(target, identifier, wrapped, *args, **kwargs) 67 68 venusian.attach(wrapped, callback) 69 70 return wrapped 71 return deco 72 73 74 def _configure_alembic(config): 75 alembic_cfg = alembic.config.Config() 76 alembic_cfg.set_main_option("script_location", "warehouse:migrations") 77 alembic_cfg.set_main_option( 78 "url", config.registry.settings["database.url"], 79 ) 80 return alembic_cfg 81 82 83 def _create_session(request): 84 # Create our session 85 session = Session(bind=request.registry["sqlalchemy.engine"]) 86 87 # Register only this particular session with zope.sqlalchemy 88 zope.sqlalchemy.register(session, transaction_manager=request.tm) 89 90 # Return our session now that it's created and registered 91 return session 92 93 94 def includeme(config): 95 # Add a directive to get an alembic configuration. 96 config.add_directive("alembic_config", _configure_alembic) 97 98 # Create our SQLAlchemy Engine. 
99 config.registry["sqlalchemy.engine"] = sqlalchemy.create_engine( 100 config.registry.settings["database.url"], 101 isolation_level="SERIALIZABLE", 102 ) 103 104 # Register our request.db property 105 config.add_request_method(_create_session, name="db", reify=True) 106 ``` Path: `warehouse/routes.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 14 def includeme(config): 15 config.add_route("index", "/") 16 17 # Accounts 18 config.add_route( 19 "accounts.profile", 20 "/user/{username}/", 21 factory="warehouse.accounts.models:UserFactory", 22 traverse="/{username}", 23 ) 24 config.add_route("accounts.login", "/account/login/") 25 config.add_route("accounts.logout", "/account/logout/") 26 27 # Packaging 28 config.add_route( 29 "packaging.project", 30 "/project/{name}/", 31 factory="warehouse.packaging.models:ProjectFactory", 32 traverse="/{name}", 33 ) 34 config.add_route( 35 "packaging.release", 36 "/project/{name}/{version}/", 37 factory="warehouse.packaging.models:ProjectFactory", 38 traverse="/{name}/{version}", 39 ) 40 config.add_route("packaging.file", "/packages/{path:.*}") 41 42 # Legacy URLs 43 config.add_route("legacy.api.simple.index", "/simple/") 44 config.add_route( 45 "legacy.api.simple.detail", 46 "/simple/{name}/", 47 factory="warehouse.packaging.models:ProjectFactory", 48 traverse="/{name}/", 49 ) 50 config.add_route( 51 "legacy.api.json.project", 52 "/pypi/{name}/json", 53 factory="warehouse.packaging.models:ProjectFactory", 54 traverse="/{name}", 55 ) 56 config.add_route( 57 "legacy.api.json.release", 58 "/pypi/{name}/{version}/json", 59 factory="warehouse.packaging.models:ProjectFactory", 60 traverse="/{name}/{version}", 61 ) 62 63 # Legacy Action URLs 64 config.add_pypi_action_route("legacy.api.pypi.file_upload", "file_upload") 65 config.add_pypi_action_route("legacy.api.pypi.submit", "submit") 66 config.add_pypi_action_route( 67 "legacy.api.pypi.submit_pkg_info", 68 "submit_pkg_info", 69 ) 70 config.add_pypi_action_route("legacy.api.pypi.doc_upload", "doc_upload") 71 config.add_pypi_action_route("legacy.api.pypi.doap", "doap") 72 73 # Legacy XMLRPC 74 config.add_xmlrpc_endpoint( 75 "pypi", 76 pattern="/pypi", 77 header="Content-Type:text/xml", 78 ) 79 80 # Legacy Documentation 81 config.add_route("legacy.docs", config.registry.settings["docs.url"]) 82 83 # Legacy Redirects 84 config.add_redirect("/pypi/{name}/", "/project/{name}/") 85 config.add_redirect( 86 "/pypi/{name}/{version}/", 87 "/project/{name}/{version}/", 88 ) 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/db.py b/warehouse/db.py --- a/warehouse/db.py +++ b/warehouse/db.py @@ -28,6 +28,24 @@ __all__ = ["includeme", "metadata", "ModelBase"] +# We'll add a basic predicate that won't do anything except allow marking a +# route as read only (or not). +class ReadOnlyPredicate: + + def __init__(self, val, config): + self.val = val + + def text(self): + return "read_only = {!r}".format(self.val) + + phash = text + + # This predicate doesn't actually participate in the route selection + # process, so we'll just always return True. + def __call__(self, info, request): + return True + + class ModelBase: def __repr__(self): @@ -84,6 +102,16 @@ # Create our session session = Session(bind=request.registry["sqlalchemy.engine"]) + # Set our transaction to read only if the route has been marked as read + # only. + for predicate in request.matched_route.predicates: + if isinstance(predicate, ReadOnlyPredicate) and predicate.val: + session.execute( + """ SET TRANSACTION + ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE + """ + ) + # Register only this particular session with zope.sqlalchemy zope.sqlalchemy.register(session, transaction_manager=request.tm) @@ -103,3 +131,6 @@ # Register our request.db property config.add_request_method(_create_session, name="db", reify=True) + + # Add a route predicate to mark a route as read only. + config.add_route_predicate("read_only", ReadOnlyPredicate) diff --git a/warehouse/routes.py b/warehouse/routes.py --- a/warehouse/routes.py +++ b/warehouse/routes.py @@ -12,7 +12,7 @@ def includeme(config): - config.add_route("index", "/") + config.add_route("index", "/", read_only=True) # Accounts config.add_route( @@ -20,6 +20,7 @@ "/user/{username}/", factory="warehouse.accounts.models:UserFactory", traverse="/{username}", + read_only=True, ) config.add_route("accounts.login", "/account/login/") config.add_route("accounts.logout", "/account/logout/") @@ -30,34 +31,39 @@ "/project/{name}/", factory="warehouse.packaging.models:ProjectFactory", traverse="/{name}", + read_only=True, ) config.add_route( "packaging.release", "/project/{name}/{version}/", factory="warehouse.packaging.models:ProjectFactory", traverse="/{name}/{version}", + read_only=True, ) - config.add_route("packaging.file", "/packages/{path:.*}") + config.add_route("packaging.file", "/packages/{path:.*}", read_only=True) # Legacy URLs - config.add_route("legacy.api.simple.index", "/simple/") + config.add_route("legacy.api.simple.index", "/simple/", read_only=True) config.add_route( "legacy.api.simple.detail", "/simple/{name}/", factory="warehouse.packaging.models:ProjectFactory", traverse="/{name}/", + read_only=True, ) config.add_route( "legacy.api.json.project", "/pypi/{name}/json", factory="warehouse.packaging.models:ProjectFactory", traverse="/{name}", + read_only=True, ) config.add_route( "legacy.api.json.release", "/pypi/{name}/{version}/json", factory="warehouse.packaging.models:ProjectFactory", traverse="/{name}/{version}", + read_only=True, ) # Legacy Action URLs @@ -75,6 +81,7 @@ "pypi", pattern="/pypi", header="Content-Type:text/xml", + read_only=True, ) # Legacy Documentation
{"golden_diff": "diff --git a/warehouse/db.py b/warehouse/db.py\n--- a/warehouse/db.py\n+++ b/warehouse/db.py\n@@ -28,6 +28,24 @@\n __all__ = [\"includeme\", \"metadata\", \"ModelBase\"]\n \n \n+# We'll add a basic predicate that won't do anything except allow marking a\n+# route as read only (or not).\n+class ReadOnlyPredicate:\n+\n+ def __init__(self, val, config):\n+ self.val = val\n+\n+ def text(self):\n+ return \"read_only = {!r}\".format(self.val)\n+\n+ phash = text\n+\n+ # This predicate doesn't actually participate in the route selection\n+ # process, so we'll just always return True.\n+ def __call__(self, info, request):\n+ return True\n+\n+\n class ModelBase:\n \n def __repr__(self):\n@@ -84,6 +102,16 @@\n # Create our session\n session = Session(bind=request.registry[\"sqlalchemy.engine\"])\n \n+ # Set our transaction to read only if the route has been marked as read\n+ # only.\n+ for predicate in request.matched_route.predicates:\n+ if isinstance(predicate, ReadOnlyPredicate) and predicate.val:\n+ session.execute(\n+ \"\"\" SET TRANSACTION\n+ ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE\n+ \"\"\"\n+ )\n+\n # Register only this particular session with zope.sqlalchemy\n zope.sqlalchemy.register(session, transaction_manager=request.tm)\n \n@@ -103,3 +131,6 @@\n \n # Register our request.db property\n config.add_request_method(_create_session, name=\"db\", reify=True)\n+\n+ # Add a route predicate to mark a route as read only.\n+ config.add_route_predicate(\"read_only\", ReadOnlyPredicate)\ndiff --git a/warehouse/routes.py b/warehouse/routes.py\n--- a/warehouse/routes.py\n+++ b/warehouse/routes.py\n@@ -12,7 +12,7 @@\n \n \n def includeme(config):\n- config.add_route(\"index\", \"/\")\n+ config.add_route(\"index\", \"/\", read_only=True)\n \n # Accounts\n config.add_route(\n@@ -20,6 +20,7 @@\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n+ read_only=True,\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n@@ -30,34 +31,39 @@\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n+ read_only=True,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n+ read_only=True,\n )\n- config.add_route(\"packaging.file\", \"/packages/{path:.*}\")\n+ config.add_route(\"packaging.file\", \"/packages/{path:.*}\", read_only=True)\n \n # Legacy URLs\n- config.add_route(\"legacy.api.simple.index\", \"/simple/\")\n+ config.add_route(\"legacy.api.simple.index\", \"/simple/\", read_only=True)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n+ read_only=True,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n+ read_only=True,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n+ read_only=True,\n )\n \n # Legacy Action URLs\n@@ -75,6 +81,7 @@\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n+ read_only=True,\n )\n \n # Legacy Documentation\n", "issue": "Enable Read Only Transactions (via factory wrapper)\nIf a route isn't going to 
modify data, then we can use a read only deferrable transaction so that we don't need to take SIRead locks and we skip some of the overhead of having a serializable transaction.\n\nCloses #623\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\n\nimport alembic.config\nimport sqlalchemy\nimport venusian\nimport zope.sqlalchemy\n\nfrom sqlalchemy import event\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nfrom warehouse.utils.attrs import make_repr\n\n\n__all__ = [\"includeme\", \"metadata\", \"ModelBase\"]\n\n\nclass ModelBase:\n\n def __repr__(self):\n self.__repr__ = make_repr(*self.__table__.columns.keys(), _self=self)\n return self.__repr__()\n\n\n# The Global metadata object.\nmetadata = sqlalchemy.MetaData()\n\n\n# Base class for models using declarative syntax\nModelBase = declarative_base(cls=ModelBase, metadata=metadata)\n\n\nclass Model(ModelBase):\n\n __abstract__ = True\n\n id = sqlalchemy.Column(\n UUID(as_uuid=True),\n primary_key=True,\n server_default=sqlalchemy.text(\"gen_random_uuid()\"),\n )\n\n# Create our session class here, this will stay stateless as we'll bind the\n# engine to each new state we create instead of binding it to the session\n# class.\nSession = sessionmaker()\n\n\ndef listens_for(target, identifier, *args, **kwargs):\n def deco(wrapped):\n def callback(scanner, _name, wrapped):\n wrapped = functools.partial(wrapped, scanner.config)\n event.listen(target, identifier, wrapped, *args, **kwargs)\n\n venusian.attach(wrapped, callback)\n\n return wrapped\n return deco\n\n\ndef _configure_alembic(config):\n alembic_cfg = alembic.config.Config()\n alembic_cfg.set_main_option(\"script_location\", \"warehouse:migrations\")\n alembic_cfg.set_main_option(\n \"url\", config.registry.settings[\"database.url\"],\n )\n return alembic_cfg\n\n\ndef _create_session(request):\n # Create our session\n session = Session(bind=request.registry[\"sqlalchemy.engine\"])\n\n # Register only this particular session with zope.sqlalchemy\n zope.sqlalchemy.register(session, transaction_manager=request.tm)\n\n # Return our session now that it's created and registered\n return session\n\n\ndef includeme(config):\n # Add a directive to get an alembic configuration.\n config.add_directive(\"alembic_config\", _configure_alembic)\n\n # Create our SQLAlchemy Engine.\n config.registry[\"sqlalchemy.engine\"] = sqlalchemy.create_engine(\n config.registry.settings[\"database.url\"],\n isolation_level=\"SERIALIZABLE\",\n )\n\n # Register our request.db property\n config.add_request_method(_create_session, name=\"db\", reify=True)\n", "path": "warehouse/db.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed 
to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n config.add_route(\"index\", \"/\")\n\n # Accounts\n config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n\n # Packaging\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n )\n config.add_route(\"packaging.file\", \"/packages/{path:.*}\")\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\")\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n )\n\n # Legacy Action URLs\n config.add_pypi_action_route(\"legacy.api.pypi.file_upload\", \"file_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.submit\", \"submit\")\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n \"submit_pkg_info\",\n )\n config.add_pypi_action_route(\"legacy.api.pypi.doc_upload\", \"doc_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.doap\", \"doap\")\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\")\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n )\n", "path": "warehouse/routes.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\n\nimport alembic.config\nimport sqlalchemy\nimport venusian\nimport zope.sqlalchemy\n\nfrom sqlalchemy import event\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nfrom warehouse.utils.attrs import make_repr\n\n\n__all__ = [\"includeme\", \"metadata\", \"ModelBase\"]\n\n\n# We'll add a basic predicate that won't do anything except allow marking a\n# route as 
read only (or not).\nclass ReadOnlyPredicate:\n\n def __init__(self, val, config):\n self.val = val\n\n def text(self):\n return \"read_only = {!r}\".format(self.val)\n\n phash = text\n\n # This predicate doesn't actually participate in the route selection\n # process, so we'll just always return True.\n def __call__(self, info, request):\n return True\n\n\nclass ModelBase:\n\n def __repr__(self):\n self.__repr__ = make_repr(*self.__table__.columns.keys(), _self=self)\n return self.__repr__()\n\n\n# The Global metadata object.\nmetadata = sqlalchemy.MetaData()\n\n\n# Base class for models using declarative syntax\nModelBase = declarative_base(cls=ModelBase, metadata=metadata)\n\n\nclass Model(ModelBase):\n\n __abstract__ = True\n\n id = sqlalchemy.Column(\n UUID(as_uuid=True),\n primary_key=True,\n server_default=sqlalchemy.text(\"gen_random_uuid()\"),\n )\n\n# Create our session class here, this will stay stateless as we'll bind the\n# engine to each new state we create instead of binding it to the session\n# class.\nSession = sessionmaker()\n\n\ndef listens_for(target, identifier, *args, **kwargs):\n def deco(wrapped):\n def callback(scanner, _name, wrapped):\n wrapped = functools.partial(wrapped, scanner.config)\n event.listen(target, identifier, wrapped, *args, **kwargs)\n\n venusian.attach(wrapped, callback)\n\n return wrapped\n return deco\n\n\ndef _configure_alembic(config):\n alembic_cfg = alembic.config.Config()\n alembic_cfg.set_main_option(\"script_location\", \"warehouse:migrations\")\n alembic_cfg.set_main_option(\n \"url\", config.registry.settings[\"database.url\"],\n )\n return alembic_cfg\n\n\ndef _create_session(request):\n # Create our session\n session = Session(bind=request.registry[\"sqlalchemy.engine\"])\n\n # Set our transaction to read only if the route has been marked as read\n # only.\n for predicate in request.matched_route.predicates:\n if isinstance(predicate, ReadOnlyPredicate) and predicate.val:\n session.execute(\n \"\"\" SET TRANSACTION\n ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE\n \"\"\"\n )\n\n # Register only this particular session with zope.sqlalchemy\n zope.sqlalchemy.register(session, transaction_manager=request.tm)\n\n # Return our session now that it's created and registered\n return session\n\n\ndef includeme(config):\n # Add a directive to get an alembic configuration.\n config.add_directive(\"alembic_config\", _configure_alembic)\n\n # Create our SQLAlchemy Engine.\n config.registry[\"sqlalchemy.engine\"] = sqlalchemy.create_engine(\n config.registry.settings[\"database.url\"],\n isolation_level=\"SERIALIZABLE\",\n )\n\n # Register our request.db property\n config.add_request_method(_create_session, name=\"db\", reify=True)\n\n # Add a route predicate to mark a route as read only.\n config.add_route_predicate(\"read_only\", ReadOnlyPredicate)\n", "path": "warehouse/db.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n config.add_route(\"index\", \"/\", read_only=True)\n\n # Accounts\n 
config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n read_only=True,\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n\n # Packaging\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n config.add_route(\"packaging.file\", \"/packages/{path:.*}\", read_only=True)\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\", read_only=True)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n read_only=True,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n read_only=True,\n )\n\n # Legacy Action URLs\n config.add_pypi_action_route(\"legacy.api.pypi.file_upload\", \"file_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.submit\", \"submit\")\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n \"submit_pkg_info\",\n )\n config.add_pypi_action_route(\"legacy.api.pypi.doc_upload\", \"doc_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.doap\", \"doap\")\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n read_only=True,\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\")\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n )\n", "path": "warehouse/routes.py"}]}
num_tokens: 2,060
num_tokens_diff: 915

problem_id: gh_patches_debug_10680
source: rasdani/github-patches
task_type: git_diff
in_source_id: piskvorky__gensim-902
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ZeroDivisionError: float division by zero in pagerank_weighted.py Got ZeroDivisionError: float division by zero in **pagerank_weighted.py** Input Text: BYAnonymousArabe Error thrown at: `summarize(text_to_sum, word_count=50)` ![gensim error](https://cloud.githubusercontent.com/assets/5749350/18311490/44da660a-7522-11e6-8812-7390ebca8ef6.png) I know word_count is 50 and I'm just giving a word. But ideally it should output that single word instead of throwing an error isn't it? Please throw some light --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gensim/summarization/summarizer.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html 5 6 import logging 7 from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank 8 from gensim.summarization.textcleaner import clean_text_by_sentences as _clean_text_by_sentences 9 from gensim.summarization.commons import build_graph as _build_graph 10 from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes 11 from gensim.summarization.bm25 import get_bm25_weights as _bm25_weights 12 from gensim.corpora import Dictionary 13 from math import log10 as _log10 14 from six.moves import xrange 15 16 17 INPUT_MIN_LENGTH = 10 18 19 WEIGHT_THRESHOLD = 1.e-3 20 21 logger = logging.getLogger(__name__) 22 23 24 def _set_graph_edge_weights(graph): 25 documents = graph.nodes() 26 weights = _bm25_weights(documents) 27 28 for i in xrange(len(documents)): 29 for j in xrange(len(documents)): 30 if i == j or weights[i][j] < WEIGHT_THRESHOLD: 31 continue 32 33 sentence_1 = documents[i] 34 sentence_2 = documents[j] 35 36 edge_1 = (sentence_1, sentence_2) 37 edge_2 = (sentence_2, sentence_1) 38 39 if not graph.has_edge(edge_1): 40 graph.add_edge(edge_1, weights[i][j]) 41 if not graph.has_edge(edge_2): 42 graph.add_edge(edge_2, weights[j][i]) 43 44 # Handles the case in which all similarities are zero. 45 # The resultant summary will consist of random sentences. 
46 if all(graph.edge_weight(edge) == 0 for edge in graph.edges()): 47 _create_valid_graph(graph) 48 49 50 def _create_valid_graph(graph): 51 nodes = graph.nodes() 52 53 for i in xrange(len(nodes)): 54 for j in xrange(len(nodes)): 55 if i == j: 56 continue 57 58 edge = (nodes[i], nodes[j]) 59 60 if graph.has_edge(edge): 61 graph.del_edge(edge) 62 63 graph.add_edge(edge, 1) 64 65 66 def _get_doc_length(doc): 67 return sum([item[1] for item in doc]) 68 69 70 def _get_similarity(doc1, doc2, vec1, vec2): 71 numerator = vec1.dot(vec2.transpose()).toarray()[0][0] 72 length_1 = _get_doc_length(doc1) 73 length_2 = _get_doc_length(doc2) 74 75 denominator = _log10(length_1) + _log10(length_2) if length_1 > 0 and length_2 > 0 else 0 76 77 return numerator / denominator if denominator != 0 else 0 78 79 80 def _build_corpus(sentences): 81 split_tokens = [sentence.token.split() for sentence in sentences] 82 dictionary = Dictionary(split_tokens) 83 return [dictionary.doc2bow(token) for token in split_tokens] 84 85 86 def _get_important_sentences(sentences, corpus, important_docs): 87 hashable_corpus = _build_hasheable_corpus(corpus) 88 sentences_by_corpus = dict(zip(hashable_corpus, sentences)) 89 return [sentences_by_corpus[tuple(important_doc)] for important_doc in important_docs] 90 91 92 def _get_sentences_with_word_count(sentences, word_count): 93 """ Given a list of sentences, returns a list of sentences with a 94 total word count similar to the word count provided.""" 95 length = 0 96 selected_sentences = [] 97 98 # Loops until the word count is reached. 99 for sentence in sentences: 100 words_in_sentence = len(sentence.text.split()) 101 102 # Checks if the inclusion of the sentence gives a better approximation 103 # to the word parameter. 104 if abs(word_count - length - words_in_sentence) > abs(word_count - length): 105 return selected_sentences 106 107 selected_sentences.append(sentence) 108 length += words_in_sentence 109 110 return selected_sentences 111 112 113 def _extract_important_sentences(sentences, corpus, important_docs, word_count): 114 important_sentences = _get_important_sentences(sentences, corpus, important_docs) 115 116 # If no "word_count" option is provided, the number of sentences is 117 # reduced by the provided ratio. Else, the ratio is ignored. 118 return important_sentences if word_count is None else _get_sentences_with_word_count(important_sentences, word_count) 119 120 121 def _format_results(extracted_sentences, split): 122 if split: 123 return [sentence.text for sentence in extracted_sentences] 124 return "\n".join([sentence.text for sentence in extracted_sentences]) 125 126 127 def _build_hasheable_corpus(corpus): 128 return [tuple(doc) for doc in corpus] 129 130 131 def summarize_corpus(corpus, ratio=0.2): 132 """ 133 Returns a list of the most important documents of a corpus using a 134 variation of the TextRank algorithm. 135 The input must have at least INPUT_MIN_LENGTH (%d) documents for the 136 summary to make sense. 137 138 The length of the output can be specified using the ratio parameter, 139 which determines how many documents will be chosen for the summary 140 (defaults at 20%% of the number of documents of the corpus). 141 142 The most important documents are returned as a list sorted by the 143 document score, highest first. 144 145 """ % INPUT_MIN_LENGTH 146 hashable_corpus = _build_hasheable_corpus(corpus) 147 148 # If the corpus is empty, the function ends. 
149 if len(corpus) == 0: 150 logger.warning("Input corpus is empty.") 151 return 152 153 # Warns the user if there are too few documents. 154 if len(corpus) < INPUT_MIN_LENGTH: 155 logger.warning("Input corpus is expected to have at least " + str(INPUT_MIN_LENGTH) + " documents.") 156 157 graph = _build_graph(hashable_corpus) 158 _set_graph_edge_weights(graph) 159 _remove_unreachable_nodes(graph) 160 161 pagerank_scores = _pagerank(graph) 162 163 hashable_corpus.sort(key=lambda doc: pagerank_scores.get(doc, 0), reverse=True) 164 165 return [list(doc) for doc in hashable_corpus[:int(len(corpus) * ratio)]] 166 167 168 def summarize(text, ratio=0.2, word_count=None, split=False): 169 """ 170 Returns a summarized version of the given text using a variation of 171 the TextRank algorithm. 172 The input must be longer than INPUT_MIN_LENGTH sentences for the 173 summary to make sense and must be given as a string. 174 175 The output summary will consist of the most representative sentences 176 and will also be returned as a string, divided by newlines. If the 177 split parameter is set to True, a list of sentences will be 178 returned. 179 180 The length of the output can be specified using the ratio and 181 word_count parameters: 182 ratio should be a number between 0 and 1 that determines the 183 percentage of the number of sentences of the original text to be 184 chosen for the summary (defaults at 0.2). 185 word_count determines how many words will the output contain. 186 If both parameters are provided, the ratio will be ignored. 187 """ 188 # Gets a list of processed sentences. 189 sentences = _clean_text_by_sentences(text) 190 191 # If no sentence could be identified, the function ends. 192 if len(sentences) == 0: 193 logger.warning("Input text is empty.") 194 return 195 196 # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError). 197 if len(sentences) == 1: 198 logger.warning("Summarization not performed since the document has only one sentence.") 199 return text 200 201 # Warns if the text is too short. 202 if len(sentences) < INPUT_MIN_LENGTH: 203 logger.warning("Input text is expected to have at least " + str(INPUT_MIN_LENGTH) + " sentences.") 204 205 corpus = _build_corpus(sentences) 206 207 most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1) 208 209 # Extracts the most important sentences with the selected criterion. 210 extracted_sentences = _extract_important_sentences(sentences, corpus, most_important_docs, word_count) 211 212 # Sorts the extracted sentences by apparition order in the original text. 213 extracted_sentences.sort(key=lambda s: s.index) 214 215 return _format_results(extracted_sentences, split) 216 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gensim/summarization/summarizer.py b/gensim/summarization/summarizer.py --- a/gensim/summarization/summarizer.py +++ b/gensim/summarization/summarizer.py @@ -193,10 +193,9 @@ logger.warning("Input text is empty.") return - # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError). + # If only one sentence is present, the function raises an error (Avoids ZeroDivisionError). if len(sentences) == 1: - logger.warning("Summarization not performed since the document has only one sentence.") - return text + raise ValueError("input must have more than one sentence") # Warns if the text is too short. if len(sentences) < INPUT_MIN_LENGTH:
{"golden_diff": "diff --git a/gensim/summarization/summarizer.py b/gensim/summarization/summarizer.py\n--- a/gensim/summarization/summarizer.py\n+++ b/gensim/summarization/summarizer.py\n@@ -193,10 +193,9 @@\n logger.warning(\"Input text is empty.\")\n return\n \n- # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError). \n+ # If only one sentence is present, the function raises an error (Avoids ZeroDivisionError). \n if len(sentences) == 1:\n- logger.warning(\"Summarization not performed since the document has only one sentence.\")\n- return text\n+ raise ValueError(\"input must have more than one sentence\")\n \n # Warns if the text is too short.\n if len(sentences) < INPUT_MIN_LENGTH:\n", "issue": "ZeroDivisionError: float division by zero in pagerank_weighted.py\nGot ZeroDivisionError: float division by zero in **pagerank_weighted.py**\n\nInput Text: BYAnonymousArabe\n\nError thrown at: `summarize(text_to_sum, word_count=50)`\n\n![gensim error](https://cloud.githubusercontent.com/assets/5749350/18311490/44da660a-7522-11e6-8812-7390ebca8ef6.png)\n\nI know word_count is 50 and I'm just giving a word. But ideally it should output that single word instead of throwing an error isn't it?\n\nPlease throw some light\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nimport logging\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_sentences as _clean_text_by_sentences\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom gensim.summarization.bm25 import get_bm25_weights as _bm25_weights\nfrom gensim.corpora import Dictionary\nfrom math import log10 as _log10\nfrom six.moves import xrange\n\n\nINPUT_MIN_LENGTH = 10\n\nWEIGHT_THRESHOLD = 1.e-3\n\nlogger = logging.getLogger(__name__)\n\n\ndef _set_graph_edge_weights(graph):\n documents = graph.nodes()\n weights = _bm25_weights(documents)\n\n for i in xrange(len(documents)):\n for j in xrange(len(documents)):\n if i == j or weights[i][j] < WEIGHT_THRESHOLD:\n continue\n\n sentence_1 = documents[i]\n sentence_2 = documents[j]\n\n edge_1 = (sentence_1, sentence_2)\n edge_2 = (sentence_2, sentence_1)\n\n if not graph.has_edge(edge_1):\n graph.add_edge(edge_1, weights[i][j])\n if not graph.has_edge(edge_2):\n graph.add_edge(edge_2, weights[j][i])\n\n # Handles the case in which all similarities are zero.\n # The resultant summary will consist of random sentences.\n if all(graph.edge_weight(edge) == 0 for edge in graph.edges()):\n _create_valid_graph(graph)\n\n\ndef _create_valid_graph(graph):\n nodes = graph.nodes()\n\n for i in xrange(len(nodes)):\n for j in xrange(len(nodes)):\n if i == j:\n continue\n\n edge = (nodes[i], nodes[j])\n\n if graph.has_edge(edge):\n graph.del_edge(edge)\n\n graph.add_edge(edge, 1)\n\n\ndef _get_doc_length(doc):\n return sum([item[1] for item in doc])\n\n\ndef _get_similarity(doc1, doc2, vec1, vec2):\n numerator = vec1.dot(vec2.transpose()).toarray()[0][0]\n length_1 = _get_doc_length(doc1)\n length_2 = _get_doc_length(doc2)\n\n denominator = _log10(length_1) + _log10(length_2) if length_1 > 0 and length_2 > 0 else 0\n\n return numerator / denominator if denominator != 0 else 0\n\n\ndef _build_corpus(sentences):\n split_tokens = [sentence.token.split() for 
sentence in sentences]\n dictionary = Dictionary(split_tokens)\n return [dictionary.doc2bow(token) for token in split_tokens]\n\n\ndef _get_important_sentences(sentences, corpus, important_docs):\n hashable_corpus = _build_hasheable_corpus(corpus)\n sentences_by_corpus = dict(zip(hashable_corpus, sentences))\n return [sentences_by_corpus[tuple(important_doc)] for important_doc in important_docs]\n\n\ndef _get_sentences_with_word_count(sentences, word_count):\n \"\"\" Given a list of sentences, returns a list of sentences with a\n total word count similar to the word count provided.\"\"\"\n length = 0\n selected_sentences = []\n\n # Loops until the word count is reached.\n for sentence in sentences:\n words_in_sentence = len(sentence.text.split())\n\n # Checks if the inclusion of the sentence gives a better approximation\n # to the word parameter.\n if abs(word_count - length - words_in_sentence) > abs(word_count - length):\n return selected_sentences\n\n selected_sentences.append(sentence)\n length += words_in_sentence\n\n return selected_sentences\n\n\ndef _extract_important_sentences(sentences, corpus, important_docs, word_count):\n important_sentences = _get_important_sentences(sentences, corpus, important_docs)\n\n # If no \"word_count\" option is provided, the number of sentences is\n # reduced by the provided ratio. Else, the ratio is ignored.\n return important_sentences if word_count is None else _get_sentences_with_word_count(important_sentences, word_count)\n\n\ndef _format_results(extracted_sentences, split):\n if split:\n return [sentence.text for sentence in extracted_sentences]\n return \"\\n\".join([sentence.text for sentence in extracted_sentences])\n\n\ndef _build_hasheable_corpus(corpus):\n return [tuple(doc) for doc in corpus]\n\n\ndef summarize_corpus(corpus, ratio=0.2):\n \"\"\"\n Returns a list of the most important documents of a corpus using a\n variation of the TextRank algorithm.\n The input must have at least INPUT_MIN_LENGTH (%d) documents for the\n summary to make sense.\n\n The length of the output can be specified using the ratio parameter,\n which determines how many documents will be chosen for the summary\n (defaults at 20%% of the number of documents of the corpus).\n\n The most important documents are returned as a list sorted by the\n document score, highest first.\n\n \"\"\" % INPUT_MIN_LENGTH\n hashable_corpus = _build_hasheable_corpus(corpus)\n\n # If the corpus is empty, the function ends.\n if len(corpus) == 0:\n logger.warning(\"Input corpus is empty.\")\n return\n\n # Warns the user if there are too few documents.\n if len(corpus) < INPUT_MIN_LENGTH:\n logger.warning(\"Input corpus is expected to have at least \" + str(INPUT_MIN_LENGTH) + \" documents.\")\n\n graph = _build_graph(hashable_corpus)\n _set_graph_edge_weights(graph)\n _remove_unreachable_nodes(graph)\n\n pagerank_scores = _pagerank(graph)\n\n hashable_corpus.sort(key=lambda doc: pagerank_scores.get(doc, 0), reverse=True)\n\n return [list(doc) for doc in hashable_corpus[:int(len(corpus) * ratio)]]\n\n\ndef summarize(text, ratio=0.2, word_count=None, split=False):\n \"\"\"\n Returns a summarized version of the given text using a variation of\n the TextRank algorithm.\n The input must be longer than INPUT_MIN_LENGTH sentences for the\n summary to make sense and must be given as a string.\n\n The output summary will consist of the most representative sentences\n and will also be returned as a string, divided by newlines. 
If the\n split parameter is set to True, a list of sentences will be\n returned.\n\n The length of the output can be specified using the ratio and\n word_count parameters:\n ratio should be a number between 0 and 1 that determines the\n percentage of the number of sentences of the original text to be\n chosen for the summary (defaults at 0.2).\n word_count determines how many words will the output contain.\n If both parameters are provided, the ratio will be ignored.\n \"\"\"\n # Gets a list of processed sentences.\n sentences = _clean_text_by_sentences(text)\n\n # If no sentence could be identified, the function ends.\n if len(sentences) == 0:\n logger.warning(\"Input text is empty.\")\n return\n\n # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError). \n if len(sentences) == 1:\n logger.warning(\"Summarization not performed since the document has only one sentence.\")\n return text\n \n # Warns if the text is too short.\n if len(sentences) < INPUT_MIN_LENGTH:\n logger.warning(\"Input text is expected to have at least \" + str(INPUT_MIN_LENGTH) + \" sentences.\")\n\n corpus = _build_corpus(sentences)\n\n most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1)\n\n # Extracts the most important sentences with the selected criterion.\n extracted_sentences = _extract_important_sentences(sentences, corpus, most_important_docs, word_count)\n\n # Sorts the extracted sentences by apparition order in the original text.\n extracted_sentences.sort(key=lambda s: s.index)\n\n return _format_results(extracted_sentences, split)\n", "path": "gensim/summarization/summarizer.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nimport logging\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_sentences as _clean_text_by_sentences\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom gensim.summarization.bm25 import get_bm25_weights as _bm25_weights\nfrom gensim.corpora import Dictionary\nfrom math import log10 as _log10\nfrom six.moves import xrange\n\n\nINPUT_MIN_LENGTH = 10\n\nWEIGHT_THRESHOLD = 1.e-3\n\nlogger = logging.getLogger(__name__)\n\n\ndef _set_graph_edge_weights(graph):\n documents = graph.nodes()\n weights = _bm25_weights(documents)\n\n for i in xrange(len(documents)):\n for j in xrange(len(documents)):\n if i == j or weights[i][j] < WEIGHT_THRESHOLD:\n continue\n\n sentence_1 = documents[i]\n sentence_2 = documents[j]\n\n edge_1 = (sentence_1, sentence_2)\n edge_2 = (sentence_2, sentence_1)\n\n if not graph.has_edge(edge_1):\n graph.add_edge(edge_1, weights[i][j])\n if not graph.has_edge(edge_2):\n graph.add_edge(edge_2, weights[j][i])\n\n # Handles the case in which all similarities are zero.\n # The resultant summary will consist of random sentences.\n if all(graph.edge_weight(edge) == 0 for edge in graph.edges()):\n _create_valid_graph(graph)\n\n\ndef _create_valid_graph(graph):\n nodes = graph.nodes()\n\n for i in xrange(len(nodes)):\n for j in xrange(len(nodes)):\n if i == j:\n continue\n\n edge = (nodes[i], nodes[j])\n\n if graph.has_edge(edge):\n graph.del_edge(edge)\n\n graph.add_edge(edge, 1)\n\n\ndef _get_doc_length(doc):\n return sum([item[1] for item in doc])\n\n\ndef _get_similarity(doc1, 
doc2, vec1, vec2):\n numerator = vec1.dot(vec2.transpose()).toarray()[0][0]\n length_1 = _get_doc_length(doc1)\n length_2 = _get_doc_length(doc2)\n\n denominator = _log10(length_1) + _log10(length_2) if length_1 > 0 and length_2 > 0 else 0\n\n return numerator / denominator if denominator != 0 else 0\n\n\ndef _build_corpus(sentences):\n split_tokens = [sentence.token.split() for sentence in sentences]\n dictionary = Dictionary(split_tokens)\n return [dictionary.doc2bow(token) for token in split_tokens]\n\n\ndef _get_important_sentences(sentences, corpus, important_docs):\n hashable_corpus = _build_hasheable_corpus(corpus)\n sentences_by_corpus = dict(zip(hashable_corpus, sentences))\n return [sentences_by_corpus[tuple(important_doc)] for important_doc in important_docs]\n\n\ndef _get_sentences_with_word_count(sentences, word_count):\n \"\"\" Given a list of sentences, returns a list of sentences with a\n total word count similar to the word count provided.\"\"\"\n length = 0\n selected_sentences = []\n\n # Loops until the word count is reached.\n for sentence in sentences:\n words_in_sentence = len(sentence.text.split())\n\n # Checks if the inclusion of the sentence gives a better approximation\n # to the word parameter.\n if abs(word_count - length - words_in_sentence) > abs(word_count - length):\n return selected_sentences\n\n selected_sentences.append(sentence)\n length += words_in_sentence\n\n return selected_sentences\n\n\ndef _extract_important_sentences(sentences, corpus, important_docs, word_count):\n important_sentences = _get_important_sentences(sentences, corpus, important_docs)\n\n # If no \"word_count\" option is provided, the number of sentences is\n # reduced by the provided ratio. Else, the ratio is ignored.\n return important_sentences if word_count is None else _get_sentences_with_word_count(important_sentences, word_count)\n\n\ndef _format_results(extracted_sentences, split):\n if split:\n return [sentence.text for sentence in extracted_sentences]\n return \"\\n\".join([sentence.text for sentence in extracted_sentences])\n\n\ndef _build_hasheable_corpus(corpus):\n return [tuple(doc) for doc in corpus]\n\n\ndef summarize_corpus(corpus, ratio=0.2):\n \"\"\"\n Returns a list of the most important documents of a corpus using a\n variation of the TextRank algorithm.\n The input must have at least INPUT_MIN_LENGTH (%d) documents for the\n summary to make sense.\n\n The length of the output can be specified using the ratio parameter,\n which determines how many documents will be chosen for the summary\n (defaults at 20%% of the number of documents of the corpus).\n\n The most important documents are returned as a list sorted by the\n document score, highest first.\n\n \"\"\" % INPUT_MIN_LENGTH\n hashable_corpus = _build_hasheable_corpus(corpus)\n\n # If the corpus is empty, the function ends.\n if len(corpus) == 0:\n logger.warning(\"Input corpus is empty.\")\n return\n\n # Warns the user if there are too few documents.\n if len(corpus) < INPUT_MIN_LENGTH:\n logger.warning(\"Input corpus is expected to have at least \" + str(INPUT_MIN_LENGTH) + \" documents.\")\n\n graph = _build_graph(hashable_corpus)\n _set_graph_edge_weights(graph)\n _remove_unreachable_nodes(graph)\n\n pagerank_scores = _pagerank(graph)\n\n hashable_corpus.sort(key=lambda doc: pagerank_scores.get(doc, 0), reverse=True)\n\n return [list(doc) for doc in hashable_corpus[:int(len(corpus) * ratio)]]\n\n\ndef summarize(text, ratio=0.2, word_count=None, split=False):\n \"\"\"\n Returns a summarized version of the 
given text using a variation of\n the TextRank algorithm.\n The input must be longer than INPUT_MIN_LENGTH sentences for the\n summary to make sense and must be given as a string.\n\n The output summary will consist of the most representative sentences\n and will also be returned as a string, divided by newlines. If the\n split parameter is set to True, a list of sentences will be\n returned.\n\n The length of the output can be specified using the ratio and\n word_count parameters:\n ratio should be a number between 0 and 1 that determines the\n percentage of the number of sentences of the original text to be\n chosen for the summary (defaults at 0.2).\n word_count determines how many words will the output contain.\n If both parameters are provided, the ratio will be ignored.\n \"\"\"\n # Gets a list of processed sentences.\n sentences = _clean_text_by_sentences(text)\n\n # If no sentence could be identified, the function ends.\n if len(sentences) == 0:\n logger.warning(\"Input text is empty.\")\n return\n\n # If only one sentence is present, the function raises an error (Avoids ZeroDivisionError). \n if len(sentences) == 1:\n raise ValueError(\"input must have more than one sentence\")\n \n # Warns if the text is too short.\n if len(sentences) < INPUT_MIN_LENGTH:\n logger.warning(\"Input text is expected to have at least \" + str(INPUT_MIN_LENGTH) + \" sentences.\")\n\n corpus = _build_corpus(sentences)\n\n most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1)\n\n # Extracts the most important sentences with the selected criterion.\n extracted_sentences = _extract_important_sentences(sentences, corpus, most_important_docs, word_count)\n\n # Sorts the extracted sentences by apparition order in the original text.\n extracted_sentences.sort(key=lambda s: s.index)\n\n return _format_results(extracted_sentences, split)\n", "path": "gensim/summarization/summarizer.py"}]}
2,778
203
gh_patches_debug_21581
rasdani/github-patches
git_diff
elastic__apm-agent-python-1134
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cloud metadata collection is leaking sockets When collecting the meta data information from cloud providers a socket connection is opened but not closed. This causes `ResourceWarnings` when running pytest. The issue seems to be the socket that is opened [here](https://github.com/elastic/apm-agent-python/blob/master/elasticapm/utils/cloud.py#L138). **To Reproduce** Create a file `test.py` with the following content: ```python import elasticapm def test(): c = elasticapm.Client({"SERVICE_NAME": "dev"}) c.build_metadata() assert False ``` Run the tests with: ```cmd pytest --pythonwarnings once test.py ``` You should see warnings similar to this one (you have to run this on a cloud hosted VM): ```cmd test.py::test /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52428), raddr=('169.254.169.254', 80)> socket.create_connection(("169.254.169.254", 80), 0.1) test.py::test /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52430), raddr=('169.254.169.254', 80)> socket.create_connection(("169.254.169.254", 80), 0.1) test.py::test /tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52432), raddr=('169.254.169.254', 80)> data = cloud.aws_metadata() test.py::test /tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52434), raddr=('169.254.169.254', 80)> data = cloud.aws_metadata() test.py::test /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52436), raddr=('169.254.169.254', 80)> socket.create_connection(("169.254.169.254", 80), 0.1) test.py::test /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52438), raddr=('169.254.169.254', 80)> socket.create_connection(("169.254.169.254", 80), 0.1) ``` **Environment (please complete the following information)** - OS: Mac and Linux - Python version: 3.6.6 - APM Server version: - Agent version: 5.10.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/utils/cloud.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 
15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 import json 32 import os 33 import socket 34 35 import urllib3 36 37 38 def aws_metadata(): 39 """ 40 Fetch AWS metadata from the local metadata server. If metadata server is 41 not found, return an empty dictionary 42 """ 43 http = urllib3.PoolManager() 44 45 try: 46 # This will throw an error if the metadata server isn't available, 47 # and will be quiet in the logs, unlike urllib3 48 socket.create_connection(("169.254.169.254", 80), 0.1) 49 50 try: 51 # This whole block is almost unnecessary. IMDSv1 will be supported 52 # indefinitely, so the only time this block is needed is if a 53 # security-conscious user has set the metadata service to require 54 # IMDSv2. Thus, the very expansive try:except: coverage. 55 56 # TODO: should we have a config option to completely disable IMDSv2 to reduce overhead? 57 ttl_header = {"X-aws-ec2-metadata-token-ttl-seconds": "300"} 58 token_url = "http://169.254.169.254/latest/api/token" 59 token_request = http.request("PUT", token_url, headers=ttl_header, timeout=1.0, retries=False) 60 token = token_request.data.decode("utf-8") 61 aws_token_header = {"X-aws-ec2-metadata-token": token} if token else {} 62 except Exception: 63 aws_token_header = {} 64 metadata = json.loads( 65 http.request( 66 "GET", 67 "http://169.254.169.254/latest/dynamic/instance-identity/document", 68 headers=aws_token_header, 69 timeout=1.0, 70 retries=False, 71 ).data.decode("utf-8") 72 ) 73 74 return { 75 "account": {"id": metadata["accountId"]}, 76 "instance": {"id": metadata["instanceId"]}, 77 "availability_zone": metadata["availabilityZone"], 78 "machine": {"type": metadata["instanceType"]}, 79 "provider": "aws", 80 "region": metadata["region"], 81 } 82 83 except Exception: 84 # Not on an AWS box 85 return {} 86 87 88 def gcp_metadata(): 89 """ 90 Fetch GCP metadata from the local metadata server. 
If metadata server is 91 not found, return an empty dictionary 92 """ 93 headers = {"Metadata-Flavor": "Google"} 94 http = urllib3.PoolManager() 95 96 try: 97 # This will throw an error if the metadata server isn't available, 98 # and will be quiet in the logs, unlike urllib3 99 socket.getaddrinfo("metadata.google.internal", 80, 0, socket.SOCK_STREAM) 100 101 metadata = json.loads( 102 http.request( 103 "GET", 104 "http://metadata.google.internal/computeMetadata/v1/?recursive=true", 105 headers=headers, 106 timeout=1.0, 107 retries=False, 108 ).data.decode("utf-8") 109 ) 110 111 availability_zone = os.path.split(metadata["instance"]["zone"])[1] 112 113 return { 114 "provider": "gcp", 115 "instance": {"id": str(metadata["instance"]["id"]), "name": metadata["instance"]["name"]}, 116 "project": {"id": str(metadata["project"]["numericProjectId"]), "name": metadata["project"]["projectId"]}, 117 "availability_zone": availability_zone, 118 "region": availability_zone.rsplit("-", 1)[0], 119 "machine": {"type": metadata["instance"]["machineType"].split("/")[-1]}, 120 } 121 122 except Exception: 123 # Not on a gcp box 124 return {} 125 126 127 def azure_metadata(): 128 """ 129 Fetch Azure metadata from the local metadata server. If metadata server is 130 not found, return an empty dictionary 131 """ 132 headers = {"Metadata": "true"} 133 http = urllib3.PoolManager() 134 135 try: 136 # This will throw an error if the metadata server isn't available, 137 # and will be quiet in the logs, unlike urllib3 138 socket.create_connection(("169.254.169.254", 80), 0.1) 139 140 # Can't use newest metadata service version, as it's not guaranteed 141 # to be available in all regions 142 metadata = json.loads( 143 http.request( 144 "GET", 145 "http://169.254.169.254/metadata/instance/compute?api-version=2019-08-15", 146 headers=headers, 147 timeout=1.0, 148 retries=False, 149 ).data.decode("utf-8") 150 ) 151 152 ret = { 153 "account": {"id": metadata["subscriptionId"]}, 154 "instance": {"id": metadata["vmId"], "name": metadata["name"]}, 155 "project": {"name": metadata["resourceGroupName"]}, 156 "availability_zone": metadata["zone"], 157 "machine": {"type": metadata["vmSize"]}, 158 "provider": "azure", 159 "region": metadata["location"], 160 } 161 162 if not ret["availability_zone"]: 163 ret.pop("availability_zone") 164 return ret 165 166 except Exception: 167 # Not on an Azure box, maybe an azure app service? 
168 return azure_app_service_metadata() 169 170 171 def azure_app_service_metadata(): 172 ret = {"provider": "azure"} 173 website_owner_name = os.environ.get("WEBSITE_OWNER_NAME") 174 website_instance_id = os.environ.get("WEBSITE_INSTANCE_ID") 175 website_site_name = os.environ.get("WEBSITE_SITE_NAME") 176 website_resource_group = os.environ.get("WEBSITE_RESOURCE_GROUP") 177 if not all((website_owner_name, website_instance_id, website_site_name, website_resource_group)): 178 return {} 179 # Format of website_owner_name: {subscription id}+{app service plan resource group}-{region}webspace{.*} 180 if "+" not in website_owner_name: 181 return {} 182 try: 183 account_id, website_owner_name = website_owner_name.split("+") 184 ret["account"] = {"id": account_id} 185 region, _ = website_owner_name.split("webspace") 186 ret["region"] = region.rsplit("-", 1)[1] 187 except Exception: 188 return {} 189 190 ret["instance"] = {"id": website_instance_id, "name": website_site_name} 191 ret["project"] = {"name": website_resource_group} 192 193 return ret 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/utils/cloud.py b/elasticapm/utils/cloud.py --- a/elasticapm/utils/cloud.py +++ b/elasticapm/utils/cloud.py @@ -45,7 +45,8 @@ try: # This will throw an error if the metadata server isn't available, # and will be quiet in the logs, unlike urllib3 - socket.create_connection(("169.254.169.254", 80), 0.1) + with socket.create_connection(("169.254.169.254", 80), 0.1): + pass try: # This whole block is almost unnecessary. IMDSv1 will be supported @@ -135,7 +136,8 @@ try: # This will throw an error if the metadata server isn't available, # and will be quiet in the logs, unlike urllib3 - socket.create_connection(("169.254.169.254", 80), 0.1) + with socket.create_connection(("169.254.169.254", 80), 0.1): + pass # Can't use newest metadata service version, as it's not guaranteed # to be available in all regions
{"golden_diff": "diff --git a/elasticapm/utils/cloud.py b/elasticapm/utils/cloud.py\n--- a/elasticapm/utils/cloud.py\n+++ b/elasticapm/utils/cloud.py\n@@ -45,7 +45,8 @@\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n- socket.create_connection((\"169.254.169.254\", 80), 0.1)\n+ with socket.create_connection((\"169.254.169.254\", 80), 0.1):\n+ pass\n \n try:\n # This whole block is almost unnecessary. IMDSv1 will be supported\n@@ -135,7 +136,8 @@\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n- socket.create_connection((\"169.254.169.254\", 80), 0.1)\n+ with socket.create_connection((\"169.254.169.254\", 80), 0.1):\n+ pass\n \n # Can't use newest metadata service version, as it's not guaranteed\n # to be available in all regions\n", "issue": "Cloud metadata collection is leaking sockets\nWhen collecting the meta data information from cloud providers a socket connection is opened but not closed. This causes `ResourceWarnings` when running pytest. The issue seems to be the socket that is opened [here](https://github.com/elastic/apm-agent-python/blob/master/elasticapm/utils/cloud.py#L138).\r\n\r\n**To Reproduce**\r\nCreate a file `test.py` with the following content:\r\n\r\n```python\r\nimport elasticapm\r\n\r\ndef test():\r\n c = elasticapm.Client({\"SERVICE_NAME\": \"dev\"})\r\n c.build_metadata()\r\n assert False\r\n```\r\n\r\nRun the tests with:\r\n```cmd\r\npytest --pythonwarnings once test.py\r\n```\r\n\r\nYou should see warnings similar to this one (you have to run this on a cloud hosted VM):\r\n```cmd\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52428), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52430), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52432), raddr=('169.254.169.254', 80)>\r\n data = cloud.aws_metadata()\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52434), raddr=('169.254.169.254', 80)>\r\n data = cloud.aws_metadata()\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52436), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52438), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n```\r\n**Environment (please complete the following 
information)**\r\n- OS: Mac and Linux\r\n- Python version: 3.6.6\r\n- APM Server version:\r\n- Agent version: 5.10.0\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport json\nimport os\nimport socket\n\nimport urllib3\n\n\ndef aws_metadata():\n \"\"\"\n Fetch AWS metadata from the local metadata server. If metadata server is\n not found, return an empty dictionary\n \"\"\"\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\n\n try:\n # This whole block is almost unnecessary. IMDSv1 will be supported\n # indefinitely, so the only time this block is needed is if a\n # security-conscious user has set the metadata service to require\n # IMDSv2. 
Thus, the very expansive try:except: coverage.\n\n # TODO: should we have a config option to completely disable IMDSv2 to reduce overhead?\n ttl_header = {\"X-aws-ec2-metadata-token-ttl-seconds\": \"300\"}\n token_url = \"http://169.254.169.254/latest/api/token\"\n token_request = http.request(\"PUT\", token_url, headers=ttl_header, timeout=1.0, retries=False)\n token = token_request.data.decode(\"utf-8\")\n aws_token_header = {\"X-aws-ec2-metadata-token\": token} if token else {}\n except Exception:\n aws_token_header = {}\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://169.254.169.254/latest/dynamic/instance-identity/document\",\n headers=aws_token_header,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n return {\n \"account\": {\"id\": metadata[\"accountId\"]},\n \"instance\": {\"id\": metadata[\"instanceId\"]},\n \"availability_zone\": metadata[\"availabilityZone\"],\n \"machine\": {\"type\": metadata[\"instanceType\"]},\n \"provider\": \"aws\",\n \"region\": metadata[\"region\"],\n }\n\n except Exception:\n # Not on an AWS box\n return {}\n\n\ndef gcp_metadata():\n \"\"\"\n Fetch GCP metadata from the local metadata server. If metadata server is\n not found, return an empty dictionary\n \"\"\"\n headers = {\"Metadata-Flavor\": \"Google\"}\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n socket.getaddrinfo(\"metadata.google.internal\", 80, 0, socket.SOCK_STREAM)\n\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://metadata.google.internal/computeMetadata/v1/?recursive=true\",\n headers=headers,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n availability_zone = os.path.split(metadata[\"instance\"][\"zone\"])[1]\n\n return {\n \"provider\": \"gcp\",\n \"instance\": {\"id\": str(metadata[\"instance\"][\"id\"]), \"name\": metadata[\"instance\"][\"name\"]},\n \"project\": {\"id\": str(metadata[\"project\"][\"numericProjectId\"]), \"name\": metadata[\"project\"][\"projectId\"]},\n \"availability_zone\": availability_zone,\n \"region\": availability_zone.rsplit(\"-\", 1)[0],\n \"machine\": {\"type\": metadata[\"instance\"][\"machineType\"].split(\"/\")[-1]},\n }\n\n except Exception:\n # Not on a gcp box\n return {}\n\n\ndef azure_metadata():\n \"\"\"\n Fetch Azure metadata from the local metadata server. 
If metadata server is\n not found, return an empty dictionary\n \"\"\"\n headers = {\"Metadata\": \"true\"}\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\n\n # Can't use newest metadata service version, as it's not guaranteed\n # to be available in all regions\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://169.254.169.254/metadata/instance/compute?api-version=2019-08-15\",\n headers=headers,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n ret = {\n \"account\": {\"id\": metadata[\"subscriptionId\"]},\n \"instance\": {\"id\": metadata[\"vmId\"], \"name\": metadata[\"name\"]},\n \"project\": {\"name\": metadata[\"resourceGroupName\"]},\n \"availability_zone\": metadata[\"zone\"],\n \"machine\": {\"type\": metadata[\"vmSize\"]},\n \"provider\": \"azure\",\n \"region\": metadata[\"location\"],\n }\n\n if not ret[\"availability_zone\"]:\n ret.pop(\"availability_zone\")\n return ret\n\n except Exception:\n # Not on an Azure box, maybe an azure app service?\n return azure_app_service_metadata()\n\n\ndef azure_app_service_metadata():\n ret = {\"provider\": \"azure\"}\n website_owner_name = os.environ.get(\"WEBSITE_OWNER_NAME\")\n website_instance_id = os.environ.get(\"WEBSITE_INSTANCE_ID\")\n website_site_name = os.environ.get(\"WEBSITE_SITE_NAME\")\n website_resource_group = os.environ.get(\"WEBSITE_RESOURCE_GROUP\")\n if not all((website_owner_name, website_instance_id, website_site_name, website_resource_group)):\n return {}\n # Format of website_owner_name: {subscription id}+{app service plan resource group}-{region}webspace{.*}\n if \"+\" not in website_owner_name:\n return {}\n try:\n account_id, website_owner_name = website_owner_name.split(\"+\")\n ret[\"account\"] = {\"id\": account_id}\n region, _ = website_owner_name.split(\"webspace\")\n ret[\"region\"] = region.rsplit(\"-\", 1)[1]\n except Exception:\n return {}\n\n ret[\"instance\"] = {\"id\": website_instance_id, \"name\": website_site_name}\n ret[\"project\"] = {\"name\": website_resource_group}\n\n return ret\n", "path": "elasticapm/utils/cloud.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport json\nimport os\nimport socket\n\nimport urllib3\n\n\ndef aws_metadata():\n \"\"\"\n Fetch AWS metadata from the local metadata server. If metadata server is\n not found, return an empty dictionary\n \"\"\"\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n with socket.create_connection((\"169.254.169.254\", 80), 0.1):\n pass\n\n try:\n # This whole block is almost unnecessary. IMDSv1 will be supported\n # indefinitely, so the only time this block is needed is if a\n # security-conscious user has set the metadata service to require\n # IMDSv2. Thus, the very expansive try:except: coverage.\n\n # TODO: should we have a config option to completely disable IMDSv2 to reduce overhead?\n ttl_header = {\"X-aws-ec2-metadata-token-ttl-seconds\": \"300\"}\n token_url = \"http://169.254.169.254/latest/api/token\"\n token_request = http.request(\"PUT\", token_url, headers=ttl_header, timeout=1.0, retries=False)\n token = token_request.data.decode(\"utf-8\")\n aws_token_header = {\"X-aws-ec2-metadata-token\": token} if token else {}\n except Exception:\n aws_token_header = {}\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://169.254.169.254/latest/dynamic/instance-identity/document\",\n headers=aws_token_header,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n return {\n \"account\": {\"id\": metadata[\"accountId\"]},\n \"instance\": {\"id\": metadata[\"instanceId\"]},\n \"availability_zone\": metadata[\"availabilityZone\"],\n \"machine\": {\"type\": metadata[\"instanceType\"]},\n \"provider\": \"aws\",\n \"region\": metadata[\"region\"],\n }\n\n except Exception:\n # Not on an AWS box\n return {}\n\n\ndef gcp_metadata():\n \"\"\"\n Fetch GCP metadata from the local metadata server. 
If metadata server is\n not found, return an empty dictionary\n \"\"\"\n headers = {\"Metadata-Flavor\": \"Google\"}\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n socket.getaddrinfo(\"metadata.google.internal\", 80, 0, socket.SOCK_STREAM)\n\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://metadata.google.internal/computeMetadata/v1/?recursive=true\",\n headers=headers,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n availability_zone = os.path.split(metadata[\"instance\"][\"zone\"])[1]\n\n return {\n \"provider\": \"gcp\",\n \"instance\": {\"id\": str(metadata[\"instance\"][\"id\"]), \"name\": metadata[\"instance\"][\"name\"]},\n \"project\": {\"id\": str(metadata[\"project\"][\"numericProjectId\"]), \"name\": metadata[\"project\"][\"projectId\"]},\n \"availability_zone\": availability_zone,\n \"region\": availability_zone.rsplit(\"-\", 1)[0],\n \"machine\": {\"type\": metadata[\"instance\"][\"machineType\"].split(\"/\")[-1]},\n }\n\n except Exception:\n # Not on a gcp box\n return {}\n\n\ndef azure_metadata():\n \"\"\"\n Fetch Azure metadata from the local metadata server. If metadata server is\n not found, return an empty dictionary\n \"\"\"\n headers = {\"Metadata\": \"true\"}\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n with socket.create_connection((\"169.254.169.254\", 80), 0.1):\n pass\n\n # Can't use newest metadata service version, as it's not guaranteed\n # to be available in all regions\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://169.254.169.254/metadata/instance/compute?api-version=2019-08-15\",\n headers=headers,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n ret = {\n \"account\": {\"id\": metadata[\"subscriptionId\"]},\n \"instance\": {\"id\": metadata[\"vmId\"], \"name\": metadata[\"name\"]},\n \"project\": {\"name\": metadata[\"resourceGroupName\"]},\n \"availability_zone\": metadata[\"zone\"],\n \"machine\": {\"type\": metadata[\"vmSize\"]},\n \"provider\": \"azure\",\n \"region\": metadata[\"location\"],\n }\n\n if not ret[\"availability_zone\"]:\n ret.pop(\"availability_zone\")\n return ret\n\n except Exception:\n # Not on an Azure box, maybe an azure app service?\n return azure_app_service_metadata()\n\n\ndef azure_app_service_metadata():\n ret = {\"provider\": \"azure\"}\n website_owner_name = os.environ.get(\"WEBSITE_OWNER_NAME\")\n website_instance_id = os.environ.get(\"WEBSITE_INSTANCE_ID\")\n website_site_name = os.environ.get(\"WEBSITE_SITE_NAME\")\n website_resource_group = os.environ.get(\"WEBSITE_RESOURCE_GROUP\")\n if not all((website_owner_name, website_instance_id, website_site_name, website_resource_group)):\n return {}\n # Format of website_owner_name: {subscription id}+{app service plan resource group}-{region}webspace{.*}\n if \"+\" not in website_owner_name:\n return {}\n try:\n account_id, website_owner_name = website_owner_name.split(\"+\")\n ret[\"account\"] = {\"id\": account_id}\n region, _ = website_owner_name.split(\"webspace\")\n ret[\"region\"] = region.rsplit(\"-\", 1)[1]\n except Exception:\n return {}\n\n ret[\"instance\"] = {\"id\": website_instance_id, \"name\": website_site_name}\n ret[\"project\"] = {\"name\": website_resource_group}\n\n return ret\n", "path": "elasticapm/utils/cloud.py"}]}
3,440
309
gh_patches_debug_19733
rasdani/github-patches
git_diff
bids-standard__pybids-81
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fieldmaps - _magnitude2.nii[.gz] is optional As per these lines: https://github.com/INCF/pybids/blob/800d15053952991c9cd4a00cf0039288d489ca12/bids/grabbids/bids_layout.py#L143-L148 when a fieldmap type is identified as `'phasediff'` then pybids understands that there is always a `_magnitude2.nii[.gz]` file. However, this is maked as optional in the specification. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bids/grabbids/bids_layout.py` Content: ``` 1 import os 2 import json 3 4 from os.path import dirname 5 from os.path import abspath 6 from os.path import join as pathjoin 7 8 from .bids_validator import BIDSValidator 9 from grabbit import Layout 10 11 __all__ = ['BIDSLayout'] 12 13 14 class BIDSLayout(Layout): 15 16 def __init__(self, path, config=None, validate=False, 17 index_associated=True, **kwargs): 18 self.validator = BIDSValidator(index_associated=index_associated) 19 self.validate = validate 20 if config is None: 21 root = dirname(abspath(__file__)) 22 config = pathjoin(root, 'config', 'bids.json') 23 super(BIDSLayout, self).__init__(path, config, 24 dynamic_getters=True, **kwargs) 25 26 def _validate_file(self, f): 27 # If validate=True then checks files according to BIDS and 28 # returns False if file doesn't fit BIDS specification 29 if not self.validate: 30 return True 31 to_check = f.path 32 to_check = to_check.split(os.path.abspath(self.root), maxsplit=1)[1] 33 34 sep = os.path.sep 35 if to_check[:len(sep)] != sep: 36 to_check = sep + to_check 37 else: 38 None 39 40 return self.validator.is_bids(to_check) 41 42 def _get_nearest_helper(self, path, extension, type=None, **kwargs): 43 """ Helper function for grabbit get_nearest """ 44 path = abspath(path) 45 46 if path not in self.files: 47 raise ValueError("File '%s' could not be found in the current BIDS" 48 " project." % path) 49 50 if not type: 51 # Constrain the search to .json files with the same type as target 52 type = self.files[path].entities['type'] 53 54 tmp = self.get_nearest(path, extensions=extension, all_=True, 55 type=type, ignore_strict_entities=['type'], 56 **kwargs) 57 58 if len(tmp): 59 return tmp 60 else: 61 return None 62 63 def get_metadata(self, path, **kwargs): 64 ''' Returns metadata found in JSON sidecars for the specified file. 65 Args: 66 path (str): Path to the file to get metadata for. 67 kwargs (dict): Optional keyword arguments to pass onto 68 get_nearest(). 69 Notes: 70 A dictionary containing metadata extracted from all matching .json 71 files is returned. In cases where the same key is found in multiple 72 files, the values in files closer to the input filename will take 73 precedence, per the inheritance rules in the BIDS specification. 
74 ''' 75 potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs) 76 77 if not isinstance(potentialJSONs, list): 78 return potentialJSONs 79 80 merged_param_dict = {} 81 for json_file_path in reversed(potentialJSONs): 82 if os.path.exists(json_file_path): 83 param_dict = json.load(open(json_file_path, "r", 84 encoding='utf-8')) 85 merged_param_dict.update(param_dict) 86 87 return merged_param_dict 88 89 def get_bvec(self, path, **kwargs): 90 tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0] 91 if isinstance(tmp, list): 92 return tmp[0] 93 else: 94 return tmp 95 96 def get_bval(self, path, **kwargs): 97 tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0] 98 if isinstance(tmp, list): 99 return tmp[0] 100 else: 101 return tmp 102 103 def get_events(self, path, **kwargs): 104 tmp = self._get_nearest_helper(path, '.tsv', type='events', **kwargs) 105 if isinstance(tmp, list): 106 return tmp[0] 107 else: 108 return tmp 109 110 def get_fieldmap(self, path, return_list=False): 111 fieldmaps = self._get_fieldmaps(path) 112 113 if return_list: 114 return fieldmaps 115 else: 116 if len(fieldmaps) == 1: 117 return fieldmaps[0] 118 elif len(fieldmaps) > 1: 119 raise ValueError("More than one fieldmap found, but the " 120 "'return_list' argument was set to False. " 121 "Either ensure that there is only one " 122 "fieldmap for this image, or set the " 123 "'return_list' argument to True and handle " 124 "the result as a list.") 125 else: # len(fieldmaps) == 0 126 return None 127 128 def _get_fieldmaps(self, path): 129 sub = os.path.split(path)[1].split("_")[0].split("sub-")[1] 130 fieldmap_set = [] 131 type_ = '(phase1|phasediff|epi|fieldmap)' 132 for file in self.get(subject=sub, type=type_, 133 extensions=['nii.gz', 'nii']): 134 metadata = self.get_metadata(file.filename) 135 if metadata and "IntendedFor" in metadata.keys(): 136 if isinstance(metadata["IntendedFor"], list): 137 intended_for = metadata["IntendedFor"] 138 else: 139 intended_for = [metadata["IntendedFor"]] 140 if any([path.endswith(suffix) for suffix in intended_for]): 141 cur_fieldmap = {} 142 if file.type == "phasediff": 143 cur_fieldmap = {"phasediff": file.filename, 144 "magnitude1": file.filename.replace( 145 "phasediff", "magnitude1"), 146 "magnitude2": file.filename.replace( 147 "phasediff", "magnitude2"), 148 "type": "phasediff"} 149 elif file.type == "phase1": 150 cur_fieldmap["phase1"] = file.filename 151 cur_fieldmap["magnitude1"] = \ 152 file.filename.replace("phase1", "magnitude1") 153 cur_fieldmap["phase2"] = \ 154 file.filename.replace("phase1", "phase2") 155 cur_fieldmap["magnitude2"] = \ 156 file.filename.replace("phase1", "magnitude2") 157 cur_fieldmap["type"] = "phase" 158 elif file.type == "epi": 159 cur_fieldmap["epi"] = file.filename 160 cur_fieldmap["type"] = "epi" 161 elif file.type == "fieldmap": 162 cur_fieldmap["fieldmap"] = file.filename 163 cur_fieldmap["magnitude"] = \ 164 file.filename.replace("fieldmap", "magnitude") 165 cur_fieldmap["type"] = "fieldmap" 166 fieldmap_set.append(cur_fieldmap) 167 return fieldmap_set 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py --- a/bids/grabbids/bids_layout.py +++ b/bids/grabbids/bids_layout.py @@ -143,9 +143,11 @@ cur_fieldmap = {"phasediff": file.filename, "magnitude1": file.filename.replace( "phasediff", "magnitude1"), - "magnitude2": file.filename.replace( - "phasediff", "magnitude2"), "type": "phasediff"} + magnitude2 = file.filename.replace( + "phasediff", "magnitude2") + if os.path.isfile(magnitude2): + cur_fieldmap['magnitude2'] = magnitude2 elif file.type == "phase1": cur_fieldmap["phase1"] = file.filename cur_fieldmap["magnitude1"] = \
{"golden_diff": "diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py\n--- a/bids/grabbids/bids_layout.py\n+++ b/bids/grabbids/bids_layout.py\n@@ -143,9 +143,11 @@\n cur_fieldmap = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n \"phasediff\", \"magnitude1\"),\n- \"magnitude2\": file.filename.replace(\n- \"phasediff\", \"magnitude2\"),\n \"type\": \"phasediff\"}\n+ magnitude2 = file.filename.replace(\n+ \"phasediff\", \"magnitude2\")\n+ if os.path.isfile(magnitude2):\n+ cur_fieldmap['magnitude2'] = magnitude2\n elif file.type == \"phase1\":\n cur_fieldmap[\"phase1\"] = file.filename\n cur_fieldmap[\"magnitude1\"] = \\\n", "issue": "Fieldmaps - _magnitude2.nii[.gz] is optional\nAs per these lines:\r\n\r\nhttps://github.com/INCF/pybids/blob/800d15053952991c9cd4a00cf0039288d489ca12/bids/grabbids/bids_layout.py#L143-L148\r\n\r\nwhen a fieldmap type is identified as `'phasediff'` then pybids understands that there is always a `_magnitude2.nii[.gz]` file. However, this is maked as optional in the specification.\n", "before_files": [{"content": "import os\nimport json\n\nfrom os.path import dirname\nfrom os.path import abspath\nfrom os.path import join as pathjoin\n\nfrom .bids_validator import BIDSValidator\nfrom grabbit import Layout\n\n__all__ = ['BIDSLayout']\n\n\nclass BIDSLayout(Layout):\n\n def __init__(self, path, config=None, validate=False,\n index_associated=True, **kwargs):\n self.validator = BIDSValidator(index_associated=index_associated)\n self.validate = validate\n if config is None:\n root = dirname(abspath(__file__))\n config = pathjoin(root, 'config', 'bids.json')\n super(BIDSLayout, self).__init__(path, config,\n dynamic_getters=True, **kwargs)\n\n def _validate_file(self, f):\n # If validate=True then checks files according to BIDS and\n # returns False if file doesn't fit BIDS specification\n if not self.validate:\n return True\n to_check = f.path\n to_check = to_check.split(os.path.abspath(self.root), maxsplit=1)[1]\n\n sep = os.path.sep\n if to_check[:len(sep)] != sep:\n to_check = sep + to_check\n else:\n None\n\n return self.validator.is_bids(to_check)\n\n def _get_nearest_helper(self, path, extension, type=None, **kwargs):\n \"\"\" Helper function for grabbit get_nearest \"\"\"\n path = abspath(path)\n\n if path not in self.files:\n raise ValueError(\"File '%s' could not be found in the current BIDS\"\n \" project.\" % path)\n\n if not type:\n # Constrain the search to .json files with the same type as target\n type = self.files[path].entities['type']\n\n tmp = self.get_nearest(path, extensions=extension, all_=True,\n type=type, ignore_strict_entities=['type'],\n **kwargs)\n\n if len(tmp):\n return tmp\n else:\n return None\n\n def get_metadata(self, path, **kwargs):\n ''' Returns metadata found in JSON sidecars for the specified file.\n Args:\n path (str): Path to the file to get metadata for.\n kwargs (dict): Optional keyword arguments to pass onto\n get_nearest().\n Notes:\n A dictionary containing metadata extracted from all matching .json\n files is returned. 
In cases where the same key is found in multiple\n files, the values in files closer to the input filename will take\n precedence, per the inheritance rules in the BIDS specification.\n '''\n potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)\n\n if not isinstance(potentialJSONs, list):\n return potentialJSONs\n\n merged_param_dict = {}\n for json_file_path in reversed(potentialJSONs):\n if os.path.exists(json_file_path):\n param_dict = json.load(open(json_file_path, \"r\",\n encoding='utf-8'))\n merged_param_dict.update(param_dict)\n\n return merged_param_dict\n\n def get_bvec(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_bval(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_events(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, '.tsv', type='events', **kwargs)\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_fieldmap(self, path, return_list=False):\n fieldmaps = self._get_fieldmaps(path)\n\n if return_list:\n return fieldmaps\n else:\n if len(fieldmaps) == 1:\n return fieldmaps[0]\n elif len(fieldmaps) > 1:\n raise ValueError(\"More than one fieldmap found, but the \"\n \"'return_list' argument was set to False. \"\n \"Either ensure that there is only one \"\n \"fieldmap for this image, or set the \"\n \"'return_list' argument to True and handle \"\n \"the result as a list.\")\n else: # len(fieldmaps) == 0\n return None\n\n def _get_fieldmaps(self, path):\n sub = os.path.split(path)[1].split(\"_\")[0].split(\"sub-\")[1]\n fieldmap_set = []\n type_ = '(phase1|phasediff|epi|fieldmap)'\n for file in self.get(subject=sub, type=type_,\n extensions=['nii.gz', 'nii']):\n metadata = self.get_metadata(file.filename)\n if metadata and \"IntendedFor\" in metadata.keys():\n if isinstance(metadata[\"IntendedFor\"], list):\n intended_for = metadata[\"IntendedFor\"]\n else:\n intended_for = [metadata[\"IntendedFor\"]]\n if any([path.endswith(suffix) for suffix in intended_for]):\n cur_fieldmap = {}\n if file.type == \"phasediff\":\n cur_fieldmap = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n \"phasediff\", \"magnitude1\"),\n \"magnitude2\": file.filename.replace(\n \"phasediff\", \"magnitude2\"),\n \"type\": \"phasediff\"}\n elif file.type == \"phase1\":\n cur_fieldmap[\"phase1\"] = file.filename\n cur_fieldmap[\"magnitude1\"] = \\\n file.filename.replace(\"phase1\", \"magnitude1\")\n cur_fieldmap[\"phase2\"] = \\\n file.filename.replace(\"phase1\", \"phase2\")\n cur_fieldmap[\"magnitude2\"] = \\\n file.filename.replace(\"phase1\", \"magnitude2\")\n cur_fieldmap[\"type\"] = \"phase\"\n elif file.type == \"epi\":\n cur_fieldmap[\"epi\"] = file.filename\n cur_fieldmap[\"type\"] = \"epi\"\n elif file.type == \"fieldmap\":\n cur_fieldmap[\"fieldmap\"] = file.filename\n cur_fieldmap[\"magnitude\"] = \\\n file.filename.replace(\"fieldmap\", \"magnitude\")\n cur_fieldmap[\"type\"] = \"fieldmap\"\n fieldmap_set.append(cur_fieldmap)\n return fieldmap_set\n", "path": "bids/grabbids/bids_layout.py"}], "after_files": [{"content": "import os\nimport json\n\nfrom os.path import dirname\nfrom os.path import abspath\nfrom os.path import join as pathjoin\n\nfrom .bids_validator import BIDSValidator\nfrom grabbit import Layout\n\n__all__ = ['BIDSLayout']\n\n\nclass BIDSLayout(Layout):\n\n 
def __init__(self, path, config=None, validate=False,\n index_associated=True, **kwargs):\n self.validator = BIDSValidator(index_associated=index_associated)\n self.validate = validate\n if config is None:\n root = dirname(abspath(__file__))\n config = pathjoin(root, 'config', 'bids.json')\n super(BIDSLayout, self).__init__(path, config,\n dynamic_getters=True, **kwargs)\n\n def _validate_file(self, f):\n # If validate=True then checks files according to BIDS and\n # returns False if file doesn't fit BIDS specification\n if not self.validate:\n return True\n to_check = f.path\n to_check = to_check.split(os.path.abspath(self.root), maxsplit=1)[1]\n\n sep = os.path.sep\n if to_check[:len(sep)] != sep:\n to_check = sep + to_check\n else:\n None\n\n return self.validator.is_bids(to_check)\n\n def _get_nearest_helper(self, path, extension, type=None, **kwargs):\n \"\"\" Helper function for grabbit get_nearest \"\"\"\n path = abspath(path)\n\n if path not in self.files:\n raise ValueError(\"File '%s' could not be found in the current BIDS\"\n \" project.\" % path)\n\n if not type:\n # Constrain the search to .json files with the same type as target\n type = self.files[path].entities['type']\n\n tmp = self.get_nearest(path, extensions=extension, all_=True,\n type=type, ignore_strict_entities=['type'],\n **kwargs)\n\n if len(tmp):\n return tmp\n else:\n return None\n\n def get_metadata(self, path, **kwargs):\n ''' Returns metadata found in JSON sidecars for the specified file.\n Args:\n path (str): Path to the file to get metadata for.\n kwargs (dict): Optional keyword arguments to pass onto\n get_nearest().\n Notes:\n A dictionary containing metadata extracted from all matching .json\n files is returned. In cases where the same key is found in multiple\n files, the values in files closer to the input filename will take\n precedence, per the inheritance rules in the BIDS specification.\n '''\n potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)\n\n if not isinstance(potentialJSONs, list):\n return potentialJSONs\n\n merged_param_dict = {}\n for json_file_path in reversed(potentialJSONs):\n if os.path.exists(json_file_path):\n param_dict = json.load(open(json_file_path, \"r\",\n encoding='utf-8'))\n merged_param_dict.update(param_dict)\n\n return merged_param_dict\n\n def get_bvec(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_bval(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_events(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, '.tsv', type='events', **kwargs)\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_fieldmap(self, path, return_list=False):\n fieldmaps = self._get_fieldmaps(path)\n\n if return_list:\n return fieldmaps\n else:\n if len(fieldmaps) == 1:\n return fieldmaps[0]\n elif len(fieldmaps) > 1:\n raise ValueError(\"More than one fieldmap found, but the \"\n \"'return_list' argument was set to False. 
\"\n \"Either ensure that there is only one \"\n \"fieldmap for this image, or set the \"\n \"'return_list' argument to True and handle \"\n \"the result as a list.\")\n else: # len(fieldmaps) == 0\n return None\n\n def _get_fieldmaps(self, path):\n sub = os.path.split(path)[1].split(\"_\")[0].split(\"sub-\")[1]\n fieldmap_set = []\n type_ = '(phase1|phasediff|epi|fieldmap)'\n for file in self.get(subject=sub, type=type_,\n extensions=['nii.gz', 'nii']):\n metadata = self.get_metadata(file.filename)\n if metadata and \"IntendedFor\" in metadata.keys():\n if isinstance(metadata[\"IntendedFor\"], list):\n intended_for = metadata[\"IntendedFor\"]\n else:\n intended_for = [metadata[\"IntendedFor\"]]\n if any([path.endswith(suffix) for suffix in intended_for]):\n cur_fieldmap = {}\n if file.type == \"phasediff\":\n cur_fieldmap = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n \"phasediff\", \"magnitude1\"),\n \"type\": \"phasediff\"}\n magnitude2 = file.filename.replace(\n \"phasediff\", \"magnitude2\")\n if os.path.isfile(magnitude2):\n cur_fieldmap['magnitude2'] = magnitude2\n elif file.type == \"phase1\":\n cur_fieldmap[\"phase1\"] = file.filename\n cur_fieldmap[\"magnitude1\"] = \\\n file.filename.replace(\"phase1\", \"magnitude1\")\n cur_fieldmap[\"phase2\"] = \\\n file.filename.replace(\"phase1\", \"phase2\")\n cur_fieldmap[\"magnitude2\"] = \\\n file.filename.replace(\"phase1\", \"magnitude2\")\n cur_fieldmap[\"type\"] = \"phase\"\n elif file.type == \"epi\":\n cur_fieldmap[\"epi\"] = file.filename\n cur_fieldmap[\"type\"] = \"epi\"\n elif file.type == \"fieldmap\":\n cur_fieldmap[\"fieldmap\"] = file.filename\n cur_fieldmap[\"magnitude\"] = \\\n file.filename.replace(\"fieldmap\", \"magnitude\")\n cur_fieldmap[\"type\"] = \"fieldmap\"\n fieldmap_set.append(cur_fieldmap)\n return fieldmap_set\n", "path": "bids/grabbids/bids_layout.py"}]}
num_tokens: 2,203
num_tokens_diff: 199
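The `_get_fieldmaps` helper in the record above pairs a fieldmap with its target images through the JSON `IntendedFor` field: a bare string is treated as a one-element list, and a match is a plain path-suffix test. Below is a minimal standalone sketch of just that rule; the function name and the example paths are invented for illustration and are not part of the `bids_layout` module itself.

```python
def intended_for_matches(metadata: dict, target_path: str) -> bool:
    """True when metadata['IntendedFor'] covers target_path (suffix match)."""
    intended = metadata.get("IntendedFor")
    if intended is None:
        return False
    if isinstance(intended, str):      # BIDS allows a single string...
        intended = [intended]          # ...or a list of strings
    return any(target_path.endswith(suffix) for suffix in intended)


# Example: this metadata ties the fieldmap to the resting-state BOLD run only.
meta = {"IntendedFor": ["func/sub-01_task-rest_bold.nii.gz"]}
print(intended_for_matches(meta, "/data/sub-01/func/sub-01_task-rest_bold.nii.gz"))   # True
print(intended_for_matches(meta, "/data/sub-01/func/sub-01_task-nback_bold.nii.gz"))  # False
```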
problem_id: gh_patches_debug_2789
source: rasdani/github-patches
task_type: git_diff
in_source_id: ivy-llc__ivy-18204
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- meshgrid --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/tensor/creation.py` Content: ``` 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes 4 from .tensor import Tensor 5 from ivy.functional.frontends.paddle.func_wrapper import ( 6 to_ivy_arrays_and_back, 7 ) 8 9 10 @to_ivy_arrays_and_back 11 def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True): 12 array = ivy.array(data, dtype=dtype, device=place) 13 return Tensor(array, dtype=dtype, place=place) 14 15 16 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle") 17 @to_ivy_arrays_and_back 18 def ones(shape, /, *, dtype=None, name=None): 19 dtype = "float32" if dtype is None else dtype 20 return ivy.ones(shape, dtype=dtype) 21 22 23 @with_unsupported_dtypes( 24 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle" 25 ) 26 @to_ivy_arrays_and_back 27 def ones_like(x, /, *, dtype=None, name=None): 28 dtype = x.dtype if dtype is None else dtype 29 return ivy.ones_like(x, dtype=dtype) 30 31 32 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle") 33 @to_ivy_arrays_and_back 34 def zeros(shape, /, *, dtype=None, name=None): 35 dtype = "float32" if dtype is None else dtype 36 return ivy.zeros(shape, dtype=dtype) 37 38 39 @with_unsupported_dtypes( 40 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle" 41 ) 42 @to_ivy_arrays_and_back 43 def zeros_like(x, /, *, dtype=None, name=None): 44 dtype = x.dtype if dtype is None else dtype 45 return ivy.zeros_like(x, dtype=dtype) 46 47 48 @to_ivy_arrays_and_back 49 def full(shape, fill_value, /, *, dtype=None, name=None): 50 dtype = "float32" if dtype is None else dtype 51 return ivy.full(shape, fill_value, dtype=dtype) 52 53 54 @to_ivy_arrays_and_back 55 def full_like(x, fill_value, /, *, dtype=None, name=None): 56 dtype = x.dtype if dtype is None else dtype 57 return ivy.full_like(x, fill_value, dtype=dtype) 58 59 60 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 61 @to_ivy_arrays_and_back 62 def arange(start, end=None, step=1, dtype=None, name=None): 63 return ivy.arange(start, end, step=step, dtype=dtype) 64 65 66 @to_ivy_arrays_and_back 67 def empty(shape, dtype=None): 68 return ivy.empty(shape=shape, dtype=dtype) 69 70 71 @to_ivy_arrays_and_back 72 def eye(num_rows, num_columns=None, dtype=None, name=None): 73 return ivy.eye(num_rows, num_columns, dtype=dtype) 74 75 76 @to_ivy_arrays_and_back 77 def empty_like(x, dtype=None, name=None): 78 return ivy.empty_like(x, dtype=dtype) 79 80 81 @with_unsupported_dtypes( 82 { 83 "2.5.0 and below": ( 84 "uint8", 85 "int8", 86 "int16", 87 "float16", 88 "complex64", 89 "complex128", 90 "bool", 91 ) 92 }, 93 "paddle", 94 ) 95 @to_ivy_arrays_and_back 96 def tril(x, diagonal=0, name=None): 97 return ivy.tril(x, k=diagonal) 98 99 100 @with_unsupported_dtypes( 101 { 102 "2.5.0 and below": ( 103 "uint8", 104 "int8", 105 "int16", 106 "float16", 107 "complex64", 108 "complex128", 109 "bool", 110 ) 111 }, 112 "paddle", 113 ) 114 @to_ivy_arrays_and_back 115 def triu(x, diagonal=0, name=None): 116 return ivy.triu(x, k=diagonal) 117 118 119 @with_supported_dtypes( 120 {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" 121 ) 122 @to_ivy_arrays_and_back 123 
def diagflat(x, offset=0, name=None): 124 arr = ivy.diagflat(x, offset=offset) 125 return arr 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py --- a/ivy/functional/frontends/paddle/tensor/creation.py +++ b/ivy/functional/frontends/paddle/tensor/creation.py @@ -123,3 +123,11 @@ def diagflat(x, offset=0, name=None): arr = ivy.diagflat(x, offset=offset) return arr + + +@with_supported_dtypes( + {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" +) +@to_ivy_arrays_and_back +def meshgrid(*args, **kwargs): + return ivy.meshgrid(*args, indexing="ij")
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py\n--- a/ivy/functional/frontends/paddle/tensor/creation.py\n+++ b/ivy/functional/frontends/paddle/tensor/creation.py\n@@ -123,3 +123,11 @@\n def diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n+\r\n+\r\n+@with_supported_dtypes(\r\n+ {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n+)\r\n+@to_ivy_arrays_and_back\r\n+def meshgrid(*args, **kwargs):\r\n+ return ivy.meshgrid(*args, indexing=\"ij\")\n", "issue": "meshgrid\n\n", "before_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return 
ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}], "after_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, 
name=None):\r\n return ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef meshgrid(*args, **kwargs):\r\n return ivy.meshgrid(*args, indexing=\"ij\")\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}]}
num_tokens: 1,548
num_tokens_diff: 190
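The golden diff above fills in the missing Paddle frontend by delegating to `ivy.meshgrid(*args, indexing="ij")`, and the `indexing` flag is the substantive part of the patch. Its effect is easiest to see with plain NumPy, which exposes the same switch; the sketch below only illustrates the "ij" versus "xy" semantics and does not touch the ivy API.

```python
import numpy as np

x = np.array([1, 2, 3])
y = np.array([10, 20])

# Matrix ("ij") indexing: output shapes follow the input order, (len(x), len(y)).
xx_ij, yy_ij = np.meshgrid(x, y, indexing="ij")
# Cartesian ("xy") indexing, NumPy's default: the first two axes are swapped.
xx_xy, yy_xy = np.meshgrid(x, y, indexing="xy")

print(xx_ij.shape, xx_xy.shape)          # (3, 2) (2, 3)
print(np.array_equal(xx_ij, xx_xy.T))    # True: the two conventions are transposes
```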
problem_id: gh_patches_debug_2003
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__google-cloud-python-9604
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release google-cloud-storage Hi @tseaver, could you help cut a release for google-cloud-storage? cc: @JesseLovelace --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `storage/setup.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = "google-cloud-storage" 24 description = "Google Cloud Storage API client library" 25 version = "1.21.0" 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = "Development Status :: 5 - Production/Stable" 31 dependencies = [ 32 "google-auth >= 1.2.0", 33 "google-cloud-core >= 1.0.3, < 2.0dev", 34 "google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev", 35 ] 36 extras = {} 37 38 39 # Setup boilerplate below this line. 40 41 package_root = os.path.abspath(os.path.dirname(__file__)) 42 43 readme_filename = os.path.join(package_root, "README.rst") 44 with io.open(readme_filename, encoding="utf-8") as readme_file: 45 readme = readme_file.read() 46 47 # Only include packages under the 'google' namespace. Do not include tests, 48 # benchmarks, etc. 49 packages = [ 50 package for package in setuptools.find_packages() if package.startswith("google") 51 ] 52 53 # Determine which namespaces are needed. 54 namespaces = ["google"] 55 if "google.cloud" in packages: 56 namespaces.append("google.cloud") 57 58 59 setuptools.setup( 60 name=name, 61 version=version, 62 description=description, 63 long_description=readme, 64 author="Google LLC", 65 author_email="[email protected]", 66 license="Apache 2.0", 67 url="https://github.com/GoogleCloudPlatform/google-cloud-python", 68 classifiers=[ 69 release_status, 70 "Intended Audience :: Developers", 71 "License :: OSI Approved :: Apache Software License", 72 "Programming Language :: Python", 73 "Programming Language :: Python :: 2", 74 "Programming Language :: Python :: 2.7", 75 "Programming Language :: Python :: 3", 76 "Programming Language :: Python :: 3.5", 77 "Programming Language :: Python :: 3.6", 78 "Programming Language :: Python :: 3.7", 79 "Operating System :: OS Independent", 80 "Topic :: Internet", 81 ], 82 platforms="Posix; MacOS X; Windows", 83 packages=packages, 84 namespace_packages=namespaces, 85 install_requires=dependencies, 86 extras_require=extras, 87 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", 88 include_package_data=True, 89 zip_safe=False, 90 ) 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/storage/setup.py b/storage/setup.py --- a/storage/setup.py +++ b/storage/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-storage" description = "Google Cloud Storage API client library" -version = "1.21.0" +version = "1.22.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta'
{"golden_diff": "diff --git a/storage/setup.py b/storage/setup.py\n--- a/storage/setup.py\n+++ b/storage/setup.py\n@@ -22,7 +22,7 @@\n \n name = \"google-cloud-storage\"\n description = \"Google Cloud Storage API client library\"\n-version = \"1.21.0\"\n+version = \"1.22.0\"\n # Should be one of:\n # 'Development Status :: 3 - Alpha'\n # 'Development Status :: 4 - Beta'\n", "issue": "Release google-cloud-storage \nHi @tseaver, could you help cut a release for google-cloud-storage?\r\n\r\ncc: @JesseLovelace \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.21.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.22.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py"}]}
num_tokens: 1,149
num_tokens_diff: 101
problem_id: gh_patches_debug_318
source: rasdani/github-patches
task_type: git_diff
in_source_id: TencentBlueKing__bk-user-164
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 部门查询接口 ?lookup_field=name,当部门名称中含有 "." 时返回 404 **用文字描述你遇到的问题** 请用简练的文字描述你遇到的问题,问题描述的清晰程度决定了问题被解决的效率。 **重现方法** 1. 创建一个目录,名字包含点,如【广东省.深圳市】 2. 使用api查询, http:://{host:port}/api/v2/departments/广东省.深圳市/?lookup_field=name 查询结果是404 请描述问题重现的方法,如果不方便描述,可以通过截图或者视频辅助。 **预期行为** 预期的正常行为 **版本** - 提供用户管理的具体版本号 - 是否是企业版问题? **如果是 SaaS 页面问题,请提供使用的操作系统和浏览器信息** - OS: [e.g. iOS] - Browser [e.g. chrome, safari] - Version [e.g. 22] **额外信息** 任何你觉得有助于问题解决的内容 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/api/bkuser_core/departments/urls.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. 4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. 5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at http://opensource.org/licenses/MIT 7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on 8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the 9 specific language governing permissions and limitations under the License. 10 """ 11 from bkuser_core.common.constants import LOOKUP_FIELD_NAME 12 from django.conf.urls import url 13 14 from . import views 15 16 PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME 17 18 urlpatterns = [ 19 url( 20 r"^api/v2/departments/$", 21 views.DepartmentViewSet.as_view( 22 { 23 "get": "list", 24 "post": "create", 25 } 26 ), 27 name="departments", 28 ), 29 url( 30 r"^api/v2/departments/%s/$" % PVAR_DEPARTMENT_ID, 31 views.DepartmentViewSet.as_view( 32 { 33 "get": "retrieve", 34 "post": "update", 35 "delete": "destroy", 36 "patch": "partial_update", 37 } 38 ), 39 name="departments.action", 40 ), 41 url( 42 r"^api/v2/departments/%s/restoration/$" % PVAR_DEPARTMENT_ID, 43 views.DepartmentViewSet.as_view( 44 { 45 "post": "restoration", 46 } 47 ), 48 name="departments.restoration", 49 ), 50 url( 51 r"^api/v2/departments/%s/ancestors/$" % PVAR_DEPARTMENT_ID, 52 views.DepartmentViewSet.as_view( 53 { 54 "get": "get_ancestor", 55 } 56 ), 57 name="departments.ancestors", 58 ), 59 url( 60 r"^api/v2/departments/%s/children/$" % PVAR_DEPARTMENT_ID, 61 views.DepartmentViewSet.as_view( 62 { 63 "get": "get_children", 64 } 65 ), 66 name="departments.children", 67 ), 68 url( 69 r"^api/v2/departments/%s/profiles/$" % PVAR_DEPARTMENT_ID, 70 views.DepartmentViewSet.as_view({"get": "get_profiles", "post": "add_profiles"}), 71 name="departments.profiles", 72 ), 73 ######### 74 # Batch # 75 ######### 76 url( 77 r"^api/v2/batch/departments/profiles/$", 78 views.BatchDepartmentsViewSet.as_view( 79 { 80 "get": "multiple_retrieve_profiles", 81 } 82 ), 83 name="department.batch", 84 ), 85 ######## 86 # Edge # 87 ######## 88 url( 89 r"^api/v2/edges/department_profile/$", 90 views.DepartmentProfileEdgeViewSet.as_view({"get": "list"}), 91 name="edge.department_profile", 92 ), 93 ############# 94 # shortcuts # 95 ############# 96 url( 97 r"^api/v2/shortcuts/departments/tops/$", 98 views.DepartmentViewSet.as_view({"get": "list_tops"}), 99 name="shortcuts.departments.list.tops", 100 ), 101 ] 102 ``` --- END FILES --- Please first localize the bug based on 
the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py --- a/src/api/bkuser_core/departments/urls.py +++ b/src/api/bkuser_core/departments/urls.py @@ -13,7 +13,7 @@ from . import views -PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME +PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-\.]+)" % LOOKUP_FIELD_NAME urlpatterns = [ url(
{"golden_diff": "diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py\n--- a/src/api/bkuser_core/departments/urls.py\n+++ b/src/api/bkuser_core/departments/urls.py\n@@ -13,7 +13,7 @@\n \n from . import views\n \n-PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n+PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-\\.]+)\" % LOOKUP_FIELD_NAME\n \n urlpatterns = [\n url(\n", "issue": "\u90e8\u95e8\u67e5\u8be2\u63a5\u53e3 ?lookup_field=name\uff0c\u5f53\u90e8\u95e8\u540d\u79f0\u4e2d\u542b\u6709 \".\" \u65f6\u8fd4\u56de 404\n**\u7528\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898**\r\n\r\n\u8bf7\u7528\u7b80\u7ec3\u7684\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898\uff0c\u95ee\u9898\u63cf\u8ff0\u7684\u6e05\u6670\u7a0b\u5ea6\u51b3\u5b9a\u4e86\u95ee\u9898\u88ab\u89e3\u51b3\u7684\u6548\u7387\u3002\r\n\r\n**\u91cd\u73b0\u65b9\u6cd5**\r\n1. \u521b\u5efa\u4e00\u4e2a\u76ee\u5f55\uff0c\u540d\u5b57\u5305\u542b\u70b9\uff0c\u5982\u3010\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02\u3011\r\n2. \u4f7f\u7528api\u67e5\u8be2\uff0c http:://{host:port}/api/v2/departments/\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02/?lookup_field=name\r\n\r\n\u67e5\u8be2\u7ed3\u679c\u662f404\r\n\r\n\u8bf7\u63cf\u8ff0\u95ee\u9898\u91cd\u73b0\u7684\u65b9\u6cd5\uff0c\u5982\u679c\u4e0d\u65b9\u4fbf\u63cf\u8ff0\uff0c\u53ef\u4ee5\u901a\u8fc7\u622a\u56fe\u6216\u8005\u89c6\u9891\u8f85\u52a9\u3002\r\n\r\n**\u9884\u671f\u884c\u4e3a**\r\n\r\n\u9884\u671f\u7684\u6b63\u5e38\u884c\u4e3a\r\n\r\n**\u7248\u672c**\r\n- \u63d0\u4f9b\u7528\u6237\u7ba1\u7406\u7684\u5177\u4f53\u7248\u672c\u53f7\r\n- \u662f\u5426\u662f\u4f01\u4e1a\u7248\u95ee\u9898\uff1f\r\n\r\n**\u5982\u679c\u662f SaaS \u9875\u9762\u95ee\u9898\uff0c\u8bf7\u63d0\u4f9b\u4f7f\u7528\u7684\u64cd\u4f5c\u7cfb\u7edf\u548c\u6d4f\u89c8\u5668\u4fe1\u606f**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**\u989d\u5916\u4fe1\u606f**\r\n\r\n\u4efb\u4f55\u4f60\u89c9\u5f97\u6709\u52a9\u4e8e\u95ee\u9898\u89e3\u51b3\u7684\u5185\u5bb9\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n", "path": "src/api/bkuser_core/departments/urls.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-\\.]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n", "path": "src/api/bkuser_core/departments/urls.py"}]}
num_tokens: 1,406
num_tokens_diff: 136
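The bk-user fix above widens one character class so that department names containing a dot can reach the view at all. A standalone `re` check makes the before/after behaviour concrete; the real `LOOKUP_FIELD_NAME` constant lives in `bkuser_core.common.constants` and its value is not shown in the record, so a placeholder group name is assumed here.

```python
import re

LOOKUP_FIELD_NAME = "lookup_value"   # placeholder; the real value comes from constants.py

old_pattern = r"^api/v2/departments/(?P<%s>[\w\-]+)/$" % LOOKUP_FIELD_NAME
new_pattern = r"^api/v2/departments/(?P<%s>[\w\-\.]+)/$" % LOOKUP_FIELD_NAME

path = "api/v2/departments/广东省.深圳市/"
print(bool(re.match(old_pattern, path)))   # False: the dot stops the match, so the route 404s
print(bool(re.match(new_pattern, path)))   # True: the whole department name is captured
```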
problem_id: gh_patches_debug_35015
source: rasdani/github-patches
task_type: git_diff
in_source_id: openstates__openstates-scrapers-1546
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TN: Only scraping Joint committees Running the [TN committee scraper](http://github.com/openstates/openstates/blob/7506cd3d6e0dacf891a23818d012ff0adf789265/openstates/tn/committees.py) locally, only the 4 joint committees are scraped: ``` 18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/ 18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/ 18:35:45 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/fiscal-review 18:35:46 INFO billy: Save committee Fiscal Review 18:35:46 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/gov-opps 18:35:47 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/gov-opps.html 18:35:48 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/gov-opps.html 18:35:49 INFO billy: Save committee Government Operations 18:35:49 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/pension-ins.html 18:35:50 INFO billy: Save committee Pensions and Insurance 18:35:50 INFO scrapelib: GET - http://www.tn.gov/tacir/ 18:35:52 INFO billy: Save committee TACIR 18:35:52 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/ 18:35:52 INFO billy: imported 4 committee files ``` This matches the [bobsled report](http://bobsled.openstates.org/run-tn-2017-03-17.html) and [TN committees available on openstates.org](https://openstates.org/tn/committees/?chamber=) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openstates/tn/committees.py` Content: ``` 1 """ 2 Archived Committee notes: 3 4 Senate committees only avail from 105th forward 5 6 Urls are inconsistent 7 'http://www.capitol.tn.gov/senate/archives/105GA/Committees/scommemb.htm' 8 'http://www.capitol.tn.gov/senate/archives/106GA/Committees/index.html' 9 10 'http://www.capitol.tn.gov/house/archives/99GA/Committees/hcommemb.htm' 11 'http://www.capitol.tn.gov/house/archives/100GA/hcommemb.htm' 12 'http://www.capitol.tn.gov/house/archives/101GA/hcommemb.htm' 13 'http://www.capitol.tn.gov/house/archives/102GA/Committees/HComm.htm' 14 'http://www.capitol.tn.gov/house/archives/103GA/hcommemb.htm' 15 'http://www.capitol.tn.gov/house/archives/104GA/hcommemb.htm' 16 'http://www.capitol.tn.gov/house/archives/105GA/Committees/hcommemb.htm' 17 'http://www.capitol.tn.gov/house/archives/106GA/Committees/index.html' 18 19 """ 20 import re 21 22 from billy.scrape.committees import Committee, CommitteeScraper 23 import lxml.html 24 import requests 25 26 27 def fix_whitespace(s): 28 return re.sub(r'\s+', ' ', s) 29 30 31 class TNCommitteeScraper(CommitteeScraper): 32 jurisdiction = 'tn' 33 base_href = 'http://www.capitol.tn.gov' 34 chambers = { 35 'lower': 'house', 36 'upper': 'senate' 37 } 38 39 def scrape(self, chamber, term): 40 self.validate_term(term, latest_only=True) 41 url_chamber = self.chambers[chamber] 42 url = 'http://www.capitol.tn.gov/%s/committees/' % (url_chamber) 43 if chamber == 'upper': 44 self.scrape_senate_committees(url) 45 self.scrape_joint_committees() 46 else: 47 self.scrape_house_committees(url) 48 49 def scrape_senate_committees(self, url): 50 page = self.get(url).text 51 page = lxml.html.fromstring(page) 52 page.make_links_absolute(url) 53 54 find_expr = 'body/div/div/h1[text()="Senate Committees"]/' \ 55 'following-sibling::div/div/div/div//a' 56 links = [(a.text_content(), a.attrib['href']) for a in 57 
page.xpath(find_expr)] 58 59 for committee_name, link in links: 60 self._scrape_committee(committee_name, link, 'upper') 61 62 def scrape_house_committees(self, url): 63 html = self.get(url).text 64 doc = lxml.html.fromstring(html) 65 doc.make_links_absolute(url) 66 67 links = doc.xpath( 68 'body/div/div/h1[text()="House Committees"]/' 69 'following-sibling::div/div/div/div//a' 70 ) 71 72 for a in links: 73 self._scrape_committee(a.text.strip(), a.get('href'), 'lower') 74 75 def _scrape_committee(self, committee_name, link, chamber): 76 """Scrape individual committee page and add members""" 77 78 page = self.get(link).text 79 page = lxml.html.fromstring(page) 80 page.make_links_absolute(link) 81 82 is_subcommittee = bool(page.xpath('//li/a[text()="Committee"]')) 83 if is_subcommittee: 84 com = Committee( 85 chamber, 86 re.sub(r'\s*Subcommittee\s*', '', committee_name), 87 committee_name 88 ) 89 else: 90 com = Committee(chamber, committee_name) 91 92 OFFICER_SEARCH = '//h2[contains(text(), "Committee Officers")]/' \ 93 'following-sibling::div/ul/li/a' 94 MEMBER_SEARCH = '//h2[contains(text(), "Committee Members")]/' \ 95 'following-sibling::div/ul/li/a' 96 HOUSE_SEARCH = '//h2[contains(text(), "House Members")]/' \ 97 'following-sibling::div/ul/li/a' 98 SENATE_SEARCH = '//h2[contains(text(), "House Members")]/' \ 99 'following-sibling::div/ul/li/a' 100 for a in (page.xpath(OFFICER_SEARCH) + page.xpath(MEMBER_SEARCH)): 101 102 member_name = ' '.join([ 103 x.strip() for x in 104 a.xpath('text()') + a.xpath('span/text()') 105 if x.strip() 106 ]) 107 role = a.xpath('small') 108 if role: 109 role = role[0].xpath('text()')[0].strip() 110 else: 111 role = 'member' 112 113 com.add_member(member_name, role) 114 115 com.add_source(link) 116 self.save_committee(com) 117 118 #Scrapes joint committees 119 def scrape_joint_committees(self): 120 main_url = 'http://www.capitol.tn.gov/joint/' 121 122 page = self.get(main_url).text 123 page = lxml.html.fromstring(page) 124 page.make_links_absolute(main_url) 125 126 for el in page.xpath( 127 '//div/h2[text()="Committees"]/' 128 'following-sibling::div/div//a' 129 ): 130 com_name = el.text 131 com_link = el.attrib["href"] 132 self.scrape_joint_committee(com_name, com_link) 133 134 #Scrapes the individual joint committee - most of it is special case 135 def scrape_joint_committee(self, committee_name, url): 136 if 'state.tn.us' in url: 137 com = Committee('joint', committee_name) 138 try: 139 page = self.get(url).text 140 except requests.exceptions.ConnectionError: 141 self.logger.warning("Committee link is broken, skipping") 142 return 143 144 page = lxml.html.fromstring(page) 145 146 for el in page.xpath("//div[@class='Blurb']/table//tr[2 <= position() and position() < 10]/td[1]"): 147 if el.xpath('text()') == ['Vacant']: 148 continue 149 150 (member_name, ) = el.xpath('a/text()') 151 if el.xpath('text()'): 152 role = el.xpath('text()')[0].strip(' ,') 153 else: 154 role = 'member' 155 156 member_name = member_name.replace('Senator', '') 157 member_name = member_name.replace('Representative', '') 158 member_name = member_name.strip() 159 com.add_member(member_name, role) 160 161 com.add_source(url) 162 self.save_committee(com) 163 164 elif 'gov-opps' in url: 165 com = Committee('joint', committee_name) 166 page = self.get(url).text 167 page = lxml.html.fromstring(page) 168 169 links = ['senate', 'house'] 170 for link in links: 171 chamber_link = self.base_href + '/' + link + '/committees/gov-opps.html' 172 chamber_page = self.get(chamber_link).text 173 
chamber_page = lxml.html.fromstring(chamber_page) 174 175 OFFICER_SEARCH = '//h2[contains(text(), "Committee Officers")]/' \ 176 'following-sibling::div/ul/li/a' 177 MEMBER_SEARCH = '//h2[contains(text(), "Committee Members")]/' \ 178 'following-sibling::div/ul/li/a' 179 for a in ( 180 chamber_page.xpath(OFFICER_SEARCH) + 181 chamber_page.xpath(MEMBER_SEARCH) 182 ): 183 member_name = ' '.join([ 184 x.strip() for x in 185 a.xpath('.//text()') 186 if x.strip() 187 ]) 188 role = a.xpath('small') 189 if role: 190 role = role[0].xpath('text()')[0].strip() 191 member_name = member_name.replace(role, '').strip() 192 else: 193 role = 'member' 194 com.add_member(member_name, role) 195 196 com.add_source(chamber_link) 197 198 com.add_source(url) 199 self.save_committee(com) 200 201 else: 202 self._scrape_committee(committee_name, url, 'joint') 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openstates/tn/committees.py b/openstates/tn/committees.py --- a/openstates/tn/committees.py +++ b/openstates/tn/committees.py @@ -28,6 +28,10 @@ return re.sub(r'\s+', ' ', s) +# All links in a section with a given title +COMMITTEE_LINKS_TEMPLATE = '//h2[text()="{header}"]/parent::*//a' + + class TNCommitteeScraper(CommitteeScraper): jurisdiction = 'tn' base_href = 'http://www.capitol.tn.gov' @@ -51,8 +55,9 @@ page = lxml.html.fromstring(page) page.make_links_absolute(url) - find_expr = 'body/div/div/h1[text()="Senate Committees"]/' \ - 'following-sibling::div/div/div/div//a' + standing = COMMITTEE_LINKS_TEMPLATE.format(header="Standing Committees") + select = COMMITTEE_LINKS_TEMPLATE.format(header="Select Committees") + find_expr = '{}|{}'.format(standing, select) links = [(a.text_content(), a.attrib['href']) for a in page.xpath(find_expr)] @@ -64,10 +69,9 @@ doc = lxml.html.fromstring(html) doc.make_links_absolute(url) - links = doc.xpath( - 'body/div/div/h1[text()="House Committees"]/' - 'following-sibling::div/div/div/div//a' - ) + standing = COMMITTEE_LINKS_TEMPLATE.format(header="Committees & Subcommittees") + select = COMMITTEE_LINKS_TEMPLATE.format(header="Select Committees") + links = doc.xpath('{}|{}'.format(standing, select)) for a in links: self._scrape_committee(a.text.strip(), a.get('href'), 'lower') @@ -123,10 +127,7 @@ page = lxml.html.fromstring(page) page.make_links_absolute(main_url) - for el in page.xpath( - '//div/h2[text()="Committees"]/' - 'following-sibling::div/div//a' - ): + for el in page.xpath(COMMITTEE_LINKS_TEMPLATE.format(header="Committees")): com_name = el.text com_link = el.attrib["href"] self.scrape_joint_committee(com_name, com_link)
{"golden_diff": "diff --git a/openstates/tn/committees.py b/openstates/tn/committees.py\n--- a/openstates/tn/committees.py\n+++ b/openstates/tn/committees.py\n@@ -28,6 +28,10 @@\n return re.sub(r'\\s+', ' ', s)\n \n \n+# All links in a section with a given title\n+COMMITTEE_LINKS_TEMPLATE = '//h2[text()=\"{header}\"]/parent::*//a'\n+\n+\n class TNCommitteeScraper(CommitteeScraper):\n jurisdiction = 'tn'\n base_href = 'http://www.capitol.tn.gov'\n@@ -51,8 +55,9 @@\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n \n- find_expr = 'body/div/div/h1[text()=\"Senate Committees\"]/' \\\n- 'following-sibling::div/div/div/div//a'\n+ standing = COMMITTEE_LINKS_TEMPLATE.format(header=\"Standing Committees\")\n+ select = COMMITTEE_LINKS_TEMPLATE.format(header=\"Select Committees\")\n+ find_expr = '{}|{}'.format(standing, select)\n links = [(a.text_content(), a.attrib['href']) for a in\n page.xpath(find_expr)]\n \n@@ -64,10 +69,9 @@\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n \n- links = doc.xpath(\n- 'body/div/div/h1[text()=\"House Committees\"]/'\n- 'following-sibling::div/div/div/div//a'\n- )\n+ standing = COMMITTEE_LINKS_TEMPLATE.format(header=\"Committees & Subcommittees\")\n+ select = COMMITTEE_LINKS_TEMPLATE.format(header=\"Select Committees\")\n+ links = doc.xpath('{}|{}'.format(standing, select))\n \n for a in links:\n self._scrape_committee(a.text.strip(), a.get('href'), 'lower')\n@@ -123,10 +127,7 @@\n page = lxml.html.fromstring(page)\n page.make_links_absolute(main_url)\n \n- for el in page.xpath(\n- '//div/h2[text()=\"Committees\"]/'\n- 'following-sibling::div/div//a'\n- ):\n+ for el in page.xpath(COMMITTEE_LINKS_TEMPLATE.format(header=\"Committees\")):\n com_name = el.text\n com_link = el.attrib[\"href\"]\n self.scrape_joint_committee(com_name, com_link)\n", "issue": "TN: Only scraping Joint committees\nRunning the [TN committee scraper](http://github.com/openstates/openstates/blob/7506cd3d6e0dacf891a23818d012ff0adf789265/openstates/tn/committees.py) locally, only the 4 joint committees are scraped:\r\n\r\n```\r\n18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/\r\n18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/\r\n18:35:45 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/fiscal-review\r\n18:35:46 INFO billy: Save committee Fiscal Review\r\n18:35:46 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/gov-opps\r\n18:35:47 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/gov-opps.html\r\n18:35:48 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/gov-opps.html\r\n18:35:49 INFO billy: Save committee Government Operations\r\n18:35:49 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/pension-ins.html\r\n18:35:50 INFO billy: Save committee Pensions and Insurance\r\n18:35:50 INFO scrapelib: GET - http://www.tn.gov/tacir/\r\n18:35:52 INFO billy: Save committee TACIR\r\n18:35:52 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/\r\n18:35:52 INFO billy: imported 4 committee files\r\n```\r\n\r\nThis matches the [bobsled report](http://bobsled.openstates.org/run-tn-2017-03-17.html) and [TN committees available on openstates.org](https://openstates.org/tn/committees/?chamber=)\n", "before_files": [{"content": "\"\"\"\nArchived Committee notes:\n\nSenate committees only avail from 105th forward\n\nUrls are 
inconsistent\n'http://www.capitol.tn.gov/senate/archives/105GA/Committees/scommemb.htm'\n'http://www.capitol.tn.gov/senate/archives/106GA/Committees/index.html'\n\n'http://www.capitol.tn.gov/house/archives/99GA/Committees/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/100GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/101GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/102GA/Committees/HComm.htm'\n'http://www.capitol.tn.gov/house/archives/103GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/104GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/105GA/Committees/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/106GA/Committees/index.html'\n\n\"\"\"\nimport re\n\nfrom billy.scrape.committees import Committee, CommitteeScraper\nimport lxml.html\nimport requests\n\n\ndef fix_whitespace(s):\n return re.sub(r'\\s+', ' ', s)\n\n\nclass TNCommitteeScraper(CommitteeScraper):\n jurisdiction = 'tn'\n base_href = 'http://www.capitol.tn.gov'\n chambers = {\n 'lower': 'house',\n 'upper': 'senate'\n }\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=True)\n url_chamber = self.chambers[chamber]\n url = 'http://www.capitol.tn.gov/%s/committees/' % (url_chamber)\n if chamber == 'upper':\n self.scrape_senate_committees(url)\n self.scrape_joint_committees()\n else:\n self.scrape_house_committees(url)\n\n def scrape_senate_committees(self, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n find_expr = 'body/div/div/h1[text()=\"Senate Committees\"]/' \\\n 'following-sibling::div/div/div/div//a'\n links = [(a.text_content(), a.attrib['href']) for a in\n page.xpath(find_expr)]\n\n for committee_name, link in links:\n self._scrape_committee(committee_name, link, 'upper')\n\n def scrape_house_committees(self, url):\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n links = doc.xpath(\n 'body/div/div/h1[text()=\"House Committees\"]/'\n 'following-sibling::div/div/div/div//a'\n )\n\n for a in links:\n self._scrape_committee(a.text.strip(), a.get('href'), 'lower')\n\n def _scrape_committee(self, committee_name, link, chamber):\n \"\"\"Scrape individual committee page and add members\"\"\"\n\n page = self.get(link).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(link)\n\n is_subcommittee = bool(page.xpath('//li/a[text()=\"Committee\"]'))\n if is_subcommittee:\n com = Committee(\n chamber,\n re.sub(r'\\s*Subcommittee\\s*', '', committee_name),\n committee_name\n )\n else:\n com = Committee(chamber, committee_name)\n\n OFFICER_SEARCH = '//h2[contains(text(), \"Committee Officers\")]/' \\\n 'following-sibling::div/ul/li/a'\n MEMBER_SEARCH = '//h2[contains(text(), \"Committee Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n HOUSE_SEARCH = '//h2[contains(text(), \"House Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n SENATE_SEARCH = '//h2[contains(text(), \"House Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n for a in (page.xpath(OFFICER_SEARCH) + page.xpath(MEMBER_SEARCH)):\n\n member_name = ' '.join([\n x.strip() for x in\n a.xpath('text()') + a.xpath('span/text()')\n if x.strip()\n ])\n role = a.xpath('small')\n if role:\n role = role[0].xpath('text()')[0].strip()\n else:\n role = 'member'\n\n com.add_member(member_name, role)\n\n com.add_source(link)\n self.save_committee(com)\n\n #Scrapes joint committees\n def scrape_joint_committees(self):\n main_url = 'http://www.capitol.tn.gov/joint/'\n\n page 
= self.get(main_url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(main_url)\n\n for el in page.xpath(\n '//div/h2[text()=\"Committees\"]/'\n 'following-sibling::div/div//a'\n ):\n com_name = el.text\n com_link = el.attrib[\"href\"]\n self.scrape_joint_committee(com_name, com_link)\n\n #Scrapes the individual joint committee - most of it is special case\n def scrape_joint_committee(self, committee_name, url):\n if 'state.tn.us' in url:\n com = Committee('joint', committee_name)\n try:\n page = self.get(url).text\n except requests.exceptions.ConnectionError:\n self.logger.warning(\"Committee link is broken, skipping\")\n return\n\n page = lxml.html.fromstring(page)\n\n for el in page.xpath(\"//div[@class='Blurb']/table//tr[2 <= position() and position() < 10]/td[1]\"):\n if el.xpath('text()') == ['Vacant']:\n continue\n\n (member_name, ) = el.xpath('a/text()')\n if el.xpath('text()'):\n role = el.xpath('text()')[0].strip(' ,')\n else:\n role = 'member'\n\n member_name = member_name.replace('Senator', '')\n member_name = member_name.replace('Representative', '')\n member_name = member_name.strip()\n com.add_member(member_name, role)\n\n com.add_source(url)\n self.save_committee(com)\n\n elif 'gov-opps' in url:\n com = Committee('joint', committee_name)\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n\n links = ['senate', 'house']\n for link in links:\n chamber_link = self.base_href + '/' + link + '/committees/gov-opps.html'\n chamber_page = self.get(chamber_link).text\n chamber_page = lxml.html.fromstring(chamber_page)\n \n OFFICER_SEARCH = '//h2[contains(text(), \"Committee Officers\")]/' \\\n 'following-sibling::div/ul/li/a'\n MEMBER_SEARCH = '//h2[contains(text(), \"Committee Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n for a in (\n chamber_page.xpath(OFFICER_SEARCH) + \n chamber_page.xpath(MEMBER_SEARCH)\n ):\n member_name = ' '.join([\n x.strip() for x in\n a.xpath('.//text()')\n if x.strip()\n ])\n role = a.xpath('small')\n if role:\n role = role[0].xpath('text()')[0].strip()\n member_name = member_name.replace(role, '').strip()\n else:\n role = 'member'\n com.add_member(member_name, role)\n\n com.add_source(chamber_link)\n\n com.add_source(url)\n self.save_committee(com)\n\n else:\n self._scrape_committee(committee_name, url, 'joint')\n", "path": "openstates/tn/committees.py"}], "after_files": [{"content": "\"\"\"\nArchived Committee notes:\n\nSenate committees only avail from 105th forward\n\nUrls are inconsistent\n'http://www.capitol.tn.gov/senate/archives/105GA/Committees/scommemb.htm'\n'http://www.capitol.tn.gov/senate/archives/106GA/Committees/index.html'\n\n'http://www.capitol.tn.gov/house/archives/99GA/Committees/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/100GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/101GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/102GA/Committees/HComm.htm'\n'http://www.capitol.tn.gov/house/archives/103GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/104GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/105GA/Committees/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/106GA/Committees/index.html'\n\n\"\"\"\nimport re\n\nfrom billy.scrape.committees import Committee, CommitteeScraper\nimport lxml.html\nimport requests\n\n\ndef fix_whitespace(s):\n return re.sub(r'\\s+', ' ', s)\n\n\n# All links in a section with a given title\nCOMMITTEE_LINKS_TEMPLATE = '//h2[text()=\"{header}\"]/parent::*//a'\n\n\nclass 
TNCommitteeScraper(CommitteeScraper):\n jurisdiction = 'tn'\n base_href = 'http://www.capitol.tn.gov'\n chambers = {\n 'lower': 'house',\n 'upper': 'senate'\n }\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=True)\n url_chamber = self.chambers[chamber]\n url = 'http://www.capitol.tn.gov/%s/committees/' % (url_chamber)\n if chamber == 'upper':\n self.scrape_senate_committees(url)\n self.scrape_joint_committees()\n else:\n self.scrape_house_committees(url)\n\n def scrape_senate_committees(self, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n standing = COMMITTEE_LINKS_TEMPLATE.format(header=\"Standing Committees\")\n select = COMMITTEE_LINKS_TEMPLATE.format(header=\"Select Committees\")\n find_expr = '{}|{}'.format(standing, select)\n links = [(a.text_content(), a.attrib['href']) for a in\n page.xpath(find_expr)]\n\n for committee_name, link in links:\n self._scrape_committee(committee_name, link, 'upper')\n\n def scrape_house_committees(self, url):\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n standing = COMMITTEE_LINKS_TEMPLATE.format(header=\"Committees & Subcommittees\")\n select = COMMITTEE_LINKS_TEMPLATE.format(header=\"Select Committees\")\n links = doc.xpath('{}|{}'.format(standing, select))\n\n for a in links:\n self._scrape_committee(a.text.strip(), a.get('href'), 'lower')\n\n def _scrape_committee(self, committee_name, link, chamber):\n \"\"\"Scrape individual committee page and add members\"\"\"\n\n page = self.get(link).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(link)\n\n is_subcommittee = bool(page.xpath('//li/a[text()=\"Committee\"]'))\n if is_subcommittee:\n com = Committee(\n chamber,\n re.sub(r'\\s*Subcommittee\\s*', '', committee_name),\n committee_name\n )\n else:\n com = Committee(chamber, committee_name)\n\n OFFICER_SEARCH = '//h2[contains(text(), \"Committee Officers\")]/' \\\n 'following-sibling::div/ul/li/a'\n MEMBER_SEARCH = '//h2[contains(text(), \"Committee Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n HOUSE_SEARCH = '//h2[contains(text(), \"House Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n SENATE_SEARCH = '//h2[contains(text(), \"House Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n for a in (page.xpath(OFFICER_SEARCH) + page.xpath(MEMBER_SEARCH)):\n\n member_name = ' '.join([\n x.strip() for x in\n a.xpath('text()') + a.xpath('span/text()')\n if x.strip()\n ])\n role = a.xpath('small')\n if role:\n role = role[0].xpath('text()')[0].strip()\n else:\n role = 'member'\n\n com.add_member(member_name, role)\n\n com.add_source(link)\n self.save_committee(com)\n\n #Scrapes joint committees\n def scrape_joint_committees(self):\n main_url = 'http://www.capitol.tn.gov/joint/'\n\n page = self.get(main_url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(main_url)\n\n for el in page.xpath(COMMITTEE_LINKS_TEMPLATE.format(header=\"Committees\")):\n com_name = el.text\n com_link = el.attrib[\"href\"]\n self.scrape_joint_committee(com_name, com_link)\n\n #Scrapes the individual joint committee - most of it is special case\n def scrape_joint_committee(self, committee_name, url):\n if 'state.tn.us' in url:\n com = Committee('joint', committee_name)\n try:\n page = self.get(url).text\n except requests.exceptions.ConnectionError:\n self.logger.warning(\"Committee link is broken, skipping\")\n return\n\n page = lxml.html.fromstring(page)\n\n for el in 
page.xpath(\"//div[@class='Blurb']/table//tr[2 <= position() and position() < 10]/td[1]\"):\n if el.xpath('text()') == ['Vacant']:\n continue\n\n (member_name, ) = el.xpath('a/text()')\n if el.xpath('text()'):\n role = el.xpath('text()')[0].strip(' ,')\n else:\n role = 'member'\n\n member_name = member_name.replace('Senator', '')\n member_name = member_name.replace('Representative', '')\n member_name = member_name.strip()\n com.add_member(member_name, role)\n\n com.add_source(url)\n self.save_committee(com)\n\n elif 'gov-opps' in url:\n com = Committee('joint', committee_name)\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n\n links = ['senate', 'house']\n for link in links:\n chamber_link = self.base_href + '/' + link + '/committees/gov-opps.html'\n chamber_page = self.get(chamber_link).text\n chamber_page = lxml.html.fromstring(chamber_page)\n \n OFFICER_SEARCH = '//h2[contains(text(), \"Committee Officers\")]/' \\\n 'following-sibling::div/ul/li/a'\n MEMBER_SEARCH = '//h2[contains(text(), \"Committee Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n for a in (\n chamber_page.xpath(OFFICER_SEARCH) + \n chamber_page.xpath(MEMBER_SEARCH)\n ):\n member_name = ' '.join([\n x.strip() for x in\n a.xpath('.//text()')\n if x.strip()\n ])\n role = a.xpath('small')\n if role:\n role = role[0].xpath('text()')[0].strip()\n member_name = member_name.replace(role, '').strip()\n else:\n role = 'member'\n com.add_member(member_name, role)\n\n com.add_source(chamber_link)\n\n com.add_source(url)\n self.save_committee(com)\n\n else:\n self._scrape_committee(committee_name, url, 'joint')\n", "path": "openstates/tn/committees.py"}]}
2,997
529
gh_patches_debug_2253
rasdani/github-patches
git_diff
coala__coala-bears-1082
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- GofmtBear: Add advanced asciinema The coala bear GofmtBear does not have a proper asciinema. `gofmt` is a command line tool that automatically solves formatting / styling issues to the absolute coding style that Go has. I'm planning to use a working code as the sample where it's filled with mixed indentation (spaces and tabs), semicolons and demonstrate how gofmt formats the code automatically and correctly to the absolute Go coding style. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bears/go/GofmtBear.py` Content: ``` 1 from coalib.bearlib.abstractions.Linter import linter 2 from coalib.bears.requirements.GoRequirement import GoRequirement 3 4 5 @linter(executable='gofmt', 6 use_stdin=True, 7 output_format='corrected', 8 result_message='Formatting can be improved.') 9 class GofmtBear: 10 """ 11 Suggest better formatting options in Go code. Basic checks like alignment, 12 indentation, and redundant parentheses are provided. 13 14 This is done using the ``gofmt`` utility. For more information visit 15 <https://golang.org/cmd/gofmt/>. 16 """ 17 LANGUAGES = {'Go'} 18 REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')} 19 AUTHORS = {'The coala developers'} 20 AUTHORS_EMAILS = {'[email protected]'} 21 LICENSE = 'AGPL-3.0' 22 CAN_FIX = {'Formatting'} 23 24 @staticmethod 25 def create_arguments(filename, file, config_file): 26 return () 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py --- a/bears/go/GofmtBear.py +++ b/bears/go/GofmtBear.py @@ -20,6 +20,7 @@ AUTHORS_EMAILS = {'[email protected]'} LICENSE = 'AGPL-3.0' CAN_FIX = {'Formatting'} + ASCIINEMA_URL = 'https://asciinema.org/a/94812' @staticmethod def create_arguments(filename, file, config_file):
{"golden_diff": "diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py\n--- a/bears/go/GofmtBear.py\n+++ b/bears/go/GofmtBear.py\n@@ -20,6 +20,7 @@\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n+ ASCIINEMA_URL = 'https://asciinema.org/a/94812'\n \n @staticmethod\n def create_arguments(filename, file, config_file):\n", "issue": "GofmtBear: Add advanced asciinema\nThe coala bear GofmtBear does not have a proper asciinema.\r\n\r\n`gofmt` is a command line tool that automatically solves formatting / styling issues to the absolute coding style that Go has.\r\n\r\nI'm planning to use a working code as the sample where it's filled with mixed indentation (spaces and tabs), semicolons and demonstrate how gofmt formats the code automatically and correctly to the absolute Go coding style.\r\n\n", "before_files": [{"content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py"}], "after_files": [{"content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n ASCIINEMA_URL = 'https://asciinema.org/a/94812'\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py"}]}
623
128
gh_patches_debug_31762
rasdani/github-patches
git_diff
DataDog__dd-agent-1042
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Windows CPU times can add up to > 100% We're collecting PercentUserTime and PercentIdleTime from [Win32_PerfFormattedData_PerfOS_Processor](http://msdn.microsoft.com/en-us/library/windows/desktop/aa394271%28v=vs.85%29.aspx) which shouldn't ever add up to more than 100%, but it seems like it does. Maybe we need to use a different metric other than PercentIdleTime? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checks/system/win32.py` Content: ``` 1 from checks import Check 2 3 try: 4 import wmi 5 w = wmi.WMI() 6 except Exception: 7 wmi, w = None, None 8 9 # Device WMI drive types 10 class DriveType(object): 11 UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6) 12 B2MB = float(1048576) 13 KB2MB = B2KB = float(1024) 14 15 def should_ignore_disk(name, blacklist_re): 16 # blacklist_re is a compiled regex, compilation done at config loading time 17 return name =='_total' or blacklist_re is not None and blacklist_re.match(name) 18 19 class Processes(Check): 20 def __init__(self, logger): 21 Check.__init__(self, logger) 22 self.gauge('system.proc.queue_length') 23 self.gauge('system.proc.count') 24 25 def check(self, agentConfig): 26 try: 27 os = w.Win32_PerfFormattedData_PerfOS_System()[0] 28 except AttributeError: 29 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \ 30 ' No process metrics will be returned.') 31 return 32 33 try: 34 cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name="_Total")[0] 35 except AttributeError: 36 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \ 37 ' No process metrics will be returned.') 38 return 39 if os.ProcessorQueueLength is not None: 40 self.save_sample('system.proc.queue_length', os.ProcessorQueueLength) 41 if os.Processes is not None: 42 self.save_sample('system.proc.count', os.Processes) 43 44 return self.get_metrics() 45 46 class Memory(Check): 47 def __init__(self, logger): 48 Check.__init__(self, logger) 49 self.logger = logger 50 self.gauge('system.mem.free') 51 self.gauge('system.mem.used') 52 self.gauge('system.mem.total') 53 self.gauge('system.mem.cached') 54 self.gauge('system.mem.committed') 55 self.gauge('system.mem.paged') 56 self.gauge('system.mem.nonpaged') 57 58 def check(self, agentConfig): 59 try: 60 os = w.Win32_OperatingSystem()[0] 61 except AttributeError: 62 self.logger.info('Missing Win32_OperatingSystem. 
No memory metrics will be returned.') 63 return 64 65 if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None: 66 total = int(os.TotalVisibleMemorySize) / KB2MB 67 free = int(os.FreePhysicalMemory) / KB2MB 68 self.save_sample('system.mem.total', total) 69 self.save_sample('system.mem.free', free) 70 self.save_sample('system.mem.used', total - free) 71 72 mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0] 73 if mem.CacheBytes is not None: 74 self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB) 75 if mem.CommittedBytes is not None: 76 self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB) 77 if mem.PoolPagedBytes is not None: 78 self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB) 79 if mem.PoolNonpagedBytes is not None: 80 self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB) 81 82 return self.get_metrics() 83 84 class Cpu(Check): 85 def __init__(self, logger): 86 Check.__init__(self, logger) 87 self.logger = logger 88 self.gauge('system.cpu.user') 89 self.gauge('system.cpu.idle') 90 self.gauge('system.cpu.interrupt') 91 self.gauge('system.cpu.system') 92 93 def check(self, agentConfig): 94 try: 95 cpu = w.Win32_PerfFormattedData_PerfOS_Processor() 96 except AttributeError: 97 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \ 98 ' No CPU metrics will be returned.') 99 return 100 101 cpu_user = self._average_metric(cpu, 'PercentUserTime') 102 if cpu_user: 103 self.save_sample('system.cpu.user', cpu_user) 104 105 cpu_idle = self._average_metric(cpu, 'PercentIdleTime') 106 if cpu_idle: 107 self.save_sample('system.cpu.idle', cpu_idle) 108 109 cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime') 110 if cpu_interrupt is not None: 111 self.save_sample('system.cpu.interrupt', cpu_interrupt) 112 113 cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime') 114 if cpu_privileged is not None: 115 self.save_sample('system.cpu.system', cpu_privileged) 116 117 return self.get_metrics() 118 119 def _average_metric(self, wmi_class, wmi_prop): 120 ''' Sum all of the values of a metric from a WMI class object, excluding 121 the value for "_Total" 122 ''' 123 val = 0 124 counter = 0 125 for wmi_object in wmi_class: 126 if wmi_object.Name == '_Total': 127 # Skip the _Total value 128 continue 129 130 if getattr(wmi_object, wmi_prop) is not None: 131 counter += 1 132 val += float(getattr(wmi_object, wmi_prop)) 133 134 if counter > 0: 135 return val / counter 136 137 return val 138 139 140 class Network(Check): 141 def __init__(self, logger): 142 Check.__init__(self, logger) 143 self.logger = logger 144 self.gauge('system.net.bytes_rcvd') 145 self.gauge('system.net.bytes_sent') 146 147 def check(self, agentConfig): 148 try: 149 net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface() 150 except AttributeError: 151 self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' 
\ 152 ' No network metrics will be returned') 153 return 154 155 for iface in net: 156 name = self.normalize_device_name(iface.name) 157 if iface.BytesReceivedPerSec is not None: 158 self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec, 159 device_name=name) 160 if iface.BytesSentPerSec is not None: 161 self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec, 162 device_name=name) 163 return self.get_metrics() 164 165 class Disk(Check): 166 def __init__(self, logger): 167 Check.__init__(self, logger) 168 self.logger = logger 169 self.gauge('system.disk.free') 170 self.gauge('system.disk.total') 171 self.gauge('system.disk.in_use') 172 self.gauge('system.disk.used') 173 174 def check(self, agentConfig): 175 try: 176 disk = w.Win32_LogicalDisk() 177 except AttributeError: 178 self.logger.info('Missing Win32_LogicalDisk WMI class.' \ 179 ' No disk metrics will be returned.') 180 return 181 182 blacklist_re = agentConfig.get('device_blacklist_re', None) 183 for device in disk: 184 name = self.normalize_device_name(device.name) 185 if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re): 186 continue 187 if device.FreeSpace is not None and device.Size is not None: 188 free = float(device.FreeSpace) / B2KB 189 total = float(device.Size) / B2KB 190 used = total - free 191 self.save_sample('system.disk.free', free, device_name=name) 192 self.save_sample('system.disk.total', total, device_name=name) 193 self.save_sample('system.disk.used', used, device_name=name) 194 self.save_sample('system.disk.in_use', (used / total), 195 device_name=name) 196 return self.get_metrics() 197 198 class IO(Check): 199 def __init__(self, logger): 200 Check.__init__(self, logger) 201 self.logger = logger 202 self.gauge('system.io.wkb_s') 203 self.gauge('system.io.w_s') 204 self.gauge('system.io.rkb_s') 205 self.gauge('system.io.r_s') 206 self.gauge('system.io.avg_q_sz') 207 208 def check(self, agentConfig): 209 try: 210 disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk() 211 except AttributeError: 212 self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' \ 213 ' No I/O metrics will be returned.') 214 return 215 blacklist_re = agentConfig.get('device_blacklist_re', None) 216 for device in disk: 217 name = self.normalize_device_name(device.name) 218 if should_ignore_disk(name, blacklist_re): 219 continue 220 if device.DiskWriteBytesPerSec is not None: 221 self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB, 222 device_name=name) 223 if device.DiskWritesPerSec is not None: 224 self.save_sample('system.io.w_s', int(device.DiskWritesPerSec), 225 device_name=name) 226 if device.DiskReadBytesPerSec is not None: 227 self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB, 228 device_name=name) 229 if device.DiskReadsPerSec is not None: 230 self.save_sample('system.io.r_s', int(device.DiskReadsPerSec), 231 device_name=name) 232 if device.CurrentDiskQueueLength is not None: 233 self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength, 234 device_name=name) 235 return self.get_metrics() 236 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checks/system/win32.py b/checks/system/win32.py --- a/checks/system/win32.py +++ b/checks/system/win32.py @@ -1,5 +1,9 @@ +# project from checks import Check +# 3rd party +import psutil + try: import wmi w = wmi.WMI() @@ -85,10 +89,10 @@ def __init__(self, logger): Check.__init__(self, logger) self.logger = logger - self.gauge('system.cpu.user') - self.gauge('system.cpu.idle') + self.counter('system.cpu.user') + self.counter('system.cpu.idle') self.gauge('system.cpu.interrupt') - self.gauge('system.cpu.system') + self.counter('system.cpu.system') def check(self, agentConfig): try: @@ -98,21 +102,15 @@ ' No CPU metrics will be returned.') return - cpu_user = self._average_metric(cpu, 'PercentUserTime') - if cpu_user: - self.save_sample('system.cpu.user', cpu_user) - - cpu_idle = self._average_metric(cpu, 'PercentIdleTime') - if cpu_idle: - self.save_sample('system.cpu.idle', cpu_idle) - cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime') if cpu_interrupt is not None: self.save_sample('system.cpu.interrupt', cpu_interrupt) - cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime') - if cpu_privileged is not None: - self.save_sample('system.cpu.system', cpu_privileged) + cpu_percent = psutil.cpu_times() + + self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS) + self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS) + self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS) return self.get_metrics()
{"golden_diff": "diff --git a/checks/system/win32.py b/checks/system/win32.py\n--- a/checks/system/win32.py\n+++ b/checks/system/win32.py\n@@ -1,5 +1,9 @@\n+# project\n from checks import Check\n \n+# 3rd party\n+import psutil\n+\n try:\n import wmi\n w = wmi.WMI()\n@@ -85,10 +89,10 @@\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n- self.gauge('system.cpu.user')\n- self.gauge('system.cpu.idle')\n+ self.counter('system.cpu.user')\n+ self.counter('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n- self.gauge('system.cpu.system')\n+ self.counter('system.cpu.system')\n \n def check(self, agentConfig):\n try:\n@@ -98,21 +102,15 @@\n ' No CPU metrics will be returned.')\n return\n \n- cpu_user = self._average_metric(cpu, 'PercentUserTime')\n- if cpu_user:\n- self.save_sample('system.cpu.user', cpu_user)\n-\n- cpu_idle = self._average_metric(cpu, 'PercentIdleTime')\n- if cpu_idle:\n- self.save_sample('system.cpu.idle', cpu_idle)\n-\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n \n- cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime')\n- if cpu_privileged is not None:\n- self.save_sample('system.cpu.system', cpu_privileged)\n+ cpu_percent = psutil.cpu_times()\n+\n+ self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)\n+ self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)\n+ self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)\n \n return self.get_metrics()\n", "issue": "Windows CPU times can add up to > 100%\nWe're collecting PercentUserTime and PercentIdleTime from [Win32_PerfFormattedData_PerfOS_Processor](http://msdn.microsoft.com/en-us/library/windows/desktop/aa394271%28v=vs.85%29.aspx) which shouldn't ever add up to more than 100%, but it seems like it does.\n\nMaybe we need to use a different metric other than PercentIdleTime?\n\n", "before_files": [{"content": "from checks import Check\n\ntry:\n import wmi\n w = wmi.WMI()\nexcept Exception:\n wmi, w = None, None\n\n# Device WMI drive types\nclass DriveType(object):\n UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)\nB2MB = float(1048576)\nKB2MB = B2KB = float(1024)\n\ndef should_ignore_disk(name, blacklist_re):\n # blacklist_re is a compiled regex, compilation done at config loading time\n return name =='_total' or blacklist_re is not None and blacklist_re.match(name)\n\nclass Processes(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.gauge('system.proc.queue_length')\n self.gauge('system.proc.count')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_PerfFormattedData_PerfOS_System()[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \\\n ' No process metrics will be returned.')\n return\n\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name=\"_Total\")[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No process metrics will be returned.')\n return\n if os.ProcessorQueueLength is not None:\n self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)\n if os.Processes is not None:\n self.save_sample('system.proc.count', os.Processes)\n\n return self.get_metrics()\n\nclass Memory(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n self.gauge('system.mem.cached')\n self.gauge('system.mem.committed')\n self.gauge('system.mem.paged')\n self.gauge('system.mem.nonpaged')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_OperatingSystem()[0]\n except AttributeError:\n self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')\n return\n\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n self.save_sample('system.mem.total', total)\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n\n return self.get_metrics()\n\nclass Cpu(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.cpu.user')\n self.gauge('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n self.gauge('system.cpu.system')\n\n def check(self, agentConfig):\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No CPU metrics will be returned.')\n return\n\n cpu_user = self._average_metric(cpu, 'PercentUserTime')\n if cpu_user:\n self.save_sample('system.cpu.user', cpu_user)\n\n cpu_idle = self._average_metric(cpu, 'PercentIdleTime')\n if cpu_idle:\n self.save_sample('system.cpu.idle', cpu_idle)\n\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n\n cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime')\n if cpu_privileged is not None:\n self.save_sample('system.cpu.system', cpu_privileged)\n\n return self.get_metrics()\n\n def _average_metric(self, wmi_class, wmi_prop):\n ''' Sum all of the values of a metric from a WMI class object, excluding\n the value for \"_Total\"\n '''\n val = 0\n counter = 0\n for wmi_object in wmi_class:\n if wmi_object.Name == '_Total':\n # Skip the _Total value\n continue\n\n if getattr(wmi_object, wmi_prop) is not None:\n counter += 1\n val += float(getattr(wmi_object, wmi_prop))\n\n if counter > 0:\n return val / counter\n\n return val\n\n\nclass Network(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.net.bytes_rcvd')\n self.gauge('system.net.bytes_sent')\n\n def check(self, agentConfig):\n try:\n net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \\\n ' No network metrics will be returned')\n return\n\n for iface in net:\n name = self.normalize_device_name(iface.name)\n if iface.BytesReceivedPerSec is not None:\n self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,\n device_name=name)\n if iface.BytesSentPerSec is not None:\n self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,\n device_name=name)\n return self.get_metrics()\n\nclass Disk(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.disk.free')\n self.gauge('system.disk.total')\n self.gauge('system.disk.in_use')\n self.gauge('system.disk.used')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_LogicalDisk WMI class.' \\\n ' No disk metrics will be returned.')\n return\n\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):\n continue\n if device.FreeSpace is not None and device.Size is not None:\n free = float(device.FreeSpace) / B2KB\n total = float(device.Size) / B2KB\n used = total - free\n self.save_sample('system.disk.free', free, device_name=name)\n self.save_sample('system.disk.total', total, device_name=name)\n self.save_sample('system.disk.used', used, device_name=name)\n self.save_sample('system.disk.in_use', (used / total),\n device_name=name)\n return self.get_metrics()\n\nclass IO(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.io.wkb_s')\n self.gauge('system.io.w_s')\n self.gauge('system.io.rkb_s')\n self.gauge('system.io.r_s')\n self.gauge('system.io.avg_q_sz')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' 
\\\n ' No I/O metrics will be returned.')\n return\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if should_ignore_disk(name, blacklist_re):\n continue\n if device.DiskWriteBytesPerSec is not None:\n self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskWritesPerSec is not None:\n self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),\n device_name=name)\n if device.DiskReadBytesPerSec is not None:\n self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskReadsPerSec is not None:\n self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),\n device_name=name)\n if device.CurrentDiskQueueLength is not None:\n self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,\n device_name=name)\n return self.get_metrics()\n", "path": "checks/system/win32.py"}], "after_files": [{"content": "# project\nfrom checks import Check\n\n# 3rd party\nimport psutil\n\ntry:\n import wmi\n w = wmi.WMI()\nexcept Exception:\n wmi, w = None, None\n\n# Device WMI drive types\nclass DriveType(object):\n UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)\nB2MB = float(1048576)\nKB2MB = B2KB = float(1024)\n\ndef should_ignore_disk(name, blacklist_re):\n # blacklist_re is a compiled regex, compilation done at config loading time\n return name =='_total' or blacklist_re is not None and blacklist_re.match(name)\n\nclass Processes(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.gauge('system.proc.queue_length')\n self.gauge('system.proc.count')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_PerfFormattedData_PerfOS_System()[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \\\n ' No process metrics will be returned.')\n return\n\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name=\"_Total\")[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \\\n ' No process metrics will be returned.')\n return\n if os.ProcessorQueueLength is not None:\n self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)\n if os.Processes is not None:\n self.save_sample('system.proc.count', os.Processes)\n\n return self.get_metrics()\n\nclass Memory(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n self.gauge('system.mem.cached')\n self.gauge('system.mem.committed')\n self.gauge('system.mem.paged')\n self.gauge('system.mem.nonpaged')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_OperatingSystem()[0]\n except AttributeError:\n self.logger.info('Missing Win32_OperatingSystem. 
No memory metrics will be returned.')\n return\n\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n self.save_sample('system.mem.total', total)\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n\n return self.get_metrics()\n\nclass Cpu(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.counter('system.cpu.user')\n self.counter('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n self.counter('system.cpu.system')\n\n def check(self, agentConfig):\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \\\n ' No CPU metrics will be returned.')\n return\n\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n\n cpu_percent = psutil.cpu_times()\n\n self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)\n self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)\n self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)\n\n return self.get_metrics()\n\n def _average_metric(self, wmi_class, wmi_prop):\n ''' Sum all of the values of a metric from a WMI class object, excluding\n the value for \"_Total\"\n '''\n val = 0\n counter = 0\n for wmi_object in wmi_class:\n if wmi_object.Name == '_Total':\n # Skip the _Total value\n continue\n\n if getattr(wmi_object, wmi_prop) is not None:\n counter += 1\n val += float(getattr(wmi_object, wmi_prop))\n\n if counter > 0:\n return val / counter\n\n return val\n\n\nclass Network(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.net.bytes_rcvd')\n self.gauge('system.net.bytes_sent')\n\n def check(self, agentConfig):\n try:\n net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \\\n ' No network metrics will be returned')\n return\n\n for iface in net:\n name = self.normalize_device_name(iface.name)\n if iface.BytesReceivedPerSec is not None:\n self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,\n device_name=name)\n if iface.BytesSentPerSec is not None:\n self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,\n device_name=name)\n return self.get_metrics()\n\nclass Disk(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.disk.free')\n self.gauge('system.disk.total')\n self.gauge('system.disk.in_use')\n self.gauge('system.disk.used')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_LogicalDisk WMI class.' 
\\\n ' No disk metrics will be returned.')\n return\n\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):\n continue\n if device.FreeSpace is not None and device.Size is not None:\n free = float(device.FreeSpace) / B2KB\n total = float(device.Size) / B2KB\n used = total - free\n self.save_sample('system.disk.free', free, device_name=name)\n self.save_sample('system.disk.total', total, device_name=name)\n self.save_sample('system.disk.used', used, device_name=name)\n self.save_sample('system.disk.in_use', (used / total),\n device_name=name)\n return self.get_metrics()\n\nclass IO(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.io.wkb_s')\n self.gauge('system.io.w_s')\n self.gauge('system.io.rkb_s')\n self.gauge('system.io.r_s')\n self.gauge('system.io.avg_q_sz')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' \\\n ' No I/O metrics will be returned.')\n return\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if should_ignore_disk(name, blacklist_re):\n continue\n if device.DiskWriteBytesPerSec is not None:\n self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskWritesPerSec is not None:\n self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),\n device_name=name)\n if device.DiskReadBytesPerSec is not None:\n self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskReadsPerSec is not None:\n self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),\n device_name=name)\n if device.CurrentDiskQueueLength is not None:\n self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,\n device_name=name)\n return self.get_metrics()\n", "path": "checks/system/win32.py"}]}
3,086
478
gh_patches_debug_13245
rasdani/github-patches
git_diff
getnikola__nikola-1780
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `check -l` can’t stomach REDIRECTIONS pages ``` REDIRECTIONS = [ ( "test.html", "/index.html" ) ] WARNING: check: Broken link in output/test.html: 0; url=/index.html WARNING: check: Broken link in output/test.html: /index.html ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nikola/plugins/command/check.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2015 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 from __future__ import print_function 28 from collections import defaultdict 29 import os 30 import re 31 import sys 32 try: 33 from urllib import unquote 34 from urlparse import urlparse, urljoin, urldefrag 35 except ImportError: 36 from urllib.parse import unquote, urlparse, urljoin, urldefrag # NOQA 37 38 from doit.loader import generate_tasks 39 import lxml.html 40 try: 41 import requests 42 except ImportError: 43 requests = None 44 45 from nikola.plugin_categories import Command 46 from nikola.utils import get_logger, req_missing 47 48 49 def _call_nikola_list(site): 50 files = [] 51 deps = defaultdict(list) 52 for task in generate_tasks('render_site', site.gen_tasks('render_site', "Task", '')): 53 files.extend(task.targets) 54 for target in task.targets: 55 deps[target].extend(task.file_dep) 56 for task in generate_tasks('post_render', site.gen_tasks('render_site', "LateTask", '')): 57 files.extend(task.targets) 58 for target in task.targets: 59 deps[target].extend(task.file_dep) 60 return files, deps 61 62 63 def real_scan_files(site): 64 task_fnames = set([]) 65 real_fnames = set([]) 66 output_folder = site.config['OUTPUT_FOLDER'] 67 # First check that all targets are generated in the right places 68 for fname in _call_nikola_list(site)[0]: 69 fname = fname.strip() 70 if fname.startswith(output_folder): 71 task_fnames.add(fname) 72 # And now check that there are no non-target files 73 for root, dirs, files in os.walk(output_folder, followlinks=True): 74 for src_name in files: 75 fname = os.path.join(root, src_name) 76 real_fnames.add(fname) 77 78 only_on_output = list(real_fnames - task_fnames) 79 80 only_on_input = list(task_fnames - real_fnames) 81 82 return (only_on_output, only_on_input) 83 84 85 def fs_relpath_from_url_path(url_path): 86 """Expects as input an 
urlparse(s).path""" 87 url_path = unquote(url_path) 88 # in windows relative paths don't begin with os.sep 89 if sys.platform == 'win32' and len(url_path): 90 url_path = url_path.replace('/', '\\') 91 return url_path 92 93 94 class CommandCheck(Command): 95 """Check the generated site.""" 96 97 name = "check" 98 logger = None 99 100 doc_usage = "-l [--find-sources] | -f" 101 doc_purpose = "check links and files in the generated site" 102 cmd_options = [ 103 { 104 'name': 'links', 105 'short': 'l', 106 'long': 'check-links', 107 'type': bool, 108 'default': False, 109 'help': 'Check for dangling links', 110 }, 111 { 112 'name': 'files', 113 'short': 'f', 114 'long': 'check-files', 115 'type': bool, 116 'default': False, 117 'help': 'Check for unknown (orphaned and not generated) files', 118 }, 119 { 120 'name': 'clean', 121 'long': 'clean-files', 122 'type': bool, 123 'default': False, 124 'help': 'Remove all unknown files, use with caution', 125 }, 126 { 127 'name': 'find_sources', 128 'long': 'find-sources', 129 'type': bool, 130 'default': False, 131 'help': 'List possible source files for files with broken links.', 132 }, 133 { 134 'name': 'verbose', 135 'long': 'verbose', 136 'short': 'v', 137 'type': bool, 138 'default': False, 139 'help': 'Be more verbose.', 140 }, 141 { 142 'name': 'remote', 143 'long': 'remote', 144 'short': 'r', 145 'type': bool, 146 'default': False, 147 'help': 'Check that remote links work.', 148 }, 149 ] 150 151 def _execute(self, options, args): 152 """Check the generated site.""" 153 self.logger = get_logger('check', self.site.loghandlers) 154 155 if not options['links'] and not options['files'] and not options['clean']: 156 print(self.help()) 157 return False 158 if options['verbose']: 159 self.logger.level = 1 160 else: 161 self.logger.level = 4 162 if options['links']: 163 failure = self.scan_links(options['find_sources'], options['remote']) 164 if options['files']: 165 failure = self.scan_files() 166 if options['clean']: 167 failure = self.clean_files() 168 if failure: 169 sys.exit(1) 170 171 existing_targets = set([]) 172 checked_remote_targets = {} 173 174 def analyze(self, fname, find_sources=False, check_remote=False): 175 rv = False 176 self.whitelist = [re.compile(x) for x in self.site.config['LINK_CHECK_WHITELIST']] 177 base_url = urlparse(self.site.config['BASE_URL']) 178 self.existing_targets.add(self.site.config['SITE_URL']) 179 self.existing_targets.add(self.site.config['BASE_URL']) 180 url_type = self.site.config['URL_TYPE'] 181 182 deps = {} 183 if find_sources: 184 deps = _call_nikola_list(self.site)[1] 185 186 if check_remote and requests is None: 187 req_missing(['requests'], 'check remote links') 188 189 if url_type in ('absolute', 'full_path'): 190 url_netloc_to_root = urlparse(self.site.config['BASE_URL']).path 191 try: 192 filename = fname 193 194 if filename.startswith(self.site.config['CACHE_FOLDER']): 195 # Do not look at links in the cache, which are not parsed by 196 # anyone and may result in false positives. Problems arise 197 # with galleries, for example. 
Full rationale: (Issue #1447) 198 self.logger.notice("Ignoring {0} (in cache, links may be incorrect)".format(filename)) 199 return False 200 201 d = lxml.html.fromstring(open(filename, 'rb').read()) 202 for l in d.iterlinks(): 203 target = l[2] 204 if target == "#": 205 continue 206 target, _ = urldefrag(target) 207 parsed = urlparse(target) 208 209 # Warn about links from https to http (mixed-security) 210 if base_url.netloc == parsed.netloc and base_url.scheme == "https" and parsed.scheme == "http": 211 self.logger.warn("Mixed-content security for link in {0}: {1}".format(filename, target)) 212 213 # Absolute links to other domains, skip 214 # Absolute links when using only paths, skip. 215 if ((parsed.scheme or target.startswith('//')) and parsed.netloc != base_url.netloc) or \ 216 ((parsed.scheme or target.startswith('//')) and url_type in ('rel_path', 'full_path')): 217 if not check_remote or parsed.scheme not in ["http", "https"]: 218 continue 219 if parsed.netloc == base_url.netloc: # absolute URL to self.site 220 continue 221 if target in self.checked_remote_targets: # already checked this exact target 222 if self.checked_remote_targets[target] > 399: 223 self.logger.warn("Broken link in {0}: {1} [Error {2}]".format(filename, target, self.checked_remote_targets[target])) 224 continue 225 # Check the remote link works 226 req_headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0 (Nikola)'} # I’m a real boy! 227 resp = requests.head(target, headers=req_headers) 228 self.checked_remote_targets[target] = resp.status_code 229 if resp.status_code > 399: # Error 230 self.logger.warn("Broken link in {0}: {1} [Error {2}]".format(filename, target, resp.status_code)) 231 continue 232 elif resp.status_code <= 399: # The address leads *somewhere* that is not an error 233 self.logger.debug("Successfully checked remote link in {0}: {1} [HTTP: {2}]".format(filename, target, resp.status_code)) 234 continue 235 self.logger.warn("Could not check remote link in {0}: {1} [Unknown problem]".format(filename, target)) 236 continue 237 238 if url_type == 'rel_path': 239 if target.startswith('/'): 240 target_filename = os.path.abspath( 241 os.path.join(os.path.dirname(filename), unquote(target.lstrip('/')))) 242 else: # Relative path 243 target_filename = os.path.abspath( 244 os.path.join(os.path.dirname(filename), unquote(target))) 245 246 elif url_type in ('full_path', 'absolute'): 247 if url_type == 'absolute': 248 # convert to 'full_path' case, ie url relative to root 249 url_rel_path = parsed.path[len(url_netloc_to_root):] 250 else: 251 # convert to relative to base path 252 url_rel_path = target[len(url_netloc_to_root):] 253 if url_rel_path == '' or url_rel_path.endswith('/'): 254 url_rel_path = urljoin(url_rel_path, self.site.config['INDEX_FILE']) 255 fs_rel_path = fs_relpath_from_url_path(url_rel_path) 256 target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], fs_rel_path) 257 258 if any(re.search(x, target_filename) for x in self.whitelist): 259 continue 260 elif target_filename not in self.existing_targets: 261 if os.path.exists(target_filename): 262 self.logger.notice("Good link {0} => {1}".format(target, target_filename)) 263 self.existing_targets.add(target_filename) 264 else: 265 rv = True 266 self.logger.warn("Broken link in {0}: {1}".format(filename, target)) 267 if find_sources: 268 self.logger.warn("Possible sources:") 269 self.logger.warn("\n".join(deps[filename])) 270 self.logger.warn("===============================\n") 271 except 
Exception as exc: 272 self.logger.error("Error with: {0} {1}".format(filename, exc)) 273 return rv 274 275 def scan_links(self, find_sources=False, check_remote=False): 276 self.logger.info("Checking Links:") 277 self.logger.info("===============\n") 278 self.logger.notice("{0} mode".format(self.site.config['URL_TYPE'])) 279 failure = False 280 # Maybe we should just examine all HTML files 281 output_folder = self.site.config['OUTPUT_FOLDER'] 282 for fname in _call_nikola_list(self.site)[0]: 283 if fname.startswith(output_folder) and '.html' == fname[-5:]: 284 if self.analyze(fname, find_sources, check_remote): 285 failure = True 286 if not failure: 287 self.logger.info("All links checked.") 288 return failure 289 290 def scan_files(self): 291 failure = False 292 self.logger.info("Checking Files:") 293 self.logger.info("===============\n") 294 only_on_output, only_on_input = real_scan_files(self.site) 295 296 # Ignore folders 297 only_on_output = [p for p in only_on_output if not os.path.isdir(p)] 298 only_on_input = [p for p in only_on_input if not os.path.isdir(p)] 299 300 if only_on_output: 301 only_on_output.sort() 302 self.logger.warn("Files from unknown origins (orphans):") 303 for f in only_on_output: 304 self.logger.warn(f) 305 failure = True 306 if only_on_input: 307 only_on_input.sort() 308 self.logger.warn("Files not generated:") 309 for f in only_on_input: 310 self.logger.warn(f) 311 if not failure: 312 self.logger.info("All files checked.") 313 return failure 314 315 def clean_files(self): 316 only_on_output, _ = real_scan_files(self.site) 317 for f in only_on_output: 318 os.unlink(f) 319 return True 320 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nikola/plugins/command/check.py b/nikola/plugins/command/check.py --- a/nikola/plugins/command/check.py +++ b/nikola/plugins/command/check.py @@ -238,7 +238,7 @@ if url_type == 'rel_path': if target.startswith('/'): target_filename = os.path.abspath( - os.path.join(os.path.dirname(filename), unquote(target.lstrip('/')))) + os.path.join(self.site.config['OUTPUT_FOLDER'], unquote(target.lstrip('/')))) else: # Relative path target_filename = os.path.abspath( os.path.join(os.path.dirname(filename), unquote(target)))
{"golden_diff": "diff --git a/nikola/plugins/command/check.py b/nikola/plugins/command/check.py\n--- a/nikola/plugins/command/check.py\n+++ b/nikola/plugins/command/check.py\n@@ -238,7 +238,7 @@\n if url_type == 'rel_path':\n if target.startswith('/'):\n target_filename = os.path.abspath(\n- os.path.join(os.path.dirname(filename), unquote(target.lstrip('/'))))\n+ os.path.join(self.site.config['OUTPUT_FOLDER'], unquote(target.lstrip('/'))))\n else: # Relative path\n target_filename = os.path.abspath(\n os.path.join(os.path.dirname(filename), unquote(target)))\n", "issue": "`check -l` can\u2019t stomach REDIRECTIONS pages\n```\nREDIRECTIONS = [ ( \"test.html\", \"/index.html\" ) ]\n\nWARNING: check: Broken link in output/test.html: 0; url=/index.html\nWARNING: check: Broken link in output/test.html: /index.html\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function\nfrom collections import defaultdict\nimport os\nimport re\nimport sys\ntry:\n from urllib import unquote\n from urlparse import urlparse, urljoin, urldefrag\nexcept ImportError:\n from urllib.parse import unquote, urlparse, urljoin, urldefrag # NOQA\n\nfrom doit.loader import generate_tasks\nimport lxml.html\ntry:\n import requests\nexcept ImportError:\n requests = None\n\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import get_logger, req_missing\n\n\ndef _call_nikola_list(site):\n files = []\n deps = defaultdict(list)\n for task in generate_tasks('render_site', site.gen_tasks('render_site', \"Task\", '')):\n files.extend(task.targets)\n for target in task.targets:\n deps[target].extend(task.file_dep)\n for task in generate_tasks('post_render', site.gen_tasks('render_site', \"LateTask\", '')):\n files.extend(task.targets)\n for target in task.targets:\n deps[target].extend(task.file_dep)\n return files, deps\n\n\ndef real_scan_files(site):\n task_fnames = set([])\n real_fnames = set([])\n output_folder = site.config['OUTPUT_FOLDER']\n # First check that all targets are generated in the right places\n for fname in _call_nikola_list(site)[0]:\n fname = fname.strip()\n if fname.startswith(output_folder):\n task_fnames.add(fname)\n # And now check that there are no non-target files\n for root, dirs, files in os.walk(output_folder, followlinks=True):\n for src_name in files:\n fname = os.path.join(root, src_name)\n 
real_fnames.add(fname)\n\n only_on_output = list(real_fnames - task_fnames)\n\n only_on_input = list(task_fnames - real_fnames)\n\n return (only_on_output, only_on_input)\n\n\ndef fs_relpath_from_url_path(url_path):\n \"\"\"Expects as input an urlparse(s).path\"\"\"\n url_path = unquote(url_path)\n # in windows relative paths don't begin with os.sep\n if sys.platform == 'win32' and len(url_path):\n url_path = url_path.replace('/', '\\\\')\n return url_path\n\n\nclass CommandCheck(Command):\n \"\"\"Check the generated site.\"\"\"\n\n name = \"check\"\n logger = None\n\n doc_usage = \"-l [--find-sources] | -f\"\n doc_purpose = \"check links and files in the generated site\"\n cmd_options = [\n {\n 'name': 'links',\n 'short': 'l',\n 'long': 'check-links',\n 'type': bool,\n 'default': False,\n 'help': 'Check for dangling links',\n },\n {\n 'name': 'files',\n 'short': 'f',\n 'long': 'check-files',\n 'type': bool,\n 'default': False,\n 'help': 'Check for unknown (orphaned and not generated) files',\n },\n {\n 'name': 'clean',\n 'long': 'clean-files',\n 'type': bool,\n 'default': False,\n 'help': 'Remove all unknown files, use with caution',\n },\n {\n 'name': 'find_sources',\n 'long': 'find-sources',\n 'type': bool,\n 'default': False,\n 'help': 'List possible source files for files with broken links.',\n },\n {\n 'name': 'verbose',\n 'long': 'verbose',\n 'short': 'v',\n 'type': bool,\n 'default': False,\n 'help': 'Be more verbose.',\n },\n {\n 'name': 'remote',\n 'long': 'remote',\n 'short': 'r',\n 'type': bool,\n 'default': False,\n 'help': 'Check that remote links work.',\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Check the generated site.\"\"\"\n self.logger = get_logger('check', self.site.loghandlers)\n\n if not options['links'] and not options['files'] and not options['clean']:\n print(self.help())\n return False\n if options['verbose']:\n self.logger.level = 1\n else:\n self.logger.level = 4\n if options['links']:\n failure = self.scan_links(options['find_sources'], options['remote'])\n if options['files']:\n failure = self.scan_files()\n if options['clean']:\n failure = self.clean_files()\n if failure:\n sys.exit(1)\n\n existing_targets = set([])\n checked_remote_targets = {}\n\n def analyze(self, fname, find_sources=False, check_remote=False):\n rv = False\n self.whitelist = [re.compile(x) for x in self.site.config['LINK_CHECK_WHITELIST']]\n base_url = urlparse(self.site.config['BASE_URL'])\n self.existing_targets.add(self.site.config['SITE_URL'])\n self.existing_targets.add(self.site.config['BASE_URL'])\n url_type = self.site.config['URL_TYPE']\n\n deps = {}\n if find_sources:\n deps = _call_nikola_list(self.site)[1]\n\n if check_remote and requests is None:\n req_missing(['requests'], 'check remote links')\n\n if url_type in ('absolute', 'full_path'):\n url_netloc_to_root = urlparse(self.site.config['BASE_URL']).path\n try:\n filename = fname\n\n if filename.startswith(self.site.config['CACHE_FOLDER']):\n # Do not look at links in the cache, which are not parsed by\n # anyone and may result in false positives. Problems arise\n # with galleries, for example. 
Full rationale: (Issue #1447)\n self.logger.notice(\"Ignoring {0} (in cache, links may be incorrect)\".format(filename))\n return False\n\n d = lxml.html.fromstring(open(filename, 'rb').read())\n for l in d.iterlinks():\n target = l[2]\n if target == \"#\":\n continue\n target, _ = urldefrag(target)\n parsed = urlparse(target)\n\n # Warn about links from https to http (mixed-security)\n if base_url.netloc == parsed.netloc and base_url.scheme == \"https\" and parsed.scheme == \"http\":\n self.logger.warn(\"Mixed-content security for link in {0}: {1}\".format(filename, target))\n\n # Absolute links to other domains, skip\n # Absolute links when using only paths, skip.\n if ((parsed.scheme or target.startswith('//')) and parsed.netloc != base_url.netloc) or \\\n ((parsed.scheme or target.startswith('//')) and url_type in ('rel_path', 'full_path')):\n if not check_remote or parsed.scheme not in [\"http\", \"https\"]:\n continue\n if parsed.netloc == base_url.netloc: # absolute URL to self.site\n continue\n if target in self.checked_remote_targets: # already checked this exact target\n if self.checked_remote_targets[target] > 399:\n self.logger.warn(\"Broken link in {0}: {1} [Error {2}]\".format(filename, target, self.checked_remote_targets[target]))\n continue\n # Check the remote link works\n req_headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0 (Nikola)'} # I\u2019m a real boy!\n resp = requests.head(target, headers=req_headers)\n self.checked_remote_targets[target] = resp.status_code\n if resp.status_code > 399: # Error\n self.logger.warn(\"Broken link in {0}: {1} [Error {2}]\".format(filename, target, resp.status_code))\n continue\n elif resp.status_code <= 399: # The address leads *somewhere* that is not an error\n self.logger.debug(\"Successfully checked remote link in {0}: {1} [HTTP: {2}]\".format(filename, target, resp.status_code))\n continue\n self.logger.warn(\"Could not check remote link in {0}: {1} [Unknown problem]\".format(filename, target))\n continue\n\n if url_type == 'rel_path':\n if target.startswith('/'):\n target_filename = os.path.abspath(\n os.path.join(os.path.dirname(filename), unquote(target.lstrip('/'))))\n else: # Relative path\n target_filename = os.path.abspath(\n os.path.join(os.path.dirname(filename), unquote(target)))\n\n elif url_type in ('full_path', 'absolute'):\n if url_type == 'absolute':\n # convert to 'full_path' case, ie url relative to root\n url_rel_path = parsed.path[len(url_netloc_to_root):]\n else:\n # convert to relative to base path\n url_rel_path = target[len(url_netloc_to_root):]\n if url_rel_path == '' or url_rel_path.endswith('/'):\n url_rel_path = urljoin(url_rel_path, self.site.config['INDEX_FILE'])\n fs_rel_path = fs_relpath_from_url_path(url_rel_path)\n target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], fs_rel_path)\n\n if any(re.search(x, target_filename) for x in self.whitelist):\n continue\n elif target_filename not in self.existing_targets:\n if os.path.exists(target_filename):\n self.logger.notice(\"Good link {0} => {1}\".format(target, target_filename))\n self.existing_targets.add(target_filename)\n else:\n rv = True\n self.logger.warn(\"Broken link in {0}: {1}\".format(filename, target))\n if find_sources:\n self.logger.warn(\"Possible sources:\")\n self.logger.warn(\"\\n\".join(deps[filename]))\n self.logger.warn(\"===============================\\n\")\n except Exception as exc:\n self.logger.error(\"Error with: {0} {1}\".format(filename, exc))\n return rv\n\n def 
scan_links(self, find_sources=False, check_remote=False):\n self.logger.info(\"Checking Links:\")\n self.logger.info(\"===============\\n\")\n self.logger.notice(\"{0} mode\".format(self.site.config['URL_TYPE']))\n failure = False\n # Maybe we should just examine all HTML files\n output_folder = self.site.config['OUTPUT_FOLDER']\n for fname in _call_nikola_list(self.site)[0]:\n if fname.startswith(output_folder) and '.html' == fname[-5:]:\n if self.analyze(fname, find_sources, check_remote):\n failure = True\n if not failure:\n self.logger.info(\"All links checked.\")\n return failure\n\n def scan_files(self):\n failure = False\n self.logger.info(\"Checking Files:\")\n self.logger.info(\"===============\\n\")\n only_on_output, only_on_input = real_scan_files(self.site)\n\n # Ignore folders\n only_on_output = [p for p in only_on_output if not os.path.isdir(p)]\n only_on_input = [p for p in only_on_input if not os.path.isdir(p)]\n\n if only_on_output:\n only_on_output.sort()\n self.logger.warn(\"Files from unknown origins (orphans):\")\n for f in only_on_output:\n self.logger.warn(f)\n failure = True\n if only_on_input:\n only_on_input.sort()\n self.logger.warn(\"Files not generated:\")\n for f in only_on_input:\n self.logger.warn(f)\n if not failure:\n self.logger.info(\"All files checked.\")\n return failure\n\n def clean_files(self):\n only_on_output, _ = real_scan_files(self.site)\n for f in only_on_output:\n os.unlink(f)\n return True\n", "path": "nikola/plugins/command/check.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function\nfrom collections import defaultdict\nimport os\nimport re\nimport sys\ntry:\n from urllib import unquote\n from urlparse import urlparse, urljoin, urldefrag\nexcept ImportError:\n from urllib.parse import unquote, urlparse, urljoin, urldefrag # NOQA\n\nfrom doit.loader import generate_tasks\nimport lxml.html\ntry:\n import requests\nexcept ImportError:\n requests = None\n\nfrom nikola.plugin_categories import Command\nfrom nikola.utils import get_logger, req_missing\n\n\ndef _call_nikola_list(site):\n files = []\n deps = defaultdict(list)\n for task in generate_tasks('render_site', site.gen_tasks('render_site', \"Task\", '')):\n files.extend(task.targets)\n for target in task.targets:\n deps[target].extend(task.file_dep)\n for task in generate_tasks('post_render', site.gen_tasks('render_site', \"LateTask\", '')):\n files.extend(task.targets)\n for target in task.targets:\n deps[target].extend(task.file_dep)\n return files, deps\n\n\ndef real_scan_files(site):\n task_fnames = set([])\n real_fnames = set([])\n output_folder = site.config['OUTPUT_FOLDER']\n # First check that all targets are generated in the right places\n for fname in _call_nikola_list(site)[0]:\n fname = fname.strip()\n if fname.startswith(output_folder):\n task_fnames.add(fname)\n # And now check that there are no non-target files\n for root, dirs, files in os.walk(output_folder, followlinks=True):\n for src_name in files:\n fname = os.path.join(root, src_name)\n real_fnames.add(fname)\n\n only_on_output = list(real_fnames - task_fnames)\n\n only_on_input = list(task_fnames - real_fnames)\n\n return (only_on_output, only_on_input)\n\n\ndef fs_relpath_from_url_path(url_path):\n \"\"\"Expects as input an urlparse(s).path\"\"\"\n url_path = unquote(url_path)\n # in windows relative paths don't begin with os.sep\n if sys.platform == 'win32' and len(url_path):\n url_path = url_path.replace('/', '\\\\')\n return url_path\n\n\nclass CommandCheck(Command):\n \"\"\"Check the generated site.\"\"\"\n\n name = \"check\"\n logger = None\n\n doc_usage = \"-l [--find-sources] | -f\"\n doc_purpose = \"check links and files in the generated site\"\n cmd_options = [\n {\n 'name': 'links',\n 'short': 'l',\n 'long': 'check-links',\n 'type': bool,\n 'default': False,\n 'help': 'Check for dangling links',\n },\n {\n 'name': 'files',\n 'short': 'f',\n 'long': 'check-files',\n 'type': bool,\n 'default': False,\n 'help': 'Check for unknown (orphaned and not generated) files',\n },\n {\n 'name': 'clean',\n 'long': 'clean-files',\n 'type': bool,\n 'default': False,\n 'help': 'Remove all unknown files, use with caution',\n },\n {\n 'name': 'find_sources',\n 'long': 'find-sources',\n 'type': bool,\n 'default': False,\n 'help': 'List possible source files for files with broken links.',\n },\n {\n 'name': 'verbose',\n 'long': 'verbose',\n 'short': 'v',\n 'type': bool,\n 'default': False,\n 'help': 'Be more verbose.',\n },\n {\n 'name': 'remote',\n 'long': 'remote',\n 'short': 'r',\n 'type': bool,\n 'default': False,\n 'help': 'Check that remote links work.',\n },\n ]\n\n def _execute(self, options, args):\n \"\"\"Check the generated site.\"\"\"\n self.logger = get_logger('check', self.site.loghandlers)\n\n if not options['links'] and not 
options['files'] and not options['clean']:\n print(self.help())\n return False\n if options['verbose']:\n self.logger.level = 1\n else:\n self.logger.level = 4\n if options['links']:\n failure = self.scan_links(options['find_sources'], options['remote'])\n if options['files']:\n failure = self.scan_files()\n if options['clean']:\n failure = self.clean_files()\n if failure:\n sys.exit(1)\n\n existing_targets = set([])\n checked_remote_targets = {}\n\n def analyze(self, fname, find_sources=False, check_remote=False):\n rv = False\n self.whitelist = [re.compile(x) for x in self.site.config['LINK_CHECK_WHITELIST']]\n base_url = urlparse(self.site.config['BASE_URL'])\n self.existing_targets.add(self.site.config['SITE_URL'])\n self.existing_targets.add(self.site.config['BASE_URL'])\n url_type = self.site.config['URL_TYPE']\n\n deps = {}\n if find_sources:\n deps = _call_nikola_list(self.site)[1]\n\n if check_remote and requests is None:\n req_missing(['requests'], 'check remote links')\n\n if url_type in ('absolute', 'full_path'):\n url_netloc_to_root = urlparse(self.site.config['BASE_URL']).path\n try:\n filename = fname\n\n if filename.startswith(self.site.config['CACHE_FOLDER']):\n # Do not look at links in the cache, which are not parsed by\n # anyone and may result in false positives. Problems arise\n # with galleries, for example. Full rationale: (Issue #1447)\n self.logger.notice(\"Ignoring {0} (in cache, links may be incorrect)\".format(filename))\n return False\n\n d = lxml.html.fromstring(open(filename, 'rb').read())\n for l in d.iterlinks():\n target = l[2]\n if target == \"#\":\n continue\n target, _ = urldefrag(target)\n parsed = urlparse(target)\n\n # Warn about links from https to http (mixed-security)\n if base_url.netloc == parsed.netloc and base_url.scheme == \"https\" and parsed.scheme == \"http\":\n self.logger.warn(\"Mixed-content security for link in {0}: {1}\".format(filename, target))\n\n # Absolute links to other domains, skip\n # Absolute links when using only paths, skip.\n if ((parsed.scheme or target.startswith('//')) and parsed.netloc != base_url.netloc) or \\\n ((parsed.scheme or target.startswith('//')) and url_type in ('rel_path', 'full_path')):\n if not check_remote or parsed.scheme not in [\"http\", \"https\"]:\n continue\n if parsed.netloc == base_url.netloc: # absolute URL to self.site\n continue\n if target in self.checked_remote_targets: # already checked this exact target\n if self.checked_remote_targets[target] > 399:\n self.logger.warn(\"Broken link in {0}: {1} [Error {2}]\".format(filename, target, self.checked_remote_targets[target]))\n continue\n # Check the remote link works\n req_headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0 (Nikola)'} # I\u2019m a real boy!\n resp = requests.head(target, headers=req_headers)\n self.checked_remote_targets[target] = resp.status_code\n if resp.status_code > 399: # Error\n self.logger.warn(\"Broken link in {0}: {1} [Error {2}]\".format(filename, target, resp.status_code))\n continue\n elif resp.status_code <= 399: # The address leads *somewhere* that is not an error\n self.logger.debug(\"Successfully checked remote link in {0}: {1} [HTTP: {2}]\".format(filename, target, resp.status_code))\n continue\n self.logger.warn(\"Could not check remote link in {0}: {1} [Unknown problem]\".format(filename, target))\n continue\n\n if url_type == 'rel_path':\n if target.startswith('/'):\n target_filename = os.path.abspath(\n os.path.join(self.site.config['OUTPUT_FOLDER'], 
unquote(target.lstrip('/'))))\n else: # Relative path\n target_filename = os.path.abspath(\n os.path.join(os.path.dirname(filename), unquote(target)))\n\n elif url_type in ('full_path', 'absolute'):\n if url_type == 'absolute':\n # convert to 'full_path' case, ie url relative to root\n url_rel_path = parsed.path[len(url_netloc_to_root):]\n else:\n # convert to relative to base path\n url_rel_path = target[len(url_netloc_to_root):]\n if url_rel_path == '' or url_rel_path.endswith('/'):\n url_rel_path = urljoin(url_rel_path, self.site.config['INDEX_FILE'])\n fs_rel_path = fs_relpath_from_url_path(url_rel_path)\n target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], fs_rel_path)\n\n if any(re.search(x, target_filename) for x in self.whitelist):\n continue\n elif target_filename not in self.existing_targets:\n if os.path.exists(target_filename):\n self.logger.notice(\"Good link {0} => {1}\".format(target, target_filename))\n self.existing_targets.add(target_filename)\n else:\n rv = True\n self.logger.warn(\"Broken link in {0}: {1}\".format(filename, target))\n if find_sources:\n self.logger.warn(\"Possible sources:\")\n self.logger.warn(\"\\n\".join(deps[filename]))\n self.logger.warn(\"===============================\\n\")\n except Exception as exc:\n self.logger.error(\"Error with: {0} {1}\".format(filename, exc))\n return rv\n\n def scan_links(self, find_sources=False, check_remote=False):\n self.logger.info(\"Checking Links:\")\n self.logger.info(\"===============\\n\")\n self.logger.notice(\"{0} mode\".format(self.site.config['URL_TYPE']))\n failure = False\n # Maybe we should just examine all HTML files\n output_folder = self.site.config['OUTPUT_FOLDER']\n for fname in _call_nikola_list(self.site)[0]:\n if fname.startswith(output_folder) and '.html' == fname[-5:]:\n if self.analyze(fname, find_sources, check_remote):\n failure = True\n if not failure:\n self.logger.info(\"All links checked.\")\n return failure\n\n def scan_files(self):\n failure = False\n self.logger.info(\"Checking Files:\")\n self.logger.info(\"===============\\n\")\n only_on_output, only_on_input = real_scan_files(self.site)\n\n # Ignore folders\n only_on_output = [p for p in only_on_output if not os.path.isdir(p)]\n only_on_input = [p for p in only_on_input if not os.path.isdir(p)]\n\n if only_on_output:\n only_on_output.sort()\n self.logger.warn(\"Files from unknown origins (orphans):\")\n for f in only_on_output:\n self.logger.warn(f)\n failure = True\n if only_on_input:\n only_on_input.sort()\n self.logger.warn(\"Files not generated:\")\n for f in only_on_input:\n self.logger.warn(f)\n if not failure:\n self.logger.info(\"All files checked.\")\n return failure\n\n def clean_files(self):\n only_on_output, _ = real_scan_files(self.site)\n for f in only_on_output:\n os.unlink(f)\n return True\n", "path": "nikola/plugins/command/check.py"}]}
3,959
144
gh_patches_debug_41237
rasdani/github-patches
git_diff
bridgecrewio__checkov-1655
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Secrets framework scan not working in 2.0.407 **Describe the bug** This is to duplicate #1536 which has been closed. I don't believe the issue has been resolved. ``` bash-5.1# pwd /test bash-5.1# checkov -d . --framework all _ _ ___| |__ ___ ___| | _______ __ / __| '_ \ / _ \/ __| |/ / _ \ \ / / | (__| | | | __/ (__| < (_) \ V / \___|_| |_|\___|\___|_|\_\___/ \_/ By bridgecrew.io | version: 2.0.407 bash-5.1# ls -la total 4 drwxrwxrwx 1 root root 4096 Aug 25 16:35 . drwxr-xr-x 1 root root 4096 Sep 12 16:29 .. -rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt -rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf -rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml bash-5.1# cat providers.tf access_key = "sfffglfgjldfjdlrt5454002lfofd"bash-5.1# cat password.txt password: mypassword b64password: bXlwYXNzd29yZAo= mypassword=secretkey bash-5.1# cat secret_password.yaml pwd: correcthorsebatterystaple bash-5.1# checkov -d ./ --framework secrets _ _ ___| |__ ___ ___| | _______ __ / __| '_ \ / _ \/ __| |/ / _ \ \ / / | (__| | | | __/ (__| < (_) \ V / \___|_| |_|\___|\___|_|\_\___/ \_/ By bridgecrew.io | version: 2.0.407 bash-5.1# ls -la total 4 drwxrwxrwx 1 root root 4096 Aug 25 16:35 . drwxr-xr-x 1 root root 4096 Sep 12 16:29 .. -rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt -rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf -rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml ``` **To Reproduce** Steps to reproduce the behavior: 1. See above and details in #1536 **Expected behavior** I expect secrets to be detected. **Desktop (please complete the following information):** - running the checkov container - Checkov Version: 2.0.204 **Additional context** Add any other context about the problem here (e.g. code snippets). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/secrets/plugins/entropy_keyword_combinator.py` Content: ``` 1 from detect_secrets.core.potential_secret import PotentialSecret 2 from detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString 3 from detect_secrets.plugins.keyword import KeywordDetector 4 from detect_secrets.plugins.base import BasePlugin 5 from typing import Generator, Any, Set 6 7 8 class EntropyKeywordCombinator(BasePlugin): 9 secret_type = None 10 11 def __init__(self, limit: float) -> None: 12 self.high_entropy_scanners = (Base64HighEntropyString(limit=limit), HexHighEntropyString(limit=limit)) 13 self.keyword_scanner = KeywordDetector() 14 15 def analyze_line( 16 self, 17 filename: str, 18 line: str, 19 line_number: int = 0, 20 **kwargs: Any 21 ) -> Set[PotentialSecret]: 22 """ 23 This method first runs the keyword plugin. If it finds a match - it runs the entropy scanners, and if 24 one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned. 
25 """ 26 keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs) 27 if len(keyword_matches): 28 for entropy_scanner in self.high_entropy_scanners: 29 matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs) 30 if len(matches) > 0: 31 return matches 32 return set([]) 33 34 def analyze_string(self, string: str) -> Generator[str, None, None]: 35 raise NotImplementedError() 36 ``` Path: `checkov/secrets/runner.py` Content: ``` 1 import linecache 2 import logging 3 import os 4 import re 5 import time 6 from typing import Optional, List 7 8 from detect_secrets import SecretsCollection 9 from detect_secrets.core.potential_secret import PotentialSecret 10 from detect_secrets.settings import transient_settings 11 from typing_extensions import TypedDict 12 13 from checkov.common.bridgecrew.platform_integration import bc_integration 14 from checkov.common.comment.enum import COMMENT_REGEX 15 from checkov.common.graph.graph_builder.utils import run_function_multithreaded 16 from checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS 17 from checkov.common.models.enums import CheckResult 18 from checkov.common.output.record import Record 19 from checkov.common.output.report import Report 20 from checkov.common.runners.base_runner import BaseRunner, filter_ignored_paths 21 from checkov.common.runners.base_runner import ignored_directories 22 from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR 23 from checkov.runner_filter import RunnerFilter 24 25 SECRET_TYPE_TO_ID = { 26 'Artifactory Credentials': 'CKV_SECRET_1', 27 'AWS Access Key': 'CKV_SECRET_2', 28 'Azure Storage Account access key': 'CKV_SECRET_3', 29 'Basic Auth Credentials': 'CKV_SECRET_4', 30 'Cloudant Credentials': 'CKV_SECRET_5', 31 'Base64 High Entropy String': 'CKV_SECRET_6', 32 'IBM Cloud IAM Key': 'CKV_SECRET_7', 33 'IBM COS HMAC Credentials': 'CKV_SECRET_8', 34 'JSON Web Token': 'CKV_SECRET_9', 35 # 'Secret Keyword': 'CKV_SECRET_10', 36 'Mailchimp Access Key': 'CKV_SECRET_11', 37 'NPM tokens': 'CKV_SECRET_12', 38 'Private Key': 'CKV_SECRET_13', 39 'Slack Token': 'CKV_SECRET_14', 40 'SoftLayer Credentials': 'CKV_SECRET_15', 41 'Square OAuth Secret': 'CKV_SECRET_16', 42 'Stripe Access Key': 'CKV_SECRET_17', 43 'Twilio API Key': 'CKV_SECRET_18', 44 'Hex High Entropy String': 'CKV_SECRET_19' 45 } 46 CHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()} 47 48 PROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt'] 49 50 51 class _CheckResult(TypedDict, total=False): 52 result: CheckResult 53 suppress_comment: str 54 55 56 class Runner(BaseRunner): 57 check_type = 'secrets' 58 59 def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), 60 collect_skip_comments=True) -> Report: 61 current_dir = os.path.dirname(os.path.realpath(__file__)) 62 secrets = SecretsCollection() 63 with transient_settings({ 64 # Only run scans with only these plugins. 
65 'plugins_used': [ 66 { 67 'name': 'AWSKeyDetector' 68 }, 69 { 70 'name': 'ArtifactoryDetector' 71 }, 72 { 73 'name': 'AzureStorageKeyDetector' 74 }, 75 { 76 'name': 'BasicAuthDetector' 77 }, 78 { 79 'name': 'CloudantDetector' 80 }, 81 { 82 'name': 'IbmCloudIamDetector' 83 }, 84 { 85 'name': 'MailchimpDetector' 86 }, 87 { 88 'name': 'PrivateKeyDetector' 89 }, 90 { 91 'name': 'SlackDetector' 92 }, 93 { 94 'name': 'SoftlayerDetector' 95 }, 96 { 97 'name': 'SquareOAuthDetector' 98 }, 99 { 100 'name': 'StripeDetector' 101 }, 102 { 103 'name': 'TwilioKeyDetector' 104 }, 105 { 106 'name': 'EntropyKeywordCombinator', 107 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py', 108 'limit': 4.5 109 } 110 ] 111 }) as settings: 112 report = Report(self.check_type) 113 # Implement non IaC files (including .terraform dir) 114 files_to_scan = files or [] 115 excluded_paths = (runner_filter.excluded_paths or []) + ignored_directories + [DEFAULT_EXTERNAL_MODULES_DIR] 116 if root_folder: 117 for root, d_names, f_names in os.walk(root_folder): 118 filter_ignored_paths(root, d_names, excluded_paths) 119 filter_ignored_paths(root, f_names, excluded_paths) 120 for file in f_names: 121 if file not in PROHIBITED_FILES and f".{file.split('.')[-1]}" in SUPPORTED_FILE_EXTENSIONS: 122 files_to_scan.append(os.path.join(root, file)) 123 logging.info(f'Secrets scanning will scan {len(files_to_scan)} files') 124 125 settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference']) 126 127 def _scan_file(file_paths: List[str]): 128 for file_path in file_paths: 129 start = time.time() 130 try: 131 secrets.scan_file(file_path) 132 except Exception as err: 133 logging.warning(f"Secret scanning:could not process file {file_path}, {err}") 134 continue 135 end = time.time() 136 scan_time = end - start 137 if scan_time > 10: 138 logging.info(f'Scanned {file_path}, took {scan_time} seconds') 139 140 run_function_multithreaded(_scan_file, files_to_scan, 1, num_of_workers=os.cpu_count()) 141 142 for _, secret in iter(secrets): 143 check_id = SECRET_TYPE_TO_ID.get(secret.type) 144 bc_check_id = bc_integration.ckv_to_bc_id_mapping.get(check_id) if bc_integration.ckv_to_bc_id_mapping else None 145 if not check_id: 146 continue 147 if runner_filter.checks and not runner_filter.should_run_check(check_id, bc_check_id): 148 continue 149 result: _CheckResult = {'result': CheckResult.FAILED} 150 line_text = linecache.getline(secret.filename, secret.line_number) 151 if line_text != "" and line_text.split()[0] == 'git_commit': 152 continue 153 result = self.search_for_suppression( 154 check_id=check_id, 155 bc_check_id=bc_check_id, 156 secret=secret, 157 skipped_checks=runner_filter.skip_checks, 158 ) or result 159 report.add_resource(f'{secret.filename}:{secret.secret_hash}') 160 report.add_record(Record( 161 check_id=check_id, 162 bc_check_id=bc_check_id, 163 check_name=secret.type, 164 check_result=result, 165 code_block=[(secret.line_number, line_text)], 166 file_path=f'/{os.path.relpath(secret.filename, root_folder)}', 167 file_line_range=[secret.line_number, secret.line_number + 1], 168 resource=secret.secret_hash, 169 check_class=None, 170 evaluations=None, 171 file_abs_path=os.path.abspath(secret.filename) 172 )) 173 174 return report 175 176 @staticmethod 177 def search_for_suppression( 178 check_id: str, 179 bc_check_id: str, 180 secret: PotentialSecret, 181 skipped_checks: List[str] 182 ) -> Optional[_CheckResult]: 183 if (check_id in skipped_checks or bc_check_id in skipped_checks) and 
check_id in CHECK_ID_TO_SECRET_TYPE.keys(): 184 return { 185 "result": CheckResult.SKIPPED, 186 "suppress_comment": f"Secret scan {check_id} is skipped" 187 } 188 # Check for suppression comment in the line before, the line of, and the line after the secret 189 for line_number in [secret.line_number, secret.line_number - 1, secret.line_number + 1]: 190 lt = linecache.getline(secret.filename, line_number) 191 skip_search = re.search(COMMENT_REGEX, lt) 192 if skip_search and (skip_search.group(2) == check_id or skip_search.group(2) == bc_check_id): 193 return { 194 "result": CheckResult.SKIPPED, 195 "suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided" 196 } 197 return None 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/secrets/plugins/entropy_keyword_combinator.py b/checkov/secrets/plugins/entropy_keyword_combinator.py --- a/checkov/secrets/plugins/entropy_keyword_combinator.py +++ b/checkov/secrets/plugins/entropy_keyword_combinator.py @@ -1,7 +1,7 @@ from detect_secrets.core.potential_secret import PotentialSecret from detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString from detect_secrets.plugins.keyword import KeywordDetector -from detect_secrets.plugins.base import BasePlugin +from detect_secrets.plugins.base import BasePlugin from typing import Generator, Any, Set @@ -24,10 +24,10 @@ one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned. """ keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs) - if len(keyword_matches): + if keyword_matches: for entropy_scanner in self.high_entropy_scanners: matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs) - if len(matches) > 0: + if matches: return matches return set([]) diff --git a/checkov/secrets/runner.py b/checkov/secrets/runner.py --- a/checkov/secrets/runner.py +++ b/checkov/secrets/runner.py @@ -45,6 +45,7 @@ } CHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()} +ENTROPY_KEYWORD_LIMIT = 3 PROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt'] @@ -56,8 +57,14 @@ class Runner(BaseRunner): check_type = 'secrets' - def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), - collect_skip_comments=True) -> Report: + def run( + self, + root_folder: str, + external_checks_dir: Optional[List[str]] = None, + files: Optional[List[str]] = None, + runner_filter: RunnerFilter = RunnerFilter(), + collect_skip_comments: bool = True + ) -> Report: current_dir = os.path.dirname(os.path.realpath(__file__)) secrets = SecretsCollection() with transient_settings({ @@ -105,7 +112,7 @@ { 'name': 'EntropyKeywordCombinator', 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py', - 'limit': 4.5 + 'limit': ENTROPY_KEYWORD_LIMIT } ] }) as settings: @@ -124,7 +131,7 @@ settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference']) - def _scan_file(file_paths: List[str]): + def _scan_file(file_paths: List[str]) -> None: for file_path in file_paths: start = time.time() try:
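For context on why the patch drops the combinator's entropy limit from 4.5 to a named `ENTROPY_KEYWORD_LIMIT = 3`: the keyword scanner does match lines like `password: mypassword`, but the combinator only reports a finding when one of the high-entropy scanners also fires on the same line, and short human-chosen values rarely reach 4.5 bits per character. A minimal self-contained sketch of that threshold effect — plain Shannon entropy as a stand-in for what the detect_secrets high-entropy plugins measure, not the library's exact code:

```python
import math
from collections import Counter


def shannon_entropy(value: str) -> float:
    """Shannon entropy of a string in bits per character."""
    counts = Counter(value)
    length = len(value)
    return -sum((n / length) * math.log2(n / length) for n in counts.values())


# Candidate secrets taken from the reproduction in the issue above.
for candidate in ("mypassword", "bXlwYXNzd29yZAo=", "sfffglfgjldfjdlrt5454002lfofd"):
    entropy = shannon_entropy(candidate)
    print(
        f"{candidate!r}: {entropy:.2f} bits/char "
        f"-> flagged at limit 3: {entropy > 3}, at limit 4.5: {entropy > 4.5}"
    )
```

All three values land roughly between 3 and 4.5 bits per character, so with the old limit the entropy scanners never confirmed the keyword match and the secrets framework reported nothing.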
{"golden_diff": "diff --git a/checkov/secrets/plugins/entropy_keyword_combinator.py b/checkov/secrets/plugins/entropy_keyword_combinator.py\n--- a/checkov/secrets/plugins/entropy_keyword_combinator.py\n+++ b/checkov/secrets/plugins/entropy_keyword_combinator.py\n@@ -1,7 +1,7 @@\n from detect_secrets.core.potential_secret import PotentialSecret\n from detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString\n from detect_secrets.plugins.keyword import KeywordDetector\n-from detect_secrets.plugins.base import BasePlugin\n+from detect_secrets.plugins.base import BasePlugin\n from typing import Generator, Any, Set\n \n \n@@ -24,10 +24,10 @@\n one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned.\n \"\"\"\n keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs)\n- if len(keyword_matches):\n+ if keyword_matches:\n for entropy_scanner in self.high_entropy_scanners:\n matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs)\n- if len(matches) > 0:\n+ if matches:\n return matches\n return set([])\n \ndiff --git a/checkov/secrets/runner.py b/checkov/secrets/runner.py\n--- a/checkov/secrets/runner.py\n+++ b/checkov/secrets/runner.py\n@@ -45,6 +45,7 @@\n }\n CHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()}\n \n+ENTROPY_KEYWORD_LIMIT = 3\n PROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt']\n \n \n@@ -56,8 +57,14 @@\n class Runner(BaseRunner):\n check_type = 'secrets'\n \n- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n- collect_skip_comments=True) -> Report:\n+ def run(\n+ self,\n+ root_folder: str,\n+ external_checks_dir: Optional[List[str]] = None,\n+ files: Optional[List[str]] = None,\n+ runner_filter: RunnerFilter = RunnerFilter(),\n+ collect_skip_comments: bool = True\n+ ) -> Report:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n secrets = SecretsCollection()\n with transient_settings({\n@@ -105,7 +112,7 @@\n {\n 'name': 'EntropyKeywordCombinator',\n 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',\n- 'limit': 4.5\n+ 'limit': ENTROPY_KEYWORD_LIMIT\n }\n ]\n }) as settings:\n@@ -124,7 +131,7 @@\n \n settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference'])\n \n- def _scan_file(file_paths: List[str]):\n+ def _scan_file(file_paths: List[str]) -> None:\n for file_path in file_paths:\n start = time.time()\n try:\n", "issue": "Secrets framework scan not working in 2.0.407\n**Describe the bug**\r\nThis is to duplicate #1536 which has been closed. I don't believe the issue has been resolved.\r\n\r\n```\r\nbash-5.1# pwd\r\n/test\r\nbash-5.1# checkov -d . 
--framework all\r\n\r\n _ _\r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V /\r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/\r\n\r\nBy bridgecrew.io | version: 2.0.407\r\n\r\nbash-5.1# ls -la\r\ntotal 4\r\ndrwxrwxrwx 1 root root 4096 Aug 25 16:35 .\r\ndrwxr-xr-x 1 root root 4096 Sep 12 16:29 ..\r\n-rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt\r\n-rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf\r\n-rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml\r\n\r\nbash-5.1# cat providers.tf\r\naccess_key = \"sfffglfgjldfjdlrt5454002lfofd\"bash-5.1# cat password.txt\r\npassword: mypassword\r\nb64password: bXlwYXNzd29yZAo=\r\nmypassword=secretkey\r\n\r\nbash-5.1# cat secret_password.yaml\r\npwd: correcthorsebatterystaple\r\n\r\nbash-5.1# checkov -d ./ --framework secrets\r\n\r\n _ _\r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V /\r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/\r\n\r\nBy bridgecrew.io | version: 2.0.407\r\n\r\nbash-5.1# ls -la\r\ntotal 4\r\ndrwxrwxrwx 1 root root 4096 Aug 25 16:35 .\r\ndrwxr-xr-x 1 root root 4096 Sep 12 16:29 ..\r\n-rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt\r\n-rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf\r\n-rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. See above and details in #1536 \r\n\r\n**Expected behavior**\r\nI expect secrets to be detected.\r\n\r\n**Desktop (please complete the following information):**\r\n - running the checkov container\r\n - Checkov Version: 2.0.204\r\n\r\n**Additional context**\r\nAdd any other context about the problem here (e.g. code snippets).\r\n\n", "before_files": [{"content": "from detect_secrets.core.potential_secret import PotentialSecret\nfrom detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString\nfrom detect_secrets.plugins.keyword import KeywordDetector\nfrom detect_secrets.plugins.base import BasePlugin\nfrom typing import Generator, Any, Set\n\n\nclass EntropyKeywordCombinator(BasePlugin):\n secret_type = None\n\n def __init__(self, limit: float) -> None:\n self.high_entropy_scanners = (Base64HighEntropyString(limit=limit), HexHighEntropyString(limit=limit))\n self.keyword_scanner = KeywordDetector()\n\n def analyze_line(\n self,\n filename: str,\n line: str,\n line_number: int = 0,\n **kwargs: Any\n ) -> Set[PotentialSecret]:\n \"\"\"\n This method first runs the keyword plugin. 
If it finds a match - it runs the entropy scanners, and if\n one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned.\n \"\"\"\n keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs)\n if len(keyword_matches):\n for entropy_scanner in self.high_entropy_scanners:\n matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs)\n if len(matches) > 0:\n return matches\n return set([])\n\n def analyze_string(self, string: str) -> Generator[str, None, None]:\n raise NotImplementedError()\n", "path": "checkov/secrets/plugins/entropy_keyword_combinator.py"}, {"content": "import linecache\nimport logging\nimport os\nimport re\nimport time\nfrom typing import Optional, List\n\nfrom detect_secrets import SecretsCollection\nfrom detect_secrets.core.potential_secret import PotentialSecret\nfrom detect_secrets.settings import transient_settings\nfrom typing_extensions import TypedDict\n\nfrom checkov.common.bridgecrew.platform_integration import bc_integration\nfrom checkov.common.comment.enum import COMMENT_REGEX\nfrom checkov.common.graph.graph_builder.utils import run_function_multithreaded\nfrom checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_paths\nfrom checkov.common.runners.base_runner import ignored_directories\nfrom checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR\nfrom checkov.runner_filter import RunnerFilter\n\nSECRET_TYPE_TO_ID = {\n 'Artifactory Credentials': 'CKV_SECRET_1',\n 'AWS Access Key': 'CKV_SECRET_2',\n 'Azure Storage Account access key': 'CKV_SECRET_3',\n 'Basic Auth Credentials': 'CKV_SECRET_4',\n 'Cloudant Credentials': 'CKV_SECRET_5',\n 'Base64 High Entropy String': 'CKV_SECRET_6',\n 'IBM Cloud IAM Key': 'CKV_SECRET_7',\n 'IBM COS HMAC Credentials': 'CKV_SECRET_8',\n 'JSON Web Token': 'CKV_SECRET_9',\n # 'Secret Keyword': 'CKV_SECRET_10',\n 'Mailchimp Access Key': 'CKV_SECRET_11',\n 'NPM tokens': 'CKV_SECRET_12',\n 'Private Key': 'CKV_SECRET_13',\n 'Slack Token': 'CKV_SECRET_14',\n 'SoftLayer Credentials': 'CKV_SECRET_15',\n 'Square OAuth Secret': 'CKV_SECRET_16',\n 'Stripe Access Key': 'CKV_SECRET_17',\n 'Twilio API Key': 'CKV_SECRET_18',\n 'Hex High Entropy String': 'CKV_SECRET_19'\n}\nCHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()}\n\nPROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt']\n\n\nclass _CheckResult(TypedDict, total=False):\n result: CheckResult\n suppress_comment: str\n\n\nclass Runner(BaseRunner):\n check_type = 'secrets'\n\n def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True) -> Report:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n secrets = SecretsCollection()\n with transient_settings({\n # Only run scans with only these plugins.\n 'plugins_used': [\n {\n 'name': 'AWSKeyDetector'\n },\n {\n 'name': 'ArtifactoryDetector'\n },\n {\n 'name': 'AzureStorageKeyDetector'\n },\n {\n 'name': 'BasicAuthDetector'\n },\n {\n 'name': 'CloudantDetector'\n },\n {\n 'name': 'IbmCloudIamDetector'\n },\n {\n 'name': 'MailchimpDetector'\n },\n {\n 'name': 'PrivateKeyDetector'\n },\n {\n 'name': 'SlackDetector'\n },\n {\n 'name': 'SoftlayerDetector'\n },\n {\n 'name': 
'SquareOAuthDetector'\n },\n {\n 'name': 'StripeDetector'\n },\n {\n 'name': 'TwilioKeyDetector'\n },\n {\n 'name': 'EntropyKeywordCombinator',\n 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',\n 'limit': 4.5\n }\n ]\n }) as settings:\n report = Report(self.check_type)\n # Implement non IaC files (including .terraform dir)\n files_to_scan = files or []\n excluded_paths = (runner_filter.excluded_paths or []) + ignored_directories + [DEFAULT_EXTERNAL_MODULES_DIR]\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_paths(root, d_names, excluded_paths)\n filter_ignored_paths(root, f_names, excluded_paths)\n for file in f_names:\n if file not in PROHIBITED_FILES and f\".{file.split('.')[-1]}\" in SUPPORTED_FILE_EXTENSIONS:\n files_to_scan.append(os.path.join(root, file))\n logging.info(f'Secrets scanning will scan {len(files_to_scan)} files')\n\n settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference'])\n\n def _scan_file(file_paths: List[str]):\n for file_path in file_paths:\n start = time.time()\n try:\n secrets.scan_file(file_path)\n except Exception as err:\n logging.warning(f\"Secret scanning:could not process file {file_path}, {err}\")\n continue\n end = time.time()\n scan_time = end - start\n if scan_time > 10:\n logging.info(f'Scanned {file_path}, took {scan_time} seconds')\n\n run_function_multithreaded(_scan_file, files_to_scan, 1, num_of_workers=os.cpu_count())\n\n for _, secret in iter(secrets):\n check_id = SECRET_TYPE_TO_ID.get(secret.type)\n bc_check_id = bc_integration.ckv_to_bc_id_mapping.get(check_id) if bc_integration.ckv_to_bc_id_mapping else None\n if not check_id:\n continue\n if runner_filter.checks and not runner_filter.should_run_check(check_id, bc_check_id):\n continue\n result: _CheckResult = {'result': CheckResult.FAILED}\n line_text = linecache.getline(secret.filename, secret.line_number)\n if line_text != \"\" and line_text.split()[0] == 'git_commit':\n continue\n result = self.search_for_suppression(\n check_id=check_id,\n bc_check_id=bc_check_id,\n secret=secret,\n skipped_checks=runner_filter.skip_checks,\n ) or result\n report.add_resource(f'{secret.filename}:{secret.secret_hash}')\n report.add_record(Record(\n check_id=check_id,\n bc_check_id=bc_check_id,\n check_name=secret.type,\n check_result=result,\n code_block=[(secret.line_number, line_text)],\n file_path=f'/{os.path.relpath(secret.filename, root_folder)}',\n file_line_range=[secret.line_number, secret.line_number + 1],\n resource=secret.secret_hash,\n check_class=None,\n evaluations=None,\n file_abs_path=os.path.abspath(secret.filename)\n ))\n\n return report\n\n @staticmethod\n def search_for_suppression(\n check_id: str,\n bc_check_id: str,\n secret: PotentialSecret,\n skipped_checks: List[str]\n ) -> Optional[_CheckResult]:\n if (check_id in skipped_checks or bc_check_id in skipped_checks) and check_id in CHECK_ID_TO_SECRET_TYPE.keys():\n return {\n \"result\": CheckResult.SKIPPED,\n \"suppress_comment\": f\"Secret scan {check_id} is skipped\"\n }\n # Check for suppression comment in the line before, the line of, and the line after the secret\n for line_number in [secret.line_number, secret.line_number - 1, secret.line_number + 1]:\n lt = linecache.getline(secret.filename, line_number)\n skip_search = re.search(COMMENT_REGEX, lt)\n if skip_search and (skip_search.group(2) == check_id or skip_search.group(2) == bc_check_id):\n return {\n \"result\": CheckResult.SKIPPED,\n \"suppress_comment\": 
skip_search.group(3)[1:] if skip_search.group(3) else \"No comment provided\"\n }\n return None\n", "path": "checkov/secrets/runner.py"}], "after_files": [{"content": "from detect_secrets.core.potential_secret import PotentialSecret\nfrom detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString\nfrom detect_secrets.plugins.keyword import KeywordDetector\nfrom detect_secrets.plugins.base import BasePlugin\nfrom typing import Generator, Any, Set\n\n\nclass EntropyKeywordCombinator(BasePlugin):\n secret_type = None\n\n def __init__(self, limit: float) -> None:\n self.high_entropy_scanners = (Base64HighEntropyString(limit=limit), HexHighEntropyString(limit=limit))\n self.keyword_scanner = KeywordDetector()\n\n def analyze_line(\n self,\n filename: str,\n line: str,\n line_number: int = 0,\n **kwargs: Any\n ) -> Set[PotentialSecret]:\n \"\"\"\n This method first runs the keyword plugin. If it finds a match - it runs the entropy scanners, and if\n one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned.\n \"\"\"\n keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs)\n if keyword_matches:\n for entropy_scanner in self.high_entropy_scanners:\n matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs)\n if matches:\n return matches\n return set([])\n\n def analyze_string(self, string: str) -> Generator[str, None, None]:\n raise NotImplementedError()\n", "path": "checkov/secrets/plugins/entropy_keyword_combinator.py"}, {"content": "import linecache\nimport logging\nimport os\nimport re\nimport time\nfrom typing import Optional, List\n\nfrom detect_secrets import SecretsCollection\nfrom detect_secrets.core.potential_secret import PotentialSecret\nfrom detect_secrets.settings import transient_settings\nfrom typing_extensions import TypedDict\n\nfrom checkov.common.bridgecrew.platform_integration import bc_integration\nfrom checkov.common.comment.enum import COMMENT_REGEX\nfrom checkov.common.graph.graph_builder.utils import run_function_multithreaded\nfrom checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_paths\nfrom checkov.common.runners.base_runner import ignored_directories\nfrom checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR\nfrom checkov.runner_filter import RunnerFilter\n\nSECRET_TYPE_TO_ID = {\n 'Artifactory Credentials': 'CKV_SECRET_1',\n 'AWS Access Key': 'CKV_SECRET_2',\n 'Azure Storage Account access key': 'CKV_SECRET_3',\n 'Basic Auth Credentials': 'CKV_SECRET_4',\n 'Cloudant Credentials': 'CKV_SECRET_5',\n 'Base64 High Entropy String': 'CKV_SECRET_6',\n 'IBM Cloud IAM Key': 'CKV_SECRET_7',\n 'IBM COS HMAC Credentials': 'CKV_SECRET_8',\n 'JSON Web Token': 'CKV_SECRET_9',\n # 'Secret Keyword': 'CKV_SECRET_10',\n 'Mailchimp Access Key': 'CKV_SECRET_11',\n 'NPM tokens': 'CKV_SECRET_12',\n 'Private Key': 'CKV_SECRET_13',\n 'Slack Token': 'CKV_SECRET_14',\n 'SoftLayer Credentials': 'CKV_SECRET_15',\n 'Square OAuth Secret': 'CKV_SECRET_16',\n 'Stripe Access Key': 'CKV_SECRET_17',\n 'Twilio API Key': 'CKV_SECRET_18',\n 'Hex High Entropy String': 'CKV_SECRET_19'\n}\nCHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()}\n\nENTROPY_KEYWORD_LIMIT = 3\nPROHIBITED_FILES = 
['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt']\n\n\nclass _CheckResult(TypedDict, total=False):\n result: CheckResult\n suppress_comment: str\n\n\nclass Runner(BaseRunner):\n check_type = 'secrets'\n\n def run(\n self,\n root_folder: str,\n external_checks_dir: Optional[List[str]] = None,\n files: Optional[List[str]] = None,\n runner_filter: RunnerFilter = RunnerFilter(),\n collect_skip_comments: bool = True\n ) -> Report:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n secrets = SecretsCollection()\n with transient_settings({\n # Only run scans with only these plugins.\n 'plugins_used': [\n {\n 'name': 'AWSKeyDetector'\n },\n {\n 'name': 'ArtifactoryDetector'\n },\n {\n 'name': 'AzureStorageKeyDetector'\n },\n {\n 'name': 'BasicAuthDetector'\n },\n {\n 'name': 'CloudantDetector'\n },\n {\n 'name': 'IbmCloudIamDetector'\n },\n {\n 'name': 'MailchimpDetector'\n },\n {\n 'name': 'PrivateKeyDetector'\n },\n {\n 'name': 'SlackDetector'\n },\n {\n 'name': 'SoftlayerDetector'\n },\n {\n 'name': 'SquareOAuthDetector'\n },\n {\n 'name': 'StripeDetector'\n },\n {\n 'name': 'TwilioKeyDetector'\n },\n {\n 'name': 'EntropyKeywordCombinator',\n 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',\n 'limit': ENTROPY_KEYWORD_LIMIT\n }\n ]\n }) as settings:\n report = Report(self.check_type)\n # Implement non IaC files (including .terraform dir)\n files_to_scan = files or []\n excluded_paths = (runner_filter.excluded_paths or []) + ignored_directories + [DEFAULT_EXTERNAL_MODULES_DIR]\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_paths(root, d_names, excluded_paths)\n filter_ignored_paths(root, f_names, excluded_paths)\n for file in f_names:\n if file not in PROHIBITED_FILES and f\".{file.split('.')[-1]}\" in SUPPORTED_FILE_EXTENSIONS:\n files_to_scan.append(os.path.join(root, file))\n logging.info(f'Secrets scanning will scan {len(files_to_scan)} files')\n\n settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference'])\n\n def _scan_file(file_paths: List[str]) -> None:\n for file_path in file_paths:\n start = time.time()\n try:\n secrets.scan_file(file_path)\n except Exception as err:\n logging.warning(f\"Secret scanning:could not process file {file_path}, {err}\")\n continue\n end = time.time()\n scan_time = end - start\n if scan_time > 10:\n logging.info(f'Scanned {file_path}, took {scan_time} seconds')\n\n run_function_multithreaded(_scan_file, files_to_scan, 1, num_of_workers=os.cpu_count())\n\n for _, secret in iter(secrets):\n check_id = SECRET_TYPE_TO_ID.get(secret.type)\n bc_check_id = bc_integration.ckv_to_bc_id_mapping.get(check_id) if bc_integration.ckv_to_bc_id_mapping else None\n if not check_id:\n continue\n if runner_filter.checks and not runner_filter.should_run_check(check_id, bc_check_id):\n continue\n result: _CheckResult = {'result': CheckResult.FAILED}\n line_text = linecache.getline(secret.filename, secret.line_number)\n if line_text != \"\" and line_text.split()[0] == 'git_commit':\n continue\n result = self.search_for_suppression(\n check_id=check_id,\n bc_check_id=bc_check_id,\n secret=secret,\n skipped_checks=runner_filter.skip_checks,\n ) or result\n report.add_resource(f'{secret.filename}:{secret.secret_hash}')\n report.add_record(Record(\n check_id=check_id,\n bc_check_id=bc_check_id,\n check_name=secret.type,\n check_result=result,\n code_block=[(secret.line_number, line_text)],\n file_path=f'/{os.path.relpath(secret.filename, root_folder)}',\n 
file_line_range=[secret.line_number, secret.line_number + 1],\n resource=secret.secret_hash,\n check_class=None,\n evaluations=None,\n file_abs_path=os.path.abspath(secret.filename)\n ))\n\n return report\n\n @staticmethod\n def search_for_suppression(\n check_id: str,\n bc_check_id: str,\n secret: PotentialSecret,\n skipped_checks: List[str]\n ) -> Optional[_CheckResult]:\n if (check_id in skipped_checks or bc_check_id in skipped_checks) and check_id in CHECK_ID_TO_SECRET_TYPE.keys():\n return {\n \"result\": CheckResult.SKIPPED,\n \"suppress_comment\": f\"Secret scan {check_id} is skipped\"\n }\n # Check for suppression comment in the line before, the line of, and the line after the secret\n for line_number in [secret.line_number, secret.line_number - 1, secret.line_number + 1]:\n lt = linecache.getline(secret.filename, line_number)\n skip_search = re.search(COMMENT_REGEX, lt)\n if skip_search and (skip_search.group(2) == check_id or skip_search.group(2) == bc_check_id):\n return {\n \"result\": CheckResult.SKIPPED,\n \"suppress_comment\": skip_search.group(3)[1:] if skip_search.group(3) else \"No comment provided\"\n }\n return None\n", "path": "checkov/secrets/runner.py"}]}
3,654
689
gh_patches_debug_9694
rasdani/github-patches
git_diff
pretix__pretix-883
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- rich_text: tel schema When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened. rich_text: tel schema When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pretix/base/templatetags/rich_text.py` Content: ``` 1 import urllib.parse 2 3 import bleach 4 import markdown 5 from bleach import DEFAULT_CALLBACKS 6 from django import template 7 from django.conf import settings 8 from django.core import signing 9 from django.urls import reverse 10 from django.utils.http import is_safe_url 11 from django.utils.safestring import mark_safe 12 13 register = template.Library() 14 15 ALLOWED_TAGS = [ 16 'a', 17 'abbr', 18 'acronym', 19 'b', 20 'blockquote', 21 'br', 22 'code', 23 'em', 24 'i', 25 'li', 26 'ol', 27 'strong', 28 'ul', 29 'p', 30 'table', 31 'tbody', 32 'thead', 33 'tr', 34 'td', 35 'th', 36 'div', 37 'span', 38 'hr', 39 'h1', 40 'h2', 41 'h3', 42 'h4', 43 'h5', 44 'h6', 45 'pre', 46 # Update doc/user/markdown.rst if you change this! 47 ] 48 49 ALLOWED_ATTRIBUTES = { 50 'a': ['href', 'title'], 51 'abbr': ['title'], 52 'acronym': ['title'], 53 'table': ['width'], 54 'td': ['width', 'align'], 55 'div': ['class'], 56 'p': ['class'], 57 'span': ['class'], 58 # Update doc/user/markdown.rst if you change this! 59 } 60 61 ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel'] 62 63 64 def safelink_callback(attrs, new=False): 65 url = attrs.get((None, 'href'), '/') 66 if not is_safe_url(url) and not url.startswith('mailto:'): 67 signer = signing.Signer(salt='safe-redirect') 68 attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url)) 69 attrs[None, 'target'] = '_blank' 70 attrs[None, 'rel'] = 'noopener' 71 return attrs 72 73 74 def abslink_callback(attrs, new=False): 75 attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/')) 76 attrs[None, 'target'] = '_blank' 77 attrs[None, 'rel'] = 'noopener' 78 return attrs 79 80 81 def markdown_compile(source): 82 return bleach.clean( 83 markdown.markdown( 84 source, 85 extensions=[ 86 'markdown.extensions.sane_lists', 87 # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails 88 ] 89 ), 90 tags=ALLOWED_TAGS, 91 attributes=ALLOWED_ATTRIBUTES, 92 protocols=ALLOWED_PROTOCOLS, 93 ) 94 95 96 @register.filter 97 def rich_text(text: str, **kwargs): 98 """ 99 Processes markdown and cleans HTML in a text input. 
100 """ 101 text = str(text) 102 body_md = bleach.linkify( 103 markdown_compile(text), 104 callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback]) 105 ) 106 return mark_safe(body_md) 107 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py --- a/src/pretix/base/templatetags/rich_text.py +++ b/src/pretix/base/templatetags/rich_text.py @@ -63,7 +63,7 @@ def safelink_callback(attrs, new=False): url = attrs.get((None, 'href'), '/') - if not is_safe_url(url) and not url.startswith('mailto:'): + if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'): signer = signing.Signer(salt='safe-redirect') attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url)) attrs[None, 'target'] = '_blank'
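The protocol itself was already allowed (`'tel'` is in `ALLOWED_PROTOCOLS`, so `bleach.clean` keeps the href); what the patch changes is that `safelink_callback` no longer routes `tel:` URLs through the signed redirect view, treating them like `mailto:` links. A rough standalone sketch of the patched behaviour — the redirect target and the reduced tag whitelist here are simplified placeholders, not pretix's real configuration:

```python
import bleach
import markdown

ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']


def safelink_callback(attrs, new=False):
    # Simplified stand-in: anything not explicitly exempted would be sent
    # through a redirect view instead of being linked directly.
    url = attrs.get((None, 'href'), '/')
    if not url.startswith(('mailto:', 'tel:')):  # the patched exemption
        attrs[None, 'href'] = '/redirect/?url=' + url  # placeholder target
    return attrs


html = bleach.clean(
    markdown.markdown('[Call us!](tel:+1-202-555-0102)'),
    tags=['a', 'p'],
    attributes={'a': ['href', 'title']},
    protocols=ALLOWED_PROTOCOLS,
)
print(bleach.linkify(html, callbacks=[safelink_callback]))
# Expected output (roughly): <p><a href="tel:+1-202-555-0102">Call us!</a></p>
```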
{"golden_diff": "diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py\n--- a/src/pretix/base/templatetags/rich_text.py\n+++ b/src/pretix/base/templatetags/rich_text.py\n@@ -63,7 +63,7 @@\n \n def safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n- if not is_safe_url(url) and not url.startswith('mailto:'):\n+ if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n", "issue": "rich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\nrich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\n", "before_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\nALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. 
with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}], "after_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\nALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}]}
1,417
195
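The pretix fix in the record above extends `safelink_callback` so that `tel:` links, like `mailto:` links, keep their original `href` instead of being wrapped in the signed redirect. Below is a minimal, self-contained sketch of that callback pattern for `bleach.linkify`; the `/redirect/?url=` path is a placeholder for pretix's signed-redirect view, and the `is_safe_url` allow-list check from the real code is omitted for brevity.

```python
import urllib.parse

import bleach

# Schemes whose links should keep their raw href. Everything else is routed
# through a redirect endpoint; '/redirect/?url=' is a placeholder here, the
# real code signs the URL and reverses a named Django view instead.
SAFE_SCHEMES = ('mailto:', 'tel:')


def safelink_callback(attrs, new=False):
    url = attrs.get((None, 'href'), '/')
    if not url.startswith(SAFE_SCHEMES):
        attrs[(None, 'href')] = '/redirect/?url=' + urllib.parse.quote(url)
        attrs[(None, 'target')] = '_blank'
        attrs[(None, 'rel')] = 'noopener'
    return attrs


html = '<a href="tel:+1-202-555-0102">Call us!</a> See https://example.com'
print(bleach.linkify(html, callbacks=[safelink_callback]))
```

`bleach.linkify` runs the callback both for anchors already present in the HTML and for bare URLs it autolinks, so the `tel:` anchor passes through untouched while the plain URL gets the redirect treatment.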
gh_patches_debug_37392
rasdani/github-patches
git_diff
wemake-services__wemake-python-styleguide-1670
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Stray WPS513 warning. # Bug report `WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block. ## What's wrong The following snippet: ```python if private_key is not None: if isinstance(private_key, PKey): key = private_key else: if isinstance(private_key, str): # WPS513 Found implicit `elif` condition stream = io.StringIO() stream.write(private_key) stream.seek(0) private_key = stream key = RSAKey.from_private_key(private_key) ``` triggers a stray WPS513 warning. Seems like the assignment to `key` is not taken into account. ## How is that should be `WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wemake_python_styleguide/visitors/tokenize/conditions.py` Content: ``` 1 import tokenize 2 from typing import ClassVar, FrozenSet 3 4 from typing_extensions import final 5 6 from wemake_python_styleguide.violations.refactoring import ( 7 ImplicitElifViolation, 8 ) 9 from wemake_python_styleguide.visitors.base import BaseTokenVisitor 10 11 12 @final 13 class IfElseVisitor(BaseTokenVisitor): 14 """ 15 Checks if tokens tokens. 16 17 We use ``tokenize`` instead of ``ast`` because 18 19 .. code:: python 20 21 if some: 22 ... 23 else: 24 if other: 25 ... 26 27 has the same ``ast`` representation as: 28 29 .. code:: python 30 31 if some: 32 ... 33 elif other: 34 ... 35 36 That's why we have to use ``tokenize`` to find 37 the raw tokens inside the text. 38 39 """ 40 41 _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset(( 42 tokenize.NEWLINE, 43 tokenize.NL, 44 tokenize.COLON, 45 tokenize.INDENT, 46 )) 47 48 def visit_name(self, token: tokenize.TokenInfo) -> None: 49 """ 50 Checks that ``if`` nodes are defined correctly. 51 52 Raises: 53 ImplicitElifViolation 54 55 """ 56 self._check_implicit_elif(token) 57 58 def _does_else_belong_to_if(self, start_index: int) -> bool: 59 previous_token = self.file_tokens[start_index - 1] 60 61 if previous_token.type != tokenize.DEDENT: 62 # This is not the first token on the line, which means that it can 63 # also be "embedded" else: x if A else B 64 return False 65 66 for token in reversed(self.file_tokens[:start_index - 1]): 67 if token.type != tokenize.NAME: 68 continue 69 70 # Here we rely upon an intuition that in Python else have to be 71 # on the same level (same indentation) as parent statement. 72 if token.start[1] == previous_token.start[1]: 73 return token.string in {'if', 'elif'} 74 75 return False 76 77 def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None: 78 if token.string != 'else': 79 return 80 81 index = self.file_tokens.index(token) 82 83 # `else` token can belong also to `for` and `try/except` statement, 84 # which can trigger false positive for that violation. 85 if not self._does_else_belong_to_if(index): 86 return 87 88 # There's a bug in coverage, I am not sure how to make it work. 
89 for next_token in self.file_tokens[index + 1:]: # pragma: no cover 90 if next_token.exact_type in self._allowed_token_types: 91 continue 92 elif next_token.string == 'if': 93 self.add_violation(ImplicitElifViolation(next_token)) 94 return 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py --- a/wemake_python_styleguide/visitors/tokenize/conditions.py +++ b/wemake_python_styleguide/visitors/tokenize/conditions.py @@ -1,5 +1,5 @@ import tokenize -from typing import ClassVar, FrozenSet +from typing import ClassVar, FrozenSet, Sequence from typing_extensions import final @@ -74,21 +74,63 @@ return False - def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None: - if token.string != 'else': - return + def _if_has_code_below( + self, + remaining_tokens: Sequence[tokenize.TokenInfo], + ) -> bool: + """ + Checks code immediately below an if statement to remove false positives. + + Checks that, below an if that comes immediately after an else, there is + more code to be considered so as not to throw an incorrect violation. + """ + index = 1 + + while remaining_tokens[index - 1].exact_type != tokenize.INDENT: + index += 1 + + context_count = 1 - index = self.file_tokens.index(token) + while context_count: + next_token = remaining_tokens[index] + if next_token.exact_type == tokenize.INDENT: + context_count += 1 + if next_token.exact_type == tokenize.DEDENT: + context_count -= 1 + index += 1 + + return remaining_tokens[index].exact_type != tokenize.DEDENT + + def _check_complex_else( + self, + tokens: Sequence[tokenize.TokenInfo], + current_token: tokenize.TokenInfo, + index: int, + ) -> None: + complex_else = self._if_has_code_below(tokens[index + 1:]) + if not complex_else: + self.add_violation(ImplicitElifViolation(current_token)) + + def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool: + is_not_else = token.string != 'else' # `else` token can belong also to `for` and `try/except` statement, # which can trigger false positive for that violation. - if not self._does_else_belong_to_if(index): + belongs_to_if = self._does_else_belong_to_if(index) + + return is_not_else or not belongs_to_if + + def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None: + token_index = self.file_tokens.index(token) + + if self._is_invalid_token(token_index, token): return # There's a bug in coverage, I am not sure how to make it work. - for next_token in self.file_tokens[index + 1:]: # pragma: no cover + next_tokens = self.file_tokens[token_index + 1:] + for index, next_token in enumerate(next_tokens): # pragma: no cover if next_token.exact_type in self._allowed_token_types: continue elif next_token.string == 'if': - self.add_violation(ImplicitElifViolation(next_token)) + self._check_complex_else(next_tokens, next_token, index) return
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py\n--- a/wemake_python_styleguide/visitors/tokenize/conditions.py\n+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py\n@@ -1,5 +1,5 @@\n import tokenize\n-from typing import ClassVar, FrozenSet\n+from typing import ClassVar, FrozenSet, Sequence\n \n from typing_extensions import final\n \n@@ -74,21 +74,63 @@\n \n return False\n \n- def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n- if token.string != 'else':\n- return\n+ def _if_has_code_below(\n+ self,\n+ remaining_tokens: Sequence[tokenize.TokenInfo],\n+ ) -> bool:\n+ \"\"\"\n+ Checks code immediately below an if statement to remove false positives.\n+\n+ Checks that, below an if that comes immediately after an else, there is\n+ more code to be considered so as not to throw an incorrect violation.\n+ \"\"\"\n+ index = 1\n+\n+ while remaining_tokens[index - 1].exact_type != tokenize.INDENT:\n+ index += 1\n+\n+ context_count = 1\n \n- index = self.file_tokens.index(token)\n+ while context_count:\n+ next_token = remaining_tokens[index]\n+ if next_token.exact_type == tokenize.INDENT:\n+ context_count += 1\n+ if next_token.exact_type == tokenize.DEDENT:\n+ context_count -= 1\n+ index += 1\n+\n+ return remaining_tokens[index].exact_type != tokenize.DEDENT\n+\n+ def _check_complex_else(\n+ self,\n+ tokens: Sequence[tokenize.TokenInfo],\n+ current_token: tokenize.TokenInfo,\n+ index: int,\n+ ) -> None:\n+ complex_else = self._if_has_code_below(tokens[index + 1:])\n+ if not complex_else:\n+ self.add_violation(ImplicitElifViolation(current_token))\n+\n+ def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:\n+ is_not_else = token.string != 'else'\n \n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n- if not self._does_else_belong_to_if(index):\n+ belongs_to_if = self._does_else_belong_to_if(index)\n+\n+ return is_not_else or not belongs_to_if\n+\n+ def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n+ token_index = self.file_tokens.index(token)\n+\n+ if self._is_invalid_token(token_index, token):\n return\n \n # There's a bug in coverage, I am not sure how to make it work.\n- for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n+ next_tokens = self.file_tokens[token_index + 1:]\n+ for index, next_token in enumerate(next_tokens): # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n- self.add_violation(ImplicitElifViolation(next_token))\n+ self._check_complex_else(next_tokens, next_token, index)\n return\n", "issue": "Stray WPS513 warning.\n# Bug report\r\n\r\n`WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block.\r\n\r\n## What's wrong\r\n\r\nThe following snippet:\r\n\r\n```python\r\nif private_key is not None:\r\n if isinstance(private_key, PKey):\r\n key = private_key\r\n else:\r\n if isinstance(private_key, str): # WPS513 Found implicit `elif` condition\r\n stream = io.StringIO()\r\n stream.write(private_key)\r\n stream.seek(0)\r\n private_key = stream\r\n key = RSAKey.from_private_key(private_key)\r\n```\r\n\r\ntriggers a stray WPS513 warning. 
Seems like the assignment to `key` is not taken into account.\r\n\r\n## How is that should be\r\n\r\n`WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block.\n", "before_files": [{"content": "import tokenize\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _does_else_belong_to_if(self, start_index: int) -> bool:\n previous_token = self.file_tokens[start_index - 1]\n\n if previous_token.type != tokenize.DEDENT:\n # This is not the first token on the line, which means that it can\n # also be \"embedded\" else: x if A else B\n return False\n\n for token in reversed(self.file_tokens[:start_index - 1]):\n if token.type != tokenize.NAME:\n continue\n\n # Here we rely upon an intuition that in Python else have to be\n # on the same level (same indentation) as parent statement.\n if token.start[1] == previous_token.start[1]:\n return token.string in {'if', 'elif'}\n\n return False\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n\n index = self.file_tokens.index(token)\n\n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n if not self._does_else_belong_to_if(index):\n return\n\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self.add_violation(ImplicitElifViolation(next_token))\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}], "after_files": [{"content": "import tokenize\nfrom typing import ClassVar, FrozenSet, Sequence\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. 
code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _does_else_belong_to_if(self, start_index: int) -> bool:\n previous_token = self.file_tokens[start_index - 1]\n\n if previous_token.type != tokenize.DEDENT:\n # This is not the first token on the line, which means that it can\n # also be \"embedded\" else: x if A else B\n return False\n\n for token in reversed(self.file_tokens[:start_index - 1]):\n if token.type != tokenize.NAME:\n continue\n\n # Here we rely upon an intuition that in Python else have to be\n # on the same level (same indentation) as parent statement.\n if token.start[1] == previous_token.start[1]:\n return token.string in {'if', 'elif'}\n\n return False\n\n def _if_has_code_below(\n self,\n remaining_tokens: Sequence[tokenize.TokenInfo],\n ) -> bool:\n \"\"\"\n Checks code immediately below an if statement to remove false positives.\n\n Checks that, below an if that comes immediately after an else, there is\n more code to be considered so as not to throw an incorrect violation.\n \"\"\"\n index = 1\n\n while remaining_tokens[index - 1].exact_type != tokenize.INDENT:\n index += 1\n\n context_count = 1\n\n while context_count:\n next_token = remaining_tokens[index]\n if next_token.exact_type == tokenize.INDENT:\n context_count += 1\n if next_token.exact_type == tokenize.DEDENT:\n context_count -= 1\n index += 1\n\n return remaining_tokens[index].exact_type != tokenize.DEDENT\n\n def _check_complex_else(\n self,\n tokens: Sequence[tokenize.TokenInfo],\n current_token: tokenize.TokenInfo,\n index: int,\n ) -> None:\n complex_else = self._if_has_code_below(tokens[index + 1:])\n if not complex_else:\n self.add_violation(ImplicitElifViolation(current_token))\n\n def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:\n is_not_else = token.string != 'else'\n\n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n belongs_to_if = self._does_else_belong_to_if(index)\n\n return is_not_else or not belongs_to_if\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n token_index = self.file_tokens.index(token)\n\n if self._is_invalid_token(token_index, token):\n return\n\n # There's a bug in coverage, I am not sure how to make it work.\n next_tokens = self.file_tokens[token_index + 1:]\n for index, next_token in enumerate(next_tokens): # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self._check_complex_else(next_tokens, next_token, index)\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}]}
1,268
757
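The wemake-python-styleguide record hinges on the fact that `else:` followed by a nested `if` is indistinguishable from `elif` in the AST, so the checker has to work at the token level. The sketch below shows only the token-level detection of an `if` that immediately follows an `else:`; the actual fix in the diff above additionally tracks INDENT/DEDENT pairs to confirm the nested `if` block is the only statement in the `else` body, and verifies that the `else` belongs to an `if`/`elif` rather than a `for` or `try`.

```python
import io
import tokenize

# Token types that may sit between `else` and a nested `if` without breaking
# the "implicit elif" pattern (mirrors the checker's _allowed_token_types).
ALLOWED = {tokenize.NEWLINE, tokenize.NL, tokenize.COLON,
           tokenize.INDENT, tokenize.COMMENT}


def find_implicit_elif(source):
    """Yield (line, col) of `if` tokens that directly follow an `else:`."""
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
    for index, tok in enumerate(tokens):
        if tok.type != tokenize.NAME or tok.string != 'else':
            continue
        for nxt in tokens[index + 1:]:
            if nxt.exact_type in ALLOWED:
                continue
            if nxt.type == tokenize.NAME and nxt.string == 'if':
                yield nxt.start
            break


code = """\
if a:
    x = 1
else:
    if b:
        x = 2
"""
print(list(find_implicit_elif(code)))  # [(4, 4)]
```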
gh_patches_debug_18425
rasdani/github-patches
git_diff
horovod__horovod-2992
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Failed to use LossScaleOptimizer with DistributedOptimizer. **Environment:** Note that I checked this issue on NVIDIA's NGC TF2 container image, `nvcr.io/nvidia/tensorflow:21.05-tf2-py3`. (Release notes: https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/rel_21-05.html#rel_21-05) 1. Framework: TensorFlow (and its keras API) 2. Framework version: 2.4.0+nv 3. Horovod version: 0.21.3 4. MPI version: 4.1.1rc1 5. CUDA version: V11.3.58 6. NCCL version: 2.9.8 7. Python version: 3.8.5 8. Spark / PySpark version: N/A 9. Ray version: N/A 10. OS and version: Ubuntu 20.04.2 LTS 11. GCC version: 9.3.0 12. CMake version: 3.16.3 **Checklist:** 1. Did you search issues to find if somebody asked this question before?: Y 2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)?: N/A 3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)?: Y 4. Did you check if you question is answered in the [troubleshooting guide](https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)?: Y **Bug report:** Please describe erroneous behavior you're observing and steps to reproduce it. When I tried to use newer mixed precision API, `tf.keras.mixed_precision.LossScaleOptimizer`, with Horovod, the error, `AttributeError: 'LossScaleOptimizer' object has no attribute '_optimizer'`, happened. Minimal reproduction code is below, and if a line of `LossScaleOptimizer` is commented out, no error happens. ```python import tensorflow as tf import horovod.tensorflow.keras as hvd def main(): hvd.init() gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') policy = tf.keras.mixed_precision.Policy('mixed_float16') tf.keras.mixed_precision.set_global_policy(policy) opt = tf.keras.optimizers.RMSprop(learning_rate=0.001 * (hvd.size()**0.5)) opt = tf.keras.mixed_precision.LossScaleOptimizer(opt) opt = hvd.DistributedOptimizer(opt) if __name__ == '__main__': main() ``` Full stacktrace message is below. 
``` Traceback (most recent call last): File "minimal_repro.py", line 21, in <module> main() File "minimal_repro.py", line 17, in main opt = hvd.DistributedOptimizer(opt) File "/usr/local/lib/python3.8/dist-packages/horovod/tensorflow/keras/__init__.py", line 97, in DistributedOptimizer return _impl.create_distributed_optimizer( File "/usr/local/lib/python3.8/dist-packages/horovod/_keras/__init__.py", line 167, in create_distributed_optimizer return cls.from_config(config) File "/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py", line 794, in from_config return cls(**config) File "/usr/local/lib/python3.8/dist-packages/horovod/_keras/__init__.py", line 37, in __init__ self._name = name or "Distributed%s" % self.__class__.__base__.__name__ File "/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py", line 918, in __setattr__ if (name != '_optimizer' and name in self._optimizer._hyper File "/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py", line 888, in __getattribute__ raise e File "/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py", line 884, in __getattribute__ return object.__getattribute__(self, name) AttributeError: 'LossScaleOptimizer' object has no attribute '_optimizer' ``` As far as I checked, at [`keras/mixed_precision/loss_scale_optimizer.py`#L534](https://github.com/tensorflow/tensorflow/blob/v2.4.0/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py#L534), `_optimizer` is initialized, and Horovod's automatically generated wrapper class, `_DistributedOptimizer`, tries to access `_name` attribute before this initialization ([`horovod/_keras/__init__.py`#L37](https://github.com/horovod/horovod/blob/master/horovod/_keras/__init__.py#L37)). Then, `LossScaleOptimizer` checks if an attribute exists or not when accessing to any attribute, and raise an error for a few attributes like `_optimizer` at ([`keras/mixed_precision/loss_scale_optimizer.py`#L882-L888](https://github.com/tensorflow/tensorflow/blob/v2.4.0/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py#L882-L888)) However, it looks like `_DistributedOptimizer` makes the initialization (= `_optimizer` initialization) delayed to the last of `__init__()` ([`horovod/_keras/__init__.py`#L69](https://github.com/horovod/horovod/blob/master/horovod/_keras/__init__.py#L69)). As a result, this error looks like caused. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `horovod/_keras/__init__.py` Content: ``` 1 # Copyright 2017 Uber Technologies, Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 # ============================================================================== 15 16 from distutils.version import LooseVersion 17 18 import horovod.tensorflow as hvd 19 import tensorflow as tf 20 from horovod.tensorflow.gradient_aggregation import LocalGradientAggregationHelper 21 from horovod.tensorflow.gradient_aggregation_eager import LocalGradientAggregationHelperEager 22 from horovod.tensorflow.mpi_ops import rank 23 24 25 _PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion('2.4.0') 26 27 28 def create_distributed_optimizer(keras, optimizer, name, device_dense, device_sparse, 29 compression, sparse_as_dense, gradient_predivide_factor, 30 op, backward_passes_per_step=1, 31 average_aggregated_gradients=False, 32 groups=None): 33 class _DistributedOptimizer(keras.optimizers.Optimizer): 34 _HAS_AGGREGATE_GRAD = True 35 36 def __init__(self, **kwargs): 37 self._name = name or "Distributed%s" % self.__class__.__base__.__name__ 38 self._aggregated_gradients = False 39 40 self._allreduce_grads = hvd._make_allreduce_grads_fn( 41 self._name, 42 device_dense, 43 device_sparse, 44 compression, 45 sparse_as_dense, 46 op, 47 gradient_predivide_factor, 48 groups) 49 50 self._agg_helper = None 51 if backward_passes_per_step > 1: 52 if hvd._executing_eagerly(): 53 self._agg_helper = LocalGradientAggregationHelperEager( 54 backward_passes_per_step=backward_passes_per_step, 55 allreduce_func=self._allreduce_grads, 56 sparse_as_dense=sparse_as_dense, 57 average_aggregated_gradients=average_aggregated_gradients, 58 ) 59 else: 60 self._agg_helper = LocalGradientAggregationHelper( 61 backward_passes_per_step=backward_passes_per_step, 62 allreduce_func=self._allreduce_grads, 63 sparse_as_dense=sparse_as_dense, 64 average_aggregated_gradients=average_aggregated_gradients, 65 rank=rank(), 66 optimizer_type=LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS, 67 ) 68 69 super(self.__class__, self).__init__(**kwargs) 70 71 def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): 72 """ 73 Compute gradients of all trainable variables. 74 75 See Optimizer.get_gradients() for more info. 76 77 In DistributedOptimizer, get_gradients() is overriden to also 78 allreduce the gradients before returning them. 79 """ 80 if _PRE_TF_2_4_0: 81 return super(self.__class__, self)._compute_gradients( 82 loss, var_list, grad_loss, tape) 83 84 tape = backprop.GradientTape() if tape is None else tape 85 grads_and_vars = super(self.__class__, self)._compute_gradients( 86 # pylint: disable=protected-access 87 loss, 88 var_list, 89 grad_loss, 90 tape=tape) 91 grads, weights = list(zip(*grads_and_vars)) 92 93 allreduced_grads = self._allreduce(grads, weights) 94 return list(zip(allreduced_grads, weights)) 95 96 def get_gradients(self, loss, params): 97 """ 98 Compute gradients of all trainable variables. 99 100 See Optimizer.get_gradients() for more info. 101 102 In DistributedOptimizer, get_gradients() is overriden to also 103 allreduce the gradients before returning them. 
104 """ 105 gradients = super(self.__class__, self).get_gradients(loss, params) 106 return self._allreduce(gradients, params) 107 108 def _aggregate_gradients(self, grads_and_vars): 109 if _PRE_TF_2_4_0: 110 grads, vars = list(zip(*grads_and_vars)) 111 aggregated_grads = self._allreduce(grads, vars) 112 return aggregated_grads 113 else: 114 return super(self.__class__, self)._aggregate_gradients( 115 grads_and_vars) 116 117 def _allreduce(self, grads, vars): 118 self._aggregated_gradients = True 119 120 if self._agg_helper: 121 return self._agg_helper.compute_gradients(tuple(grads), tuple(vars)) 122 else: 123 return self._allreduce_grads(grads, vars) 124 125 def apply_gradients(self, *args, **kwargs): 126 if self._agg_helper: 127 if isinstance(args[0], zip): 128 # If grad_and_vars are passed in as a zip object 129 # convert to a list. This is necessary for TF2.4+ 130 # b/c args[0] is used in both conditional branches 131 # inside _agg_helper.apply_gradients(). 132 args = list(args) 133 args[0] = list(args[0]) 134 args = tuple(args) 135 136 results = self._agg_helper.apply_gradients( 137 lambda: super(self.__class__, self).apply_gradients(*args, **kwargs), 138 self, 139 *args, 140 **kwargs, 141 ) 142 else: 143 results = super(self.__class__, self).apply_gradients(*args, **kwargs) 144 145 if _PRE_TF_2_4_0 and not self._aggregated_gradients: 146 raise Exception('`apply_gradients()` was called without a call to ' 147 '`get_gradients()` or `_aggregate_gradients`. If you\'re ' 148 'using TensorFlow 2.0, please specify ' 149 '`experimental_run_tf_function=False` in `compile()`.') 150 151 return results 152 153 # We dynamically create a new class that inherits from the optimizer that was passed in. 154 # The goal is to override get_gradients() method with an allreduce implementation. 155 # This class will have the same name as the optimizer it's wrapping, so that the saved 156 # model could be easily restored without Horovod. 
157 cls = type(optimizer.__class__.__name__, (optimizer.__class__,), 158 dict(_DistributedOptimizer.__dict__)) 159 160 return cls.from_config(optimizer.get_config()) 161 162 163 def _eval(backend, op_or_result): 164 if hvd._executing_eagerly(): 165 return op_or_result 166 else: 167 return backend.get_session().run(op_or_result) 168 169 170 if hasattr(hvd, 'broadcast_global_variables'): 171 def broadcast_global_variables(backend, root_rank): 172 return _eval(backend, hvd.broadcast_global_variables(root_rank)) 173 174 175 def allreduce(backend, value, name, average, prescale_factor, postscale_factor, op, compression): 176 return _eval(backend, hvd.allreduce(tf.constant(value, name=name), average=average, 177 prescale_factor=prescale_factor, 178 postscale_factor=postscale_factor, 179 op=op, compression=compression)) 180 181 182 def allgather(backend, value, name): 183 return _eval(backend, hvd.allgather(tf.constant(value, name=name))) 184 185 186 def broadcast(backend, value, root_rank, name): 187 return _eval(backend, hvd.broadcast(tf.constant(value, name=name), root_rank)) 188 189 190 def load_model(keras, wrap_optimizer, optimizer_modules, filepath, custom_optimizers, custom_objects): 191 horovod_objects = { 192 subclass.__name__.lower(): wrap_optimizer(subclass) 193 for subclass in keras.optimizers.Optimizer.__subclasses__() 194 if subclass.__module__ in optimizer_modules 195 } 196 197 if custom_optimizers is not None: 198 horovod_objects.update({ 199 cls.__name__: wrap_optimizer(cls) 200 for cls in custom_optimizers 201 }) 202 203 if custom_objects is not None: 204 horovod_objects.update(custom_objects) 205 206 return keras.models.load_model(filepath, custom_objects=horovod_objects) 207 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/horovod/_keras/__init__.py b/horovod/_keras/__init__.py --- a/horovod/_keras/__init__.py +++ b/horovod/_keras/__init__.py @@ -34,6 +34,8 @@ _HAS_AGGREGATE_GRAD = True def __init__(self, **kwargs): + super(self.__class__, self).__init__(**kwargs) + self._name = name or "Distributed%s" % self.__class__.__base__.__name__ self._aggregated_gradients = False @@ -66,8 +68,6 @@ optimizer_type=LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS, ) - super(self.__class__, self).__init__(**kwargs) - def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): """ Compute gradients of all trainable variables.
{"golden_diff": "diff --git a/horovod/_keras/__init__.py b/horovod/_keras/__init__.py\n--- a/horovod/_keras/__init__.py\n+++ b/horovod/_keras/__init__.py\n@@ -34,6 +34,8 @@\n _HAS_AGGREGATE_GRAD = True\n \n def __init__(self, **kwargs):\n+ super(self.__class__, self).__init__(**kwargs)\n+\n self._name = name or \"Distributed%s\" % self.__class__.__base__.__name__\n self._aggregated_gradients = False\n \n@@ -66,8 +68,6 @@\n optimizer_type=LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS,\n )\n \n- super(self.__class__, self).__init__(**kwargs)\n-\n def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n \"\"\"\n Compute gradients of all trainable variables.\n", "issue": "Failed to use LossScaleOptimizer with DistributedOptimizer.\n**Environment:**\r\nNote that I checked this issue on NVIDIA's NGC TF2 container image, `nvcr.io/nvidia/tensorflow:21.05-tf2-py3`. (Release notes: https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/rel_21-05.html#rel_21-05)\r\n\r\n1. Framework: TensorFlow (and its keras API)\r\n2. Framework version: 2.4.0+nv\r\n3. Horovod version: 0.21.3\r\n4. MPI version: 4.1.1rc1\r\n5. CUDA version: V11.3.58\r\n6. NCCL version: 2.9.8\r\n7. Python version: 3.8.5\r\n8. Spark / PySpark version: N/A\r\n9. Ray version: N/A\r\n10. OS and version: Ubuntu 20.04.2 LTS\r\n11. GCC version: 9.3.0\r\n12. CMake version: 3.16.3\r\n\r\n**Checklist:**\r\n1. Did you search issues to find if somebody asked this question before?: Y\r\n2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)?: N/A\r\n3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)?: Y\r\n4. Did you check if you question is answered in the [troubleshooting guide](https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)?: Y\r\n\r\n**Bug report:**\r\nPlease describe erroneous behavior you're observing and steps to reproduce it.\r\n\r\nWhen I tried to use newer mixed precision API, `tf.keras.mixed_precision.LossScaleOptimizer`, with Horovod, the error, `AttributeError: 'LossScaleOptimizer' object has no attribute '_optimizer'`, happened.\r\n\r\nMinimal reproduction code is below, and if a line of `LossScaleOptimizer` is commented out, no error happens.\r\n\r\n```python\r\nimport tensorflow as tf\r\nimport horovod.tensorflow.keras as hvd\r\n\r\ndef main():\r\n hvd.init()\r\n\r\n gpus = tf.config.experimental.list_physical_devices('GPU')\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n if gpus:\r\n tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')\r\n\r\n policy = tf.keras.mixed_precision.Policy('mixed_float16')\r\n tf.keras.mixed_precision.set_global_policy(policy)\r\n opt = tf.keras.optimizers.RMSprop(learning_rate=0.001 * (hvd.size()**0.5))\r\n opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)\r\n opt = hvd.DistributedOptimizer(opt)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\nFull stacktrace message is below.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"minimal_repro.py\", line 21, in <module>\r\n main()\r\n File \"minimal_repro.py\", line 17, in main\r\n opt = hvd.DistributedOptimizer(opt)\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/tensorflow/keras/__init__.py\", line 97, in DistributedOptimizer\r\n return _impl.create_distributed_optimizer(\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/_keras/__init__.py\", line 167, 
in create_distributed_optimizer\r\n return cls.from_config(config)\r\n File \"/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py\", line 794, in from_config\r\n return cls(**config)\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/_keras/__init__.py\", line 37, in __init__\r\n self._name = name or \"Distributed%s\" % self.__class__.__base__.__name__\r\n File \"/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py\", line 918, in __setattr__\r\n if (name != '_optimizer' and name in self._optimizer._hyper\r\n File \"/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py\", line 888, in __getattribute__\r\n raise e\r\n File \"/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py\", line 884, in __getattribute__\r\n return object.__getattribute__(self, name)\r\nAttributeError: 'LossScaleOptimizer' object has no attribute '_optimizer'\r\n```\r\n\r\nAs far as I checked, at [`keras/mixed_precision/loss_scale_optimizer.py`#L534](https://github.com/tensorflow/tensorflow/blob/v2.4.0/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py#L534), `_optimizer` is initialized, and Horovod's automatically generated wrapper class, `_DistributedOptimizer`, tries to access `_name` attribute before this initialization ([`horovod/_keras/__init__.py`#L37](https://github.com/horovod/horovod/blob/master/horovod/_keras/__init__.py#L37)). Then, `LossScaleOptimizer` checks if an attribute exists or not when accessing to any attribute, and raise an error for a few attributes like `_optimizer` at ([`keras/mixed_precision/loss_scale_optimizer.py`#L882-L888](https://github.com/tensorflow/tensorflow/blob/v2.4.0/tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py#L882-L888))\r\nHowever, it looks like `_DistributedOptimizer` makes the initialization (= `_optimizer` initialization) delayed to the last of `__init__()` ([`horovod/_keras/__init__.py`#L69](https://github.com/horovod/horovod/blob/master/horovod/_keras/__init__.py#L69)). As a result, this error looks like caused.\n", "before_files": [{"content": "# Copyright 2017 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom distutils.version import LooseVersion\n\nimport horovod.tensorflow as hvd\nimport tensorflow as tf\nfrom horovod.tensorflow.gradient_aggregation import LocalGradientAggregationHelper\nfrom horovod.tensorflow.gradient_aggregation_eager import LocalGradientAggregationHelperEager\nfrom horovod.tensorflow.mpi_ops import rank\n\n\n_PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion('2.4.0')\n\n\ndef create_distributed_optimizer(keras, optimizer, name, device_dense, device_sparse,\n compression, sparse_as_dense, gradient_predivide_factor,\n op, backward_passes_per_step=1,\n average_aggregated_gradients=False,\n groups=None):\n class _DistributedOptimizer(keras.optimizers.Optimizer):\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self, **kwargs):\n self._name = name or \"Distributed%s\" % self.__class__.__base__.__name__\n self._aggregated_gradients = False\n\n self._allreduce_grads = hvd._make_allreduce_grads_fn(\n self._name,\n device_dense,\n device_sparse,\n compression,\n sparse_as_dense,\n op,\n gradient_predivide_factor,\n groups)\n\n self._agg_helper = None\n if backward_passes_per_step > 1:\n if hvd._executing_eagerly():\n self._agg_helper = LocalGradientAggregationHelperEager(\n backward_passes_per_step=backward_passes_per_step,\n allreduce_func=self._allreduce_grads,\n sparse_as_dense=sparse_as_dense,\n average_aggregated_gradients=average_aggregated_gradients,\n )\n else:\n self._agg_helper = LocalGradientAggregationHelper(\n backward_passes_per_step=backward_passes_per_step,\n allreduce_func=self._allreduce_grads,\n sparse_as_dense=sparse_as_dense,\n average_aggregated_gradients=average_aggregated_gradients,\n rank=rank(),\n optimizer_type=LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS,\n )\n\n super(self.__class__, self).__init__(**kwargs)\n\n def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n \"\"\"\n Compute gradients of all trainable variables.\n\n See Optimizer.get_gradients() for more info.\n\n In DistributedOptimizer, get_gradients() is overriden to also\n allreduce the gradients before returning them.\n \"\"\"\n if _PRE_TF_2_4_0:\n return super(self.__class__, self)._compute_gradients(\n loss, var_list, grad_loss, tape)\n\n tape = backprop.GradientTape() if tape is None else tape\n grads_and_vars = super(self.__class__, self)._compute_gradients(\n # pylint: disable=protected-access\n loss,\n var_list,\n grad_loss,\n tape=tape)\n grads, weights = list(zip(*grads_and_vars))\n\n allreduced_grads = self._allreduce(grads, weights)\n return list(zip(allreduced_grads, weights))\n\n def get_gradients(self, loss, params):\n \"\"\"\n Compute gradients of all trainable variables.\n\n See Optimizer.get_gradients() for more info.\n\n In DistributedOptimizer, get_gradients() is overriden to also\n allreduce the gradients before returning them.\n \"\"\"\n gradients = super(self.__class__, 
self).get_gradients(loss, params)\n return self._allreduce(gradients, params)\n\n def _aggregate_gradients(self, grads_and_vars):\n if _PRE_TF_2_4_0:\n grads, vars = list(zip(*grads_and_vars))\n aggregated_grads = self._allreduce(grads, vars)\n return aggregated_grads\n else:\n return super(self.__class__, self)._aggregate_gradients(\n grads_and_vars)\n\n def _allreduce(self, grads, vars):\n self._aggregated_gradients = True\n\n if self._agg_helper:\n return self._agg_helper.compute_gradients(tuple(grads), tuple(vars))\n else:\n return self._allreduce_grads(grads, vars)\n\n def apply_gradients(self, *args, **kwargs):\n if self._agg_helper:\n if isinstance(args[0], zip):\n # If grad_and_vars are passed in as a zip object\n # convert to a list. This is necessary for TF2.4+\n # b/c args[0] is used in both conditional branches\n # inside _agg_helper.apply_gradients().\n args = list(args)\n args[0] = list(args[0])\n args = tuple(args)\n\n results = self._agg_helper.apply_gradients(\n lambda: super(self.__class__, self).apply_gradients(*args, **kwargs),\n self,\n *args,\n **kwargs,\n )\n else:\n results = super(self.__class__, self).apply_gradients(*args, **kwargs)\n\n if _PRE_TF_2_4_0 and not self._aggregated_gradients:\n raise Exception('`apply_gradients()` was called without a call to '\n '`get_gradients()` or `_aggregate_gradients`. If you\\'re '\n 'using TensorFlow 2.0, please specify '\n '`experimental_run_tf_function=False` in `compile()`.')\n\n return results\n\n # We dynamically create a new class that inherits from the optimizer that was passed in.\n # The goal is to override get_gradients() method with an allreduce implementation.\n # This class will have the same name as the optimizer it's wrapping, so that the saved\n # model could be easily restored without Horovod.\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,),\n dict(_DistributedOptimizer.__dict__))\n\n return cls.from_config(optimizer.get_config())\n\n\ndef _eval(backend, op_or_result):\n if hvd._executing_eagerly():\n return op_or_result\n else:\n return backend.get_session().run(op_or_result)\n\n\nif hasattr(hvd, 'broadcast_global_variables'):\n def broadcast_global_variables(backend, root_rank):\n return _eval(backend, hvd.broadcast_global_variables(root_rank))\n\n\ndef allreduce(backend, value, name, average, prescale_factor, postscale_factor, op, compression):\n return _eval(backend, hvd.allreduce(tf.constant(value, name=name), average=average,\n prescale_factor=prescale_factor,\n postscale_factor=postscale_factor,\n op=op, compression=compression))\n\n\ndef allgather(backend, value, name):\n return _eval(backend, hvd.allgather(tf.constant(value, name=name)))\n\n\ndef broadcast(backend, value, root_rank, name):\n return _eval(backend, hvd.broadcast(tf.constant(value, name=name), root_rank))\n\n\ndef load_model(keras, wrap_optimizer, optimizer_modules, filepath, custom_optimizers, custom_objects):\n horovod_objects = {\n subclass.__name__.lower(): wrap_optimizer(subclass)\n for subclass in keras.optimizers.Optimizer.__subclasses__()\n if subclass.__module__ in optimizer_modules\n }\n\n if custom_optimizers is not None:\n horovod_objects.update({\n cls.__name__: wrap_optimizer(cls)\n for cls in custom_optimizers\n })\n\n if custom_objects is not None:\n horovod_objects.update(custom_objects)\n\n return keras.models.load_model(filepath, custom_objects=horovod_objects)\n", "path": "horovod/_keras/__init__.py"}], "after_files": [{"content": "# Copyright 2017 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom distutils.version import LooseVersion\n\nimport horovod.tensorflow as hvd\nimport tensorflow as tf\nfrom horovod.tensorflow.gradient_aggregation import LocalGradientAggregationHelper\nfrom horovod.tensorflow.gradient_aggregation_eager import LocalGradientAggregationHelperEager\nfrom horovod.tensorflow.mpi_ops import rank\n\n\n_PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion('2.4.0')\n\n\ndef create_distributed_optimizer(keras, optimizer, name, device_dense, device_sparse,\n compression, sparse_as_dense, gradient_predivide_factor,\n op, backward_passes_per_step=1,\n average_aggregated_gradients=False,\n groups=None):\n class _DistributedOptimizer(keras.optimizers.Optimizer):\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self, **kwargs):\n super(self.__class__, self).__init__(**kwargs)\n\n self._name = name or \"Distributed%s\" % self.__class__.__base__.__name__\n self._aggregated_gradients = False\n\n self._allreduce_grads = hvd._make_allreduce_grads_fn(\n self._name,\n device_dense,\n device_sparse,\n compression,\n sparse_as_dense,\n op,\n gradient_predivide_factor,\n groups)\n\n self._agg_helper = None\n if backward_passes_per_step > 1:\n if hvd._executing_eagerly():\n self._agg_helper = LocalGradientAggregationHelperEager(\n backward_passes_per_step=backward_passes_per_step,\n allreduce_func=self._allreduce_grads,\n sparse_as_dense=sparse_as_dense,\n average_aggregated_gradients=average_aggregated_gradients,\n )\n else:\n self._agg_helper = LocalGradientAggregationHelper(\n backward_passes_per_step=backward_passes_per_step,\n allreduce_func=self._allreduce_grads,\n sparse_as_dense=sparse_as_dense,\n average_aggregated_gradients=average_aggregated_gradients,\n rank=rank(),\n optimizer_type=LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS,\n )\n\n def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n \"\"\"\n Compute gradients of all trainable variables.\n\n See Optimizer.get_gradients() for more info.\n\n In DistributedOptimizer, get_gradients() is overriden to also\n allreduce the gradients before returning them.\n \"\"\"\n if _PRE_TF_2_4_0:\n return super(self.__class__, self)._compute_gradients(\n loss, var_list, grad_loss, tape)\n\n tape = backprop.GradientTape() if tape is None else tape\n grads_and_vars = super(self.__class__, self)._compute_gradients(\n # pylint: disable=protected-access\n loss,\n var_list,\n grad_loss,\n tape=tape)\n grads, weights = list(zip(*grads_and_vars))\n\n allreduced_grads = self._allreduce(grads, weights)\n return list(zip(allreduced_grads, weights))\n\n def get_gradients(self, loss, params):\n \"\"\"\n Compute gradients of all trainable variables.\n\n See Optimizer.get_gradients() for more info.\n\n In DistributedOptimizer, get_gradients() is overriden to also\n allreduce the gradients before returning them.\n \"\"\"\n gradients = super(self.__class__, 
self).get_gradients(loss, params)\n return self._allreduce(gradients, params)\n\n def _aggregate_gradients(self, grads_and_vars):\n if _PRE_TF_2_4_0:\n grads, vars = list(zip(*grads_and_vars))\n aggregated_grads = self._allreduce(grads, vars)\n return aggregated_grads\n else:\n return super(self.__class__, self)._aggregate_gradients(\n grads_and_vars)\n\n def _allreduce(self, grads, vars):\n self._aggregated_gradients = True\n\n if self._agg_helper:\n return self._agg_helper.compute_gradients(tuple(grads), tuple(vars))\n else:\n return self._allreduce_grads(grads, vars)\n\n def apply_gradients(self, *args, **kwargs):\n if self._agg_helper:\n if isinstance(args[0], zip):\n # If grad_and_vars are passed in as a zip object\n # convert to a list. This is necessary for TF2.4+\n # b/c args[0] is used in both conditional branches\n # inside _agg_helper.apply_gradients().\n args = list(args)\n args[0] = list(args[0])\n args = tuple(args)\n\n results = self._agg_helper.apply_gradients(\n lambda: super(self.__class__, self).apply_gradients(*args, **kwargs),\n self,\n *args,\n **kwargs,\n )\n else:\n results = super(self.__class__, self).apply_gradients(*args, **kwargs)\n\n if _PRE_TF_2_4_0 and not self._aggregated_gradients:\n raise Exception('`apply_gradients()` was called without a call to '\n '`get_gradients()` or `_aggregate_gradients`. If you\\'re '\n 'using TensorFlow 2.0, please specify '\n '`experimental_run_tf_function=False` in `compile()`.')\n\n return results\n\n # We dynamically create a new class that inherits from the optimizer that was passed in.\n # The goal is to override get_gradients() method with an allreduce implementation.\n # This class will have the same name as the optimizer it's wrapping, so that the saved\n # model could be easily restored without Horovod.\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,),\n dict(_DistributedOptimizer.__dict__))\n\n return cls.from_config(optimizer.get_config())\n\n\ndef _eval(backend, op_or_result):\n if hvd._executing_eagerly():\n return op_or_result\n else:\n return backend.get_session().run(op_or_result)\n\n\nif hasattr(hvd, 'broadcast_global_variables'):\n def broadcast_global_variables(backend, root_rank):\n return _eval(backend, hvd.broadcast_global_variables(root_rank))\n\n\ndef allreduce(backend, value, name, average, prescale_factor, postscale_factor, op, compression):\n return _eval(backend, hvd.allreduce(tf.constant(value, name=name), average=average,\n prescale_factor=prescale_factor,\n postscale_factor=postscale_factor,\n op=op, compression=compression))\n\n\ndef allgather(backend, value, name):\n return _eval(backend, hvd.allgather(tf.constant(value, name=name)))\n\n\ndef broadcast(backend, value, root_rank, name):\n return _eval(backend, hvd.broadcast(tf.constant(value, name=name), root_rank))\n\n\ndef load_model(keras, wrap_optimizer, optimizer_modules, filepath, custom_optimizers, custom_objects):\n horovod_objects = {\n subclass.__name__.lower(): wrap_optimizer(subclass)\n for subclass in keras.optimizers.Optimizer.__subclasses__()\n if subclass.__module__ in optimizer_modules\n }\n\n if custom_optimizers is not None:\n horovod_objects.update({\n cls.__name__: wrap_optimizer(cls)\n for cls in custom_optimizers\n })\n\n if custom_objects is not None:\n horovod_objects.update(custom_objects)\n\n return keras.models.load_model(filepath, custom_objects=horovod_objects)\n", "path": "horovod/_keras/__init__.py"}]}
3,921
212
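The Horovod record comes down to attribute-initialization order: `LossScaleOptimizer` overrides `__setattr__`/`__getattribute__` and consults its wrapped `self._optimizer`, so the dynamically generated `_DistributedOptimizer` subclass must run the base class's `__init__` before assigning its own attributes, which is exactly what the golden diff does by moving `super().__init__(**kwargs)` to the top. The following is a simplified, self-contained illustration of that failure mode and of the fix, not the real Keras or Horovod classes.

```python
class Wrapper:
    """Stand-in for LossScaleOptimizer: __setattr__ consults self._inner."""

    def __init__(self, inner):
        self._inner = inner

    def __setattr__(self, name, value):
        # Raises AttributeError if _inner has not been initialized yet,
        # which is the situation described in the bug report.
        if name != '_inner' and name in getattr(self, '_inner').__dict__:
            setattr(self._inner, name, value)
        else:
            object.__setattr__(self, name, value)


class Inner:
    pass


class BadSubclass(Wrapper):
    def __init__(self, inner):
        self._name = 'distributed'  # touches __setattr__ before _inner exists
        super().__init__(inner)


class GoodSubclass(Wrapper):
    def __init__(self, inner):
        super().__init__(inner)     # initialize the wrapper first ...
        self._name = 'distributed'  # ... then it is safe to add attributes


GoodSubclass(Inner())               # fine
try:
    BadSubclass(Inner())
except AttributeError as exc:
    print('reproduced:', exc)       # 'BadSubclass' object has no attribute '_inner'
```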
gh_patches_debug_16949
rasdani/github-patches
git_diff
fonttools__fonttools-579
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [feaLib] Lexer for double quote strings should not stop at newline? I have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec > A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters ("). Newlines embedded within the string are removed from the character sequence to be stored. Looking at a ttx dump these strings have `&#13;` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('"\r\n')` If I remove `\r\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\r\n` in the double quote lexer. [feaLib] Lexer for double quote strings should not stop at newline? I have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec > A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters ("). Newlines embedded within the string are removed from the character sequence to be stored. Looking at a ttx dump these strings have `&#13;` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('"\r\n')` If I remove `\r\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\r\n` in the double quote lexer. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `Lib/fontTools/feaLib/lexer.py` Content: ``` 1 from __future__ import print_function, division, absolute_import 2 from __future__ import unicode_literals 3 from fontTools.misc.py23 import * 4 from fontTools.feaLib.error import FeatureLibError 5 import re 6 import os 7 8 9 class Lexer(object): 10 NUMBER = "NUMBER" 11 FLOAT = "FLOAT" 12 STRING = "STRING" 13 NAME = "NAME" 14 FILENAME = "FILENAME" 15 GLYPHCLASS = "GLYPHCLASS" 16 CID = "CID" 17 SYMBOL = "SYMBOL" 18 COMMENT = "COMMENT" 19 NEWLINE = "NEWLINE" 20 21 CHAR_WHITESPACE_ = " \t" 22 CHAR_NEWLINE_ = "\r\n" 23 CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" 24 CHAR_DIGIT_ = "0123456789" 25 CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" 26 CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" 27 CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" 28 CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/" 29 30 RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.]+$") 31 32 MODE_NORMAL_ = "NORMAL" 33 MODE_FILENAME_ = "FILENAME" 34 35 def __init__(self, text, filename): 36 self.filename_ = filename 37 self.line_ = 1 38 self.pos_ = 0 39 self.line_start_ = 0 40 self.text_ = text 41 self.text_length_ = len(text) 42 self.mode_ = Lexer.MODE_NORMAL_ 43 44 def __iter__(self): 45 return self 46 47 def next(self): # Python 2 48 return self.__next__() 49 50 def __next__(self): # Python 3 51 while True: 52 token_type, token, location = self.next_() 53 if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}: 54 return (token_type, token, location) 55 56 def next_(self): 57 self.scan_over_(Lexer.CHAR_WHITESPACE_) 58 column = self.pos_ - self.line_start_ + 1 59 location = (self.filename_, self.line_, column) 60 start = self.pos_ 61 text = self.text_ 62 limit = len(text) 63 if start >= limit: 64 raise StopIteration() 65 cur_char = text[start] 66 next_char = text[start + 1] if start + 1 < limit else None 67 68 if cur_char == "\n": 69 self.pos_ += 1 70 self.line_ += 1 71 self.line_start_ = self.pos_ 72 return (Lexer.NEWLINE, None, location) 73 if cur_char == "\r": 74 self.pos_ += (2 if next_char == "\n" else 1) 75 self.line_ += 1 76 self.line_start_ = self.pos_ 77 return (Lexer.NEWLINE, None, location) 78 if cur_char == "#": 79 self.scan_until_(Lexer.CHAR_NEWLINE_) 80 return (Lexer.COMMENT, text[start:self.pos_], location) 81 82 if self.mode_ is Lexer.MODE_FILENAME_: 83 if cur_char != "(": 84 raise FeatureLibError("Expected '(' before file name", 85 location) 86 self.scan_until_(")") 87 cur_char = text[self.pos_] if self.pos_ < limit else None 88 if cur_char != ")": 89 raise FeatureLibError("Expected ')' after file name", 90 location) 91 self.pos_ += 1 92 self.mode_ = Lexer.MODE_NORMAL_ 93 return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location) 94 95 if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: 96 self.pos_ += 1 97 self.scan_over_(Lexer.CHAR_DIGIT_) 98 return (Lexer.CID, int(text[start + 1:self.pos_], 10), location) 99 if cur_char == "@": 100 self.pos_ += 1 101 self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) 102 glyphclass = text[start + 1:self.pos_] 103 if len(glyphclass) < 1: 104 raise FeatureLibError("Expected glyph class name", location) 105 if len(glyphclass) > 63: 106 raise FeatureLibError( 107 "Glyph class names must not be longer than 63 characters", 108 location) 109 if not Lexer.RE_GLYPHCLASS.match(glyphclass): 110 raise FeatureLibError( 111 "Glyph class names must consist of letters, digits, " 112 "underscore, or period", location) 113 return (Lexer.GLYPHCLASS, glyphclass, location) 114 if cur_char in 
Lexer.CHAR_NAME_START_: 115 self.pos_ += 1 116 self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) 117 token = text[start:self.pos_] 118 if token == "include": 119 self.mode_ = Lexer.MODE_FILENAME_ 120 return (Lexer.NAME, token, location) 121 if cur_char == "0" and next_char in "xX": 122 self.pos_ += 2 123 self.scan_over_(Lexer.CHAR_HEXDIGIT_) 124 return (Lexer.NUMBER, int(text[start:self.pos_], 16), location) 125 if cur_char in Lexer.CHAR_DIGIT_: 126 self.scan_over_(Lexer.CHAR_DIGIT_) 127 if self.pos_ >= limit or text[self.pos_] != ".": 128 return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) 129 self.scan_over_(".") 130 self.scan_over_(Lexer.CHAR_DIGIT_) 131 return (Lexer.FLOAT, float(text[start:self.pos_]), location) 132 if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: 133 self.pos_ += 1 134 self.scan_over_(Lexer.CHAR_DIGIT_) 135 if self.pos_ >= limit or text[self.pos_] != ".": 136 return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) 137 self.scan_over_(".") 138 self.scan_over_(Lexer.CHAR_DIGIT_) 139 return (Lexer.FLOAT, float(text[start:self.pos_]), location) 140 if cur_char in Lexer.CHAR_SYMBOL_: 141 self.pos_ += 1 142 return (Lexer.SYMBOL, cur_char, location) 143 if cur_char == '"': 144 self.pos_ += 1 145 self.scan_until_('"\r\n') 146 if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': 147 self.pos_ += 1 148 return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) 149 else: 150 raise FeatureLibError("Expected '\"' to terminate string", 151 location) 152 raise FeatureLibError("Unexpected character: '%s'" % cur_char, 153 location) 154 155 def scan_over_(self, valid): 156 p = self.pos_ 157 while p < self.text_length_ and self.text_[p] in valid: 158 p += 1 159 self.pos_ = p 160 161 def scan_until_(self, stop_at): 162 p = self.pos_ 163 while p < self.text_length_ and self.text_[p] not in stop_at: 164 p += 1 165 self.pos_ = p 166 167 168 class IncludingLexer(object): 169 def __init__(self, featurefile): 170 self.lexers_ = [self.make_lexer_(featurefile)] 171 172 def __iter__(self): 173 return self 174 175 def next(self): # Python 2 176 return self.__next__() 177 178 def __next__(self): # Python 3 179 while self.lexers_: 180 lexer = self.lexers_[-1] 181 try: 182 token_type, token, location = lexer.next() 183 except StopIteration: 184 self.lexers_.pop() 185 continue 186 if token_type is Lexer.NAME and token == "include": 187 fname_type, fname_token, fname_location = lexer.next() 188 if fname_type is not Lexer.FILENAME: 189 raise FeatureLibError("Expected file name", fname_location) 190 #semi_type, semi_token, semi_location = lexer.next() 191 #if semi_type is not Lexer.SYMBOL or semi_token != ";": 192 # raise FeatureLibError("Expected ';'", semi_location) 193 curpath = os.path.dirname(lexer.filename_) 194 path = os.path.join(curpath, fname_token) 195 if len(self.lexers_) >= 5: 196 raise FeatureLibError("Too many recursive includes", 197 fname_location) 198 self.lexers_.append(self.make_lexer_(path, fname_location)) 199 continue 200 else: 201 return (token_type, token, location) 202 raise StopIteration() 203 204 @staticmethod 205 def make_lexer_(file_or_path, location=None): 206 if hasattr(file_or_path, "read"): 207 fileobj, closing = file_or_path, False 208 else: 209 filename, closing = file_or_path, True 210 try: 211 fileobj = open(filename, "r", encoding="utf-8") 212 except IOError as err: 213 raise FeatureLibError(str(err), location) 214 data = fileobj.read() 215 filename = fileobj.name if hasattr(fileobj, "name") else "<features>" 216 if closing: 
217 fileobj.close() 218 return Lexer(data, filename) 219 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Lib/fontTools/feaLib/lexer.py b/Lib/fontTools/feaLib/lexer.py --- a/Lib/fontTools/feaLib/lexer.py +++ b/Lib/fontTools/feaLib/lexer.py @@ -142,10 +142,12 @@ return (Lexer.SYMBOL, cur_char, location) if cur_char == '"': self.pos_ += 1 - self.scan_until_('"\r\n') + self.scan_until_('"') if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': self.pos_ += 1 - return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + # strip newlines embedded within a string + string = re.sub("[\r\n]", "", text[start + 1:self.pos_ - 1]) + return (Lexer.STRING, string, location) else: raise FeatureLibError("Expected '\"' to terminate string", location)
{"golden_diff": "diff --git a/Lib/fontTools/feaLib/lexer.py b/Lib/fontTools/feaLib/lexer.py\n--- a/Lib/fontTools/feaLib/lexer.py\n+++ b/Lib/fontTools/feaLib/lexer.py\n@@ -142,10 +142,12 @@\n return (Lexer.SYMBOL, cur_char, location)\n if cur_char == '\"':\n self.pos_ += 1\n- self.scan_until_('\"\\r\\n')\n+ self.scan_until_('\"')\n if self.pos_ < self.text_length_ and self.text_[self.pos_] == '\"':\n self.pos_ += 1\n- return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)\n+ # strip newlines embedded within a string\n+ string = re.sub(\"[\\r\\n]\", \"\", text[start + 1:self.pos_ - 1])\n+ return (Lexer.STRING, string, location)\n else:\n raise FeatureLibError(\"Expected '\\\"' to terminate string\",\n location)\n", "issue": "[feaLib] Lexer for double quote strings should not stop at newline?\nI have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec \n\n> A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters (\"). Newlines embedded within the string are removed from the character sequence to be stored.\n\nLooking at a ttx dump these strings have `&#13;` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('\"\\r\\n')`\nIf I remove `\\r\\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\\r\\n` in the double quote lexer. \n\n[feaLib] Lexer for double quote strings should not stop at newline?\nI have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec \n\n> A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters (\"). Newlines embedded within the string are removed from the character sequence to be stored.\n\nLooking at a ttx dump these strings have `&#13;` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('\"\\r\\n')`\nIf I remove `\\r\\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\\r\\n` in the double quote lexer. 
\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom __future__ import unicode_literals\nfrom fontTools.misc.py23 import *\nfrom fontTools.feaLib.error import FeatureLibError\nimport re\nimport os\n\n\nclass Lexer(object):\n NUMBER = \"NUMBER\"\n FLOAT = \"FLOAT\"\n STRING = \"STRING\"\n NAME = \"NAME\"\n FILENAME = \"FILENAME\"\n GLYPHCLASS = \"GLYPHCLASS\"\n CID = \"CID\"\n SYMBOL = \"SYMBOL\"\n COMMENT = \"COMMENT\"\n NEWLINE = \"NEWLINE\"\n\n CHAR_WHITESPACE_ = \" \\t\"\n CHAR_NEWLINE_ = \"\\r\\n\"\n CHAR_SYMBOL_ = \",;:-+'{}[]<>()=\"\n CHAR_DIGIT_ = \"0123456789\"\n CHAR_HEXDIGIT_ = \"0123456789ABCDEFabcdef\"\n CHAR_LETTER_ = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n CHAR_NAME_START_ = CHAR_LETTER_ + \"_+*:.^~!\\\\\"\n CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + \"_.+*:^~!/\"\n\n RE_GLYPHCLASS = re.compile(r\"^[A-Za-z_0-9.]+$\")\n\n MODE_NORMAL_ = \"NORMAL\"\n MODE_FILENAME_ = \"FILENAME\"\n\n def __init__(self, text, filename):\n self.filename_ = filename\n self.line_ = 1\n self.pos_ = 0\n self.line_start_ = 0\n self.text_ = text\n self.text_length_ = len(text)\n self.mode_ = Lexer.MODE_NORMAL_\n\n def __iter__(self):\n return self\n\n def next(self): # Python 2\n return self.__next__()\n\n def __next__(self): # Python 3\n while True:\n token_type, token, location = self.next_()\n if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}:\n return (token_type, token, location)\n\n def next_(self):\n self.scan_over_(Lexer.CHAR_WHITESPACE_)\n column = self.pos_ - self.line_start_ + 1\n location = (self.filename_, self.line_, column)\n start = self.pos_\n text = self.text_\n limit = len(text)\n if start >= limit:\n raise StopIteration()\n cur_char = text[start]\n next_char = text[start + 1] if start + 1 < limit else None\n\n if cur_char == \"\\n\":\n self.pos_ += 1\n self.line_ += 1\n self.line_start_ = self.pos_\n return (Lexer.NEWLINE, None, location)\n if cur_char == \"\\r\":\n self.pos_ += (2 if next_char == \"\\n\" else 1)\n self.line_ += 1\n self.line_start_ = self.pos_\n return (Lexer.NEWLINE, None, location)\n if cur_char == \"#\":\n self.scan_until_(Lexer.CHAR_NEWLINE_)\n return (Lexer.COMMENT, text[start:self.pos_], location)\n\n if self.mode_ is Lexer.MODE_FILENAME_:\n if cur_char != \"(\":\n raise FeatureLibError(\"Expected '(' before file name\",\n location)\n self.scan_until_(\")\")\n cur_char = text[self.pos_] if self.pos_ < limit else None\n if cur_char != \")\":\n raise FeatureLibError(\"Expected ')' after file name\",\n location)\n self.pos_ += 1\n self.mode_ = Lexer.MODE_NORMAL_\n return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location)\n\n if cur_char == \"\\\\\" and next_char in Lexer.CHAR_DIGIT_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.CID, int(text[start + 1:self.pos_], 10), location)\n if cur_char == \"@\":\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)\n glyphclass = text[start + 1:self.pos_]\n if len(glyphclass) < 1:\n raise FeatureLibError(\"Expected glyph class name\", location)\n if len(glyphclass) > 63:\n raise FeatureLibError(\n \"Glyph class names must not be longer than 63 characters\",\n location)\n if not Lexer.RE_GLYPHCLASS.match(glyphclass):\n raise FeatureLibError(\n \"Glyph class names must consist of letters, digits, \"\n \"underscore, or period\", location)\n return (Lexer.GLYPHCLASS, glyphclass, location)\n if cur_char in Lexer.CHAR_NAME_START_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)\n 
token = text[start:self.pos_]\n if token == \"include\":\n self.mode_ = Lexer.MODE_FILENAME_\n return (Lexer.NAME, token, location)\n if cur_char == \"0\" and next_char in \"xX\":\n self.pos_ += 2\n self.scan_over_(Lexer.CHAR_HEXDIGIT_)\n return (Lexer.NUMBER, int(text[start:self.pos_], 16), location)\n if cur_char in Lexer.CHAR_DIGIT_:\n self.scan_over_(Lexer.CHAR_DIGIT_)\n if self.pos_ >= limit or text[self.pos_] != \".\":\n return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)\n self.scan_over_(\".\")\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.FLOAT, float(text[start:self.pos_]), location)\n if cur_char == \"-\" and next_char in Lexer.CHAR_DIGIT_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_DIGIT_)\n if self.pos_ >= limit or text[self.pos_] != \".\":\n return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)\n self.scan_over_(\".\")\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.FLOAT, float(text[start:self.pos_]), location)\n if cur_char in Lexer.CHAR_SYMBOL_:\n self.pos_ += 1\n return (Lexer.SYMBOL, cur_char, location)\n if cur_char == '\"':\n self.pos_ += 1\n self.scan_until_('\"\\r\\n')\n if self.pos_ < self.text_length_ and self.text_[self.pos_] == '\"':\n self.pos_ += 1\n return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)\n else:\n raise FeatureLibError(\"Expected '\\\"' to terminate string\",\n location)\n raise FeatureLibError(\"Unexpected character: '%s'\" % cur_char,\n location)\n\n def scan_over_(self, valid):\n p = self.pos_\n while p < self.text_length_ and self.text_[p] in valid:\n p += 1\n self.pos_ = p\n\n def scan_until_(self, stop_at):\n p = self.pos_\n while p < self.text_length_ and self.text_[p] not in stop_at:\n p += 1\n self.pos_ = p\n\n\nclass IncludingLexer(object):\n def __init__(self, featurefile):\n self.lexers_ = [self.make_lexer_(featurefile)]\n\n def __iter__(self):\n return self\n\n def next(self): # Python 2\n return self.__next__()\n\n def __next__(self): # Python 3\n while self.lexers_:\n lexer = self.lexers_[-1]\n try:\n token_type, token, location = lexer.next()\n except StopIteration:\n self.lexers_.pop()\n continue\n if token_type is Lexer.NAME and token == \"include\":\n fname_type, fname_token, fname_location = lexer.next()\n if fname_type is not Lexer.FILENAME:\n raise FeatureLibError(\"Expected file name\", fname_location)\n #semi_type, semi_token, semi_location = lexer.next()\n #if semi_type is not Lexer.SYMBOL or semi_token != \";\":\n # raise FeatureLibError(\"Expected ';'\", semi_location)\n curpath = os.path.dirname(lexer.filename_)\n path = os.path.join(curpath, fname_token)\n if len(self.lexers_) >= 5:\n raise FeatureLibError(\"Too many recursive includes\",\n fname_location)\n self.lexers_.append(self.make_lexer_(path, fname_location))\n continue\n else:\n return (token_type, token, location)\n raise StopIteration()\n\n @staticmethod\n def make_lexer_(file_or_path, location=None):\n if hasattr(file_or_path, \"read\"):\n fileobj, closing = file_or_path, False\n else:\n filename, closing = file_or_path, True\n try:\n fileobj = open(filename, \"r\", encoding=\"utf-8\")\n except IOError as err:\n raise FeatureLibError(str(err), location)\n data = fileobj.read()\n filename = fileobj.name if hasattr(fileobj, \"name\") else \"<features>\"\n if closing:\n fileobj.close()\n return Lexer(data, filename)\n", "path": "Lib/fontTools/feaLib/lexer.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom __future__ import unicode_literals\nfrom 
fontTools.misc.py23 import *\nfrom fontTools.feaLib.error import FeatureLibError\nimport re\nimport os\n\n\nclass Lexer(object):\n NUMBER = \"NUMBER\"\n FLOAT = \"FLOAT\"\n STRING = \"STRING\"\n NAME = \"NAME\"\n FILENAME = \"FILENAME\"\n GLYPHCLASS = \"GLYPHCLASS\"\n CID = \"CID\"\n SYMBOL = \"SYMBOL\"\n COMMENT = \"COMMENT\"\n NEWLINE = \"NEWLINE\"\n\n CHAR_WHITESPACE_ = \" \\t\"\n CHAR_NEWLINE_ = \"\\r\\n\"\n CHAR_SYMBOL_ = \",;:-+'{}[]<>()=\"\n CHAR_DIGIT_ = \"0123456789\"\n CHAR_HEXDIGIT_ = \"0123456789ABCDEFabcdef\"\n CHAR_LETTER_ = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n CHAR_NAME_START_ = CHAR_LETTER_ + \"_+*:.^~!\\\\\"\n CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + \"_.+*:^~!/\"\n\n RE_GLYPHCLASS = re.compile(r\"^[A-Za-z_0-9.]+$\")\n\n MODE_NORMAL_ = \"NORMAL\"\n MODE_FILENAME_ = \"FILENAME\"\n\n def __init__(self, text, filename):\n self.filename_ = filename\n self.line_ = 1\n self.pos_ = 0\n self.line_start_ = 0\n self.text_ = text\n self.text_length_ = len(text)\n self.mode_ = Lexer.MODE_NORMAL_\n\n def __iter__(self):\n return self\n\n def next(self): # Python 2\n return self.__next__()\n\n def __next__(self): # Python 3\n while True:\n token_type, token, location = self.next_()\n if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}:\n return (token_type, token, location)\n\n def next_(self):\n self.scan_over_(Lexer.CHAR_WHITESPACE_)\n column = self.pos_ - self.line_start_ + 1\n location = (self.filename_, self.line_, column)\n start = self.pos_\n text = self.text_\n limit = len(text)\n if start >= limit:\n raise StopIteration()\n cur_char = text[start]\n next_char = text[start + 1] if start + 1 < limit else None\n\n if cur_char == \"\\n\":\n self.pos_ += 1\n self.line_ += 1\n self.line_start_ = self.pos_\n return (Lexer.NEWLINE, None, location)\n if cur_char == \"\\r\":\n self.pos_ += (2 if next_char == \"\\n\" else 1)\n self.line_ += 1\n self.line_start_ = self.pos_\n return (Lexer.NEWLINE, None, location)\n if cur_char == \"#\":\n self.scan_until_(Lexer.CHAR_NEWLINE_)\n return (Lexer.COMMENT, text[start:self.pos_], location)\n\n if self.mode_ is Lexer.MODE_FILENAME_:\n if cur_char != \"(\":\n raise FeatureLibError(\"Expected '(' before file name\",\n location)\n self.scan_until_(\")\")\n cur_char = text[self.pos_] if self.pos_ < limit else None\n if cur_char != \")\":\n raise FeatureLibError(\"Expected ')' after file name\",\n location)\n self.pos_ += 1\n self.mode_ = Lexer.MODE_NORMAL_\n return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location)\n\n if cur_char == \"\\\\\" and next_char in Lexer.CHAR_DIGIT_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.CID, int(text[start + 1:self.pos_], 10), location)\n if cur_char == \"@\":\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)\n glyphclass = text[start + 1:self.pos_]\n if len(glyphclass) < 1:\n raise FeatureLibError(\"Expected glyph class name\", location)\n if len(glyphclass) > 63:\n raise FeatureLibError(\n \"Glyph class names must not be longer than 63 characters\",\n location)\n if not Lexer.RE_GLYPHCLASS.match(glyphclass):\n raise FeatureLibError(\n \"Glyph class names must consist of letters, digits, \"\n \"underscore, or period\", location)\n return (Lexer.GLYPHCLASS, glyphclass, location)\n if cur_char in Lexer.CHAR_NAME_START_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)\n token = text[start:self.pos_]\n if token == \"include\":\n self.mode_ = Lexer.MODE_FILENAME_\n return (Lexer.NAME, token, location)\n if cur_char == 
\"0\" and next_char in \"xX\":\n self.pos_ += 2\n self.scan_over_(Lexer.CHAR_HEXDIGIT_)\n return (Lexer.NUMBER, int(text[start:self.pos_], 16), location)\n if cur_char in Lexer.CHAR_DIGIT_:\n self.scan_over_(Lexer.CHAR_DIGIT_)\n if self.pos_ >= limit or text[self.pos_] != \".\":\n return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)\n self.scan_over_(\".\")\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.FLOAT, float(text[start:self.pos_]), location)\n if cur_char == \"-\" and next_char in Lexer.CHAR_DIGIT_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_DIGIT_)\n if self.pos_ >= limit or text[self.pos_] != \".\":\n return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)\n self.scan_over_(\".\")\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.FLOAT, float(text[start:self.pos_]), location)\n if cur_char in Lexer.CHAR_SYMBOL_:\n self.pos_ += 1\n return (Lexer.SYMBOL, cur_char, location)\n if cur_char == '\"':\n self.pos_ += 1\n self.scan_until_('\"')\n if self.pos_ < self.text_length_ and self.text_[self.pos_] == '\"':\n self.pos_ += 1\n # strip newlines embedded within a string\n string = re.sub(\"[\\r\\n]\", \"\", text[start + 1:self.pos_ - 1])\n return (Lexer.STRING, string, location)\n else:\n raise FeatureLibError(\"Expected '\\\"' to terminate string\",\n location)\n raise FeatureLibError(\"Unexpected character: '%s'\" % cur_char,\n location)\n\n def scan_over_(self, valid):\n p = self.pos_\n while p < self.text_length_ and self.text_[p] in valid:\n p += 1\n self.pos_ = p\n\n def scan_until_(self, stop_at):\n p = self.pos_\n while p < self.text_length_ and self.text_[p] not in stop_at:\n p += 1\n self.pos_ = p\n\n\nclass IncludingLexer(object):\n def __init__(self, featurefile):\n self.lexers_ = [self.make_lexer_(featurefile)]\n\n def __iter__(self):\n return self\n\n def next(self): # Python 2\n return self.__next__()\n\n def __next__(self): # Python 3\n while self.lexers_:\n lexer = self.lexers_[-1]\n try:\n token_type, token, location = lexer.next()\n except StopIteration:\n self.lexers_.pop()\n continue\n if token_type is Lexer.NAME and token == \"include\":\n fname_type, fname_token, fname_location = lexer.next()\n if fname_type is not Lexer.FILENAME:\n raise FeatureLibError(\"Expected file name\", fname_location)\n #semi_type, semi_token, semi_location = lexer.next()\n #if semi_type is not Lexer.SYMBOL or semi_token != \";\":\n # raise FeatureLibError(\"Expected ';'\", semi_location)\n curpath = os.path.dirname(lexer.filename_)\n path = os.path.join(curpath, fname_token)\n if len(self.lexers_) >= 5:\n raise FeatureLibError(\"Too many recursive includes\",\n fname_location)\n self.lexers_.append(self.make_lexer_(path, fname_location))\n continue\n else:\n return (token_type, token, location)\n raise StopIteration()\n\n @staticmethod\n def make_lexer_(file_or_path, location=None):\n if hasattr(file_or_path, \"read\"):\n fileobj, closing = file_or_path, False\n else:\n filename, closing = file_or_path, True\n try:\n fileobj = open(filename, \"r\", encoding=\"utf-8\")\n except IOError as err:\n raise FeatureLibError(str(err), location)\n data = fileobj.read()\n filename = fileobj.name if hasattr(fileobj, \"name\") else \"<features>\"\n if closing:\n fileobj.close()\n return Lexer(data, filename)\n", "path": "Lib/fontTools/feaLib/lexer.py"}]}
3,209
228
gh_patches_debug_20297
rasdani/github-patches
git_diff
mozmeao__snippets-service-1063
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix Snippets ICal AttributeError exception Snippets ICal feed raises: `AttributeError: 'ASRSnippet' object has no attribute 'locales'` Fix the error and add tests catch this on the unit test level. We did catch this on the acceptance tests level though :sweat_smile: --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `snippets/base/urls.py` Content: ``` 1 from django.urls import path 2 3 from watchman import views as watchman_views 4 5 from snippets.base import views 6 from snippets.base import feed 7 8 9 urlpatterns = [ 10 path('', views.HomeView.as_view()), 11 path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/' 12 '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/', 13 views.fetch_snippets, name='base.fetch_snippets'), 14 path('preview/', views.preview_snippet, name='base.preview'), 15 path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'), 16 path('show/<int:snippet_id>/', views.show_snippet, name='base.show'), 17 path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'), 18 path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'), 19 path('healthz/', watchman_views.ping, name='watchman.ping'), 20 path('readiness/', watchman_views.status, name='watchman.status'), 21 path('feeds/snippets.ics', feed.SnippetsFeed()), 22 ] 23 ``` Path: `snippets/base/feed.py` Content: ``` 1 import operator 2 from datetime import timedelta 3 from distutils.util import strtobool 4 from textwrap import dedent 5 from urllib.parse import urlparse 6 7 from django.conf import settings 8 from django.db.models import Q 9 10 import django_filters 11 from django_ical.views import ICalFeed 12 13 from snippets.base import models 14 15 16 class ASRSnippetFilter(django_filters.FilterSet): 17 name = django_filters.CharFilter(lookup_expr='icontains') 18 locale = django_filters.CharFilter(method='filter_locale') 19 only_scheduled = django_filters.ChoiceFilter( 20 method='filter_scheduled', choices=(('true', 'Yes'), 21 ('false', 'No'), 22 ('all', 'All'))) 23 24 def filter_locale(self, queryset, name, value): 25 if not value: 26 return queryset 27 28 locales = value.split(',') 29 return queryset.filter( 30 operator.or_( 31 *[Q(locale__code=',{},'.format(locale)) for locale in locales] 32 ) 33 ) 34 35 def filter_scheduled(self, queryset, name, value): 36 if value == 'all': 37 return queryset 38 39 value = strtobool(value) 40 41 if value: 42 return queryset.exclude(publish_start=None, publish_end=None) 43 44 return queryset.filter(publish_start=None, publish_end=None) 45 46 class Meta: 47 model = models.ASRSnippet 48 fields = [] 49 50 51 class SnippetsFeed(ICalFeed): 52 timezone = 'UTC' 53 title = 'Snippets' 54 55 def __call__(self, request, *args, **kwargs): 56 self.request = request 57 return super().__call__(request, *args, **kwargs) 58 59 @property 60 def product_id(self): 61 return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc, 62 self.request.GET.urlencode()) 63 64 def items(self): 65 queryset = (models.ASRSnippet.objects 66 .filter(for_qa=False, status=models.STATUS_CHOICES['Published']) 67 .order_by('publish_start')) 68 filtr = ASRSnippetFilter(self.request.GET, queryset=queryset) 69 return filtr.qs 70 71 def item_title(self, item): 72 return item.name 73 74 def 
item_link(self, item): 75 return item.get_admin_url() 76 77 def item_description(self, item): 78 description = dedent('''\ 79 Channels: {} 80 Locales: {}' 81 Preview Link: {} 82 '''.format(', '.join(item.channels), 83 ', '.join(item.locales.values_list('name', flat=True)), 84 item.get_preview_url())) 85 return description 86 87 def item_start_datetime(self, item): 88 return item.publish_start or item.created 89 90 def item_end_datetime(self, item): 91 return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365)) 92 93 def item_created(self, item): 94 return item.created 95 96 def item_updateddate(self, item): 97 return item.modified 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/snippets/base/feed.py b/snippets/base/feed.py --- a/snippets/base/feed.py +++ b/snippets/base/feed.py @@ -77,10 +77,10 @@ def item_description(self, item): description = dedent('''\ Channels: {} - Locales: {}' + Locale: {}' Preview Link: {} '''.format(', '.join(item.channels), - ', '.join(item.locales.values_list('name', flat=True)), + item.locale, item.get_preview_url())) return description diff --git a/snippets/base/urls.py b/snippets/base/urls.py --- a/snippets/base/urls.py +++ b/snippets/base/urls.py @@ -18,5 +18,5 @@ path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'), path('healthz/', watchman_views.ping, name='watchman.ping'), path('readiness/', watchman_views.status, name='watchman.status'), - path('feeds/snippets.ics', feed.SnippetsFeed()), + path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'), ]
{"golden_diff": "diff --git a/snippets/base/feed.py b/snippets/base/feed.py\n--- a/snippets/base/feed.py\n+++ b/snippets/base/feed.py\n@@ -77,10 +77,10 @@\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n- Locales: {}'\n+ Locale: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n- ', '.join(item.locales.values_list('name', flat=True)),\n+ item.locale,\n item.get_preview_url()))\n return description\n \ndiff --git a/snippets/base/urls.py b/snippets/base/urls.py\n--- a/snippets/base/urls.py\n+++ b/snippets/base/urls.py\n@@ -18,5 +18,5 @@\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n- path('feeds/snippets.ics', feed.SnippetsFeed()),\n+ path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),\n ]\n", "issue": "Fix Snippets ICal AttributeError exception \nSnippets ICal feed raises:\r\n\r\n`AttributeError: 'ASRSnippet' object has no attribute 'locales'`\r\n\r\nFix the error and add tests catch this on the unit test level. We did catch this on the acceptance tests level though :sweat_smile: \r\n\n", "before_files": [{"content": "from django.urls import path\n\nfrom watchman import views as watchman_views\n\nfrom snippets.base import views\nfrom snippets.base import feed\n\n\nurlpatterns = [\n path('', views.HomeView.as_view()),\n path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'\n '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',\n views.fetch_snippets, name='base.fetch_snippets'),\n path('preview/', views.preview_snippet, name='base.preview'),\n path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),\n path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),\n path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n path('feeds/snippets.ics', feed.SnippetsFeed()),\n]\n", "path": "snippets/base/urls.py"}, {"content": "import operator\nfrom datetime import timedelta\nfrom distutils.util import strtobool\nfrom textwrap import dedent\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nimport django_filters\nfrom django_ical.views import ICalFeed\n\nfrom snippets.base import models\n\n\nclass ASRSnippetFilter(django_filters.FilterSet):\n name = django_filters.CharFilter(lookup_expr='icontains')\n locale = django_filters.CharFilter(method='filter_locale')\n only_scheduled = django_filters.ChoiceFilter(\n method='filter_scheduled', choices=(('true', 'Yes'),\n ('false', 'No'),\n ('all', 'All')))\n\n def filter_locale(self, queryset, name, value):\n if not value:\n return queryset\n\n locales = value.split(',')\n return queryset.filter(\n operator.or_(\n *[Q(locale__code=',{},'.format(locale)) for locale in locales]\n )\n )\n\n def filter_scheduled(self, queryset, name, value):\n if value == 'all':\n return queryset\n\n value = strtobool(value)\n\n if value:\n return queryset.exclude(publish_start=None, publish_end=None)\n\n return queryset.filter(publish_start=None, publish_end=None)\n\n class Meta:\n model = models.ASRSnippet\n fields = []\n\n\nclass 
SnippetsFeed(ICalFeed):\n timezone = 'UTC'\n title = 'Snippets'\n\n def __call__(self, request, *args, **kwargs):\n self.request = request\n return super().__call__(request, *args, **kwargs)\n\n @property\n def product_id(self):\n return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,\n self.request.GET.urlencode())\n\n def items(self):\n queryset = (models.ASRSnippet.objects\n .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])\n .order_by('publish_start'))\n filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)\n return filtr.qs\n\n def item_title(self, item):\n return item.name\n\n def item_link(self, item):\n return item.get_admin_url()\n\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n Locales: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n ', '.join(item.locales.values_list('name', flat=True)),\n item.get_preview_url()))\n return description\n\n def item_start_datetime(self, item):\n return item.publish_start or item.created\n\n def item_end_datetime(self, item):\n return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))\n\n def item_created(self, item):\n return item.created\n\n def item_updateddate(self, item):\n return item.modified\n", "path": "snippets/base/feed.py"}], "after_files": [{"content": "from django.urls import path\n\nfrom watchman import views as watchman_views\n\nfrom snippets.base import views\nfrom snippets.base import feed\n\n\nurlpatterns = [\n path('', views.HomeView.as_view()),\n path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'\n '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',\n views.fetch_snippets, name='base.fetch_snippets'),\n path('preview/', views.preview_snippet, name='base.preview'),\n path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),\n path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),\n path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),\n]\n", "path": "snippets/base/urls.py"}, {"content": "import operator\nfrom datetime import timedelta\nfrom distutils.util import strtobool\nfrom textwrap import dedent\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nimport django_filters\nfrom django_ical.views import ICalFeed\n\nfrom snippets.base import models\n\n\nclass ASRSnippetFilter(django_filters.FilterSet):\n name = django_filters.CharFilter(lookup_expr='icontains')\n locale = django_filters.CharFilter(method='filter_locale')\n only_scheduled = django_filters.ChoiceFilter(\n method='filter_scheduled', choices=(('true', 'Yes'),\n ('false', 'No'),\n ('all', 'All')))\n\n def filter_locale(self, queryset, name, value):\n if not value:\n return queryset\n\n locales = value.split(',')\n return queryset.filter(\n operator.or_(\n *[Q(locale__code=',{},'.format(locale)) for locale in locales]\n )\n )\n\n def filter_scheduled(self, queryset, name, value):\n if value == 'all':\n return queryset\n\n value = strtobool(value)\n\n if value:\n return queryset.exclude(publish_start=None, publish_end=None)\n\n return queryset.filter(publish_start=None, publish_end=None)\n\n 
class Meta:\n model = models.ASRSnippet\n fields = []\n\n\nclass SnippetsFeed(ICalFeed):\n timezone = 'UTC'\n title = 'Snippets'\n\n def __call__(self, request, *args, **kwargs):\n self.request = request\n return super().__call__(request, *args, **kwargs)\n\n @property\n def product_id(self):\n return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,\n self.request.GET.urlencode())\n\n def items(self):\n queryset = (models.ASRSnippet.objects\n .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])\n .order_by('publish_start'))\n filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)\n return filtr.qs\n\n def item_title(self, item):\n return item.name\n\n def item_link(self, item):\n return item.get_admin_url()\n\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n Locale: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n item.locale,\n item.get_preview_url()))\n return description\n\n def item_start_datetime(self, item):\n return item.publish_start or item.created\n\n def item_end_datetime(self, item):\n return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))\n\n def item_created(self, item):\n return item.created\n\n def item_updateddate(self, item):\n return item.modified\n", "path": "snippets/base/feed.py"}]}
1,448
269
gh_patches_debug_29258
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-4250
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `applications/Chat/examples/ray/mmmt_prompt.py` Content: ``` 1 import argparse 2 import os 3 import socket 4 from functools import partial 5 6 import pandas as pd 7 import ray 8 import torch 9 from coati.quant import llama_load_quant, low_resource_init 10 from coati.ray.detached_trainer_ppo import DetachedPPOTrainer 11 from coati.ray.experience_maker_holder import ExperienceMakerHolder 12 from coati.ray.utils import ( 13 get_actor_from_args, 14 get_critic_from_args, 15 get_receivers_per_sender, 16 get_reward_model_from_args, 17 get_strategy_from_args, 18 ) 19 from torch.utils.data import DataLoader 20 from transformers import AutoConfig, AutoTokenizer 21 from transformers.modeling_utils import no_init_weights 22 23 24 def get_free_port(): 25 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 26 s.bind(('', 0)) 27 return s.getsockname()[1] 28 29 30 def get_local_ip(): 31 with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: 32 s.connect(('8.8.8.8', 80)) 33 return s.getsockname()[0] 34 35 36 def main(args): 37 master_addr = str(get_local_ip()) 38 # trainer_env_info 39 trainer_port = str(get_free_port()) 40 env_info_trainers = [{ 41 'local_rank': '0', 42 'rank': str(rank), 43 'world_size': str(args.num_trainers), 44 'master_port': trainer_port, 45 'master_addr': master_addr 46 } for rank in range(args.num_trainers)] 47 48 # maker_env_info 49 maker_port = str(get_free_port()) 50 env_info_makers = [{ 51 'local_rank': '0', 52 'rank': str(rank), 53 'world_size': str(args.num_makers), 54 'master_port': maker_port, 55 'master_addr': master_addr 56 } for rank in range(args.num_makers)] 57 58 # configure tokenizer 59 tokenizer = AutoTokenizer.from_pretrained(args.pretrain) 60 tokenizer.pad_token = tokenizer.eos_token 61 62 def model_fn(): 63 actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() 64 critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() 65 reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda() 66 if args.initial_model_quant_ckpt is not None and args.model == 'llama': 67 # quantize initial model 68 actor_cfg = AutoConfig.from_pretrained(args.pretrain) 69 with low_resource_init(), no_init_weights(): 70 initial_model = get_actor_from_args(args.model, config=actor_cfg) 71 initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits, 72 args.quant_group_size).cuda().requires_grad_(False) 73 else: 74 initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda() 75 return actor, critic, reward_model, initial_model 76 77 # configure Experience Maker 78 experience_holder_refs = [ 79 ExperienceMakerHolder.options(name=f"maker{i}", num_gpus=1, max_concurrency=2).remote( 80 detached_trainer_name_list=[ 81 f'trainer{x}' 82 for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False) 83 ], 84 strategy_fn=partial(get_strategy_from_args, args.maker_strategy), 85 model_fn=model_fn, 86 env_info=env_info_maker, 87 kl_coef=0.1, 88 debug=args.debug, 89 update_lora_weights=not (args.lora_rank == 0), 90 
# sync_models_from_trainers=True, 91 # generation kwargs: 92 max_length=512, 93 do_sample=True, 94 temperature=1.0, 95 top_k=50, 96 pad_token_id=tokenizer.pad_token_id, 97 eos_token_id=tokenizer.eos_token_id, 98 eval_performance=True, 99 use_cache=True, 100 ) 101 for i, env_info_maker in enumerate(env_info_makers) 102 ] 103 104 def trainer_model_fn(): 105 actor = get_actor_from_args(args.model, args.pretrain, lora_rank=args.lora_rank).half().cuda() 106 critic = get_critic_from_args(args.model, args.critic_pretrain, lora_rank=args.lora_rank).half().cuda() 107 return actor, critic 108 109 # configure Trainer 110 trainer_refs = [ 111 DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote( 112 experience_maker_holder_name_list=[ 113 f"maker{x}" 114 for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True) 115 ], 116 strategy_fn=partial(get_strategy_from_args, args.trainer_strategy), 117 model_fn=trainer_model_fn, 118 env_info=env_info_trainer, 119 train_batch_size=args.train_batch_size, 120 buffer_limit=16, 121 eval_performance=True, 122 debug=args.debug, 123 update_lora_weights=not (args.lora_rank == 0), 124 ) 125 for i, env_info_trainer in enumerate(env_info_trainers) 126 ] 127 128 dataset_size = args.experience_batch_size * 4 129 130 def build_dataloader(): 131 132 def tokenize_fn(texts): 133 batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True) 134 return {k: v.cuda() for k, v in batch.items()} 135 136 dataset = pd.read_csv(args.prompt_path)['prompt'] 137 dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn) 138 return dataloader 139 140 # uncomment this function if sync_models_from_trainers is True 141 # ray.get([ 142 # trainer_ref.sync_models_to_remote_makers.remote() 143 # for trainer_ref in trainer_refs 144 # ]) 145 146 wait_tasks = [] 147 148 for experience_holder_ref in experience_holder_refs: 149 wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps)) 150 151 total_steps = args.experience_batch_size * args.experience_steps * \ 152 args.num_makers // (args.num_trainers * args.train_batch_size) 153 for trainer_ref in trainer_refs: 154 wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs)) 155 156 ray.get(wait_tasks) 157 158 159 if __name__ == '__main__': 160 parser = argparse.ArgumentParser() 161 parser.add_argument('--prompt_path', type=str, default=None) 162 parser.add_argument('--num_makers', type=int, default=1) 163 parser.add_argument('--num_trainers', type=int, default=1) 164 parser.add_argument('--trainer_strategy', 165 choices=[ 166 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', 167 'colossalai_zero2_cpu' 168 ], 169 default='ddp') 170 parser.add_argument('--maker_strategy', choices=['naive'], default='naive') 171 parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) 172 parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) 173 parser.add_argument('--pretrain', type=str, default=None) 174 parser.add_argument('--critic_pretrain', type=str, default=None) 175 parser.add_argument('--experience_steps', type=int, default=4) 176 parser.add_argument('--experience_batch_size', type=int, default=8) 177 parser.add_argument('--train_epochs', type=int, default=1) 178 parser.add_argument('--update_steps', type=int, default=2) 179 
parser.add_argument('--train_batch_size', type=int, default=8) 180 parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank") 181 182 parser.add_argument('--initial_model_quant_ckpt', type=str, default=None) 183 parser.add_argument('--quant_bits', type=int, default=4) 184 parser.add_argument('--quant_group_size', type=int, default=128) 185 parser.add_argument('--debug', action='store_true') 186 args = parser.parse_args() 187 188 ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)}) 189 main(args) 190 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py --- a/applications/Chat/examples/ray/mmmt_prompt.py +++ b/applications/Chat/examples/ray/mmmt_prompt.py @@ -87,8 +87,8 @@ kl_coef=0.1, debug=args.debug, update_lora_weights=not (args.lora_rank == 0), - # sync_models_from_trainers=True, - # generation kwargs: + # sync_models_from_trainers=True, + # generation kwargs: max_length=512, do_sample=True, temperature=1.0, @@ -161,12 +161,10 @@ parser.add_argument('--prompt_path', type=str, default=None) parser.add_argument('--num_makers', type=int, default=1) parser.add_argument('--num_trainers', type=int, default=1) - parser.add_argument('--trainer_strategy', - choices=[ - 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', - 'colossalai_zero2_cpu' - ], - default='ddp') + parser.add_argument( + '--trainer_strategy', + choices=['ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', 'colossalai_zero2_cpu'], + default='ddp') parser.add_argument('--maker_strategy', choices=['naive'], default='naive') parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama']) parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
{"golden_diff": "diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py\n--- a/applications/Chat/examples/ray/mmmt_prompt.py\n+++ b/applications/Chat/examples/ray/mmmt_prompt.py\n@@ -87,8 +87,8 @@\n kl_coef=0.1,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n- # sync_models_from_trainers=True,\n- # generation kwargs:\n+ # sync_models_from_trainers=True,\n+ # generation kwargs:\n max_length=512,\n do_sample=True,\n temperature=1.0,\n@@ -161,12 +161,10 @@\n parser.add_argument('--prompt_path', type=str, default=None)\n parser.add_argument('--num_makers', type=int, default=1)\n parser.add_argument('--num_trainers', type=int, default=1)\n- parser.add_argument('--trainer_strategy',\n- choices=[\n- 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu',\n- 'colossalai_zero2_cpu'\n- ],\n- default='ddp')\n+ parser.add_argument(\n+ '--trainer_strategy',\n+ choices=['ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', 'colossalai_zero2_cpu'],\n+ default='ddp')\n parser.add_argument('--maker_strategy', choices=['naive'], default='naive')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport os\nimport socket\nfrom functools import partial\n\nimport pandas as pd\nimport ray\nimport torch\nfrom coati.quant import llama_load_quant, low_resource_init\nfrom coati.ray.detached_trainer_ppo import DetachedPPOTrainer\nfrom coati.ray.experience_maker_holder import ExperienceMakerHolder\nfrom coati.ray.utils import (\n get_actor_from_args,\n get_critic_from_args,\n get_receivers_per_sender,\n get_reward_model_from_args,\n get_strategy_from_args,\n)\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoConfig, AutoTokenizer\nfrom transformers.modeling_utils import no_init_weights\n\n\ndef get_free_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind(('', 0))\n return s.getsockname()[1]\n\n\ndef get_local_ip():\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect(('8.8.8.8', 80))\n return s.getsockname()[0]\n\n\ndef main(args):\n master_addr = str(get_local_ip())\n # trainer_env_info\n trainer_port = str(get_free_port())\n env_info_trainers = [{\n 'local_rank': '0',\n 'rank': str(rank),\n 'world_size': str(args.num_trainers),\n 'master_port': trainer_port,\n 'master_addr': master_addr\n } for rank in range(args.num_trainers)]\n\n # maker_env_info\n maker_port = str(get_free_port())\n env_info_makers = [{\n 'local_rank': '0',\n 'rank': str(rank),\n 'world_size': str(args.num_makers),\n 'master_port': maker_port,\n 'master_addr': master_addr\n } for rank in range(args.num_makers)]\n\n # configure tokenizer\n tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n tokenizer.pad_token = tokenizer.eos_token\n\n def model_fn():\n actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()\n critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()\n reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()\n if args.initial_model_quant_ckpt is not None and args.model == 'llama':\n # quantize initial model\n actor_cfg = 
AutoConfig.from_pretrained(args.pretrain)\n with low_resource_init(), no_init_weights():\n initial_model = get_actor_from_args(args.model, config=actor_cfg)\n initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits,\n args.quant_group_size).cuda().requires_grad_(False)\n else:\n initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()\n return actor, critic, reward_model, initial_model\n\n # configure Experience Maker\n experience_holder_refs = [\n ExperienceMakerHolder.options(name=f\"maker{i}\", num_gpus=1, max_concurrency=2).remote(\n detached_trainer_name_list=[\n f'trainer{x}'\n for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False)\n ],\n strategy_fn=partial(get_strategy_from_args, args.maker_strategy),\n model_fn=model_fn,\n env_info=env_info_maker,\n kl_coef=0.1,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n # sync_models_from_trainers=True,\n # generation kwargs:\n max_length=512,\n do_sample=True,\n temperature=1.0,\n top_k=50,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n eval_performance=True,\n use_cache=True,\n )\n for i, env_info_maker in enumerate(env_info_makers)\n ]\n\n def trainer_model_fn():\n actor = get_actor_from_args(args.model, args.pretrain, lora_rank=args.lora_rank).half().cuda()\n critic = get_critic_from_args(args.model, args.critic_pretrain, lora_rank=args.lora_rank).half().cuda()\n return actor, critic\n\n # configure Trainer\n trainer_refs = [\n DetachedPPOTrainer.options(name=f\"trainer{i}\", num_gpus=1, max_concurrency=2).remote(\n experience_maker_holder_name_list=[\n f\"maker{x}\"\n for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True)\n ],\n strategy_fn=partial(get_strategy_from_args, args.trainer_strategy),\n model_fn=trainer_model_fn,\n env_info=env_info_trainer,\n train_batch_size=args.train_batch_size,\n buffer_limit=16,\n eval_performance=True,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n )\n for i, env_info_trainer in enumerate(env_info_trainers)\n ]\n\n dataset_size = args.experience_batch_size * 4\n\n def build_dataloader():\n\n def tokenize_fn(texts):\n batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)\n return {k: v.cuda() for k, v in batch.items()}\n\n dataset = pd.read_csv(args.prompt_path)['prompt']\n dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn)\n return dataloader\n\n # uncomment this function if sync_models_from_trainers is True\n # ray.get([\n # trainer_ref.sync_models_to_remote_makers.remote()\n # for trainer_ref in trainer_refs\n # ])\n\n wait_tasks = []\n\n for experience_holder_ref in experience_holder_refs:\n wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps))\n\n total_steps = args.experience_batch_size * args.experience_steps * \\\n args.num_makers // (args.num_trainers * args.train_batch_size)\n for trainer_ref in trainer_refs:\n wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs))\n\n ray.get(wait_tasks)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prompt_path', type=str, default=None)\n parser.add_argument('--num_makers', type=int, default=1)\n parser.add_argument('--num_trainers', type=int, default=1)\n 
parser.add_argument('--trainer_strategy',\n choices=[\n 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu',\n 'colossalai_zero2_cpu'\n ],\n default='ddp')\n parser.add_argument('--maker_strategy', choices=['naive'], default='naive')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--critic_pretrain', type=str, default=None)\n parser.add_argument('--experience_steps', type=int, default=4)\n parser.add_argument('--experience_batch_size', type=int, default=8)\n parser.add_argument('--train_epochs', type=int, default=1)\n parser.add_argument('--update_steps', type=int, default=2)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation matrices rank\")\n\n parser.add_argument('--initial_model_quant_ckpt', type=str, default=None)\n parser.add_argument('--quant_bits', type=int, default=4)\n parser.add_argument('--quant_group_size', type=int, default=128)\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n ray.init(namespace=os.environ[\"RAY_NAMESPACE\"], runtime_env={\"env_vars\": dict(os.environ)})\n main(args)\n", "path": "applications/Chat/examples/ray/mmmt_prompt.py"}], "after_files": [{"content": "import argparse\nimport os\nimport socket\nfrom functools import partial\n\nimport pandas as pd\nimport ray\nimport torch\nfrom coati.quant import llama_load_quant, low_resource_init\nfrom coati.ray.detached_trainer_ppo import DetachedPPOTrainer\nfrom coati.ray.experience_maker_holder import ExperienceMakerHolder\nfrom coati.ray.utils import (\n get_actor_from_args,\n get_critic_from_args,\n get_receivers_per_sender,\n get_reward_model_from_args,\n get_strategy_from_args,\n)\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoConfig, AutoTokenizer\nfrom transformers.modeling_utils import no_init_weights\n\n\ndef get_free_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind(('', 0))\n return s.getsockname()[1]\n\n\ndef get_local_ip():\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect(('8.8.8.8', 80))\n return s.getsockname()[0]\n\n\ndef main(args):\n master_addr = str(get_local_ip())\n # trainer_env_info\n trainer_port = str(get_free_port())\n env_info_trainers = [{\n 'local_rank': '0',\n 'rank': str(rank),\n 'world_size': str(args.num_trainers),\n 'master_port': trainer_port,\n 'master_addr': master_addr\n } for rank in range(args.num_trainers)]\n\n # maker_env_info\n maker_port = str(get_free_port())\n env_info_makers = [{\n 'local_rank': '0',\n 'rank': str(rank),\n 'world_size': str(args.num_makers),\n 'master_port': maker_port,\n 'master_addr': master_addr\n } for rank in range(args.num_makers)]\n\n # configure tokenizer\n tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n tokenizer.pad_token = tokenizer.eos_token\n\n def model_fn():\n actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()\n critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()\n reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()\n if args.initial_model_quant_ckpt is not None and args.model == 'llama':\n # quantize initial model\n actor_cfg = 
AutoConfig.from_pretrained(args.pretrain)\n with low_resource_init(), no_init_weights():\n initial_model = get_actor_from_args(args.model, config=actor_cfg)\n initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits,\n args.quant_group_size).cuda().requires_grad_(False)\n else:\n initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()\n return actor, critic, reward_model, initial_model\n\n # configure Experience Maker\n experience_holder_refs = [\n ExperienceMakerHolder.options(name=f\"maker{i}\", num_gpus=1, max_concurrency=2).remote(\n detached_trainer_name_list=[\n f'trainer{x}'\n for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False)\n ],\n strategy_fn=partial(get_strategy_from_args, args.maker_strategy),\n model_fn=model_fn,\n env_info=env_info_maker,\n kl_coef=0.1,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n # sync_models_from_trainers=True,\n # generation kwargs:\n max_length=512,\n do_sample=True,\n temperature=1.0,\n top_k=50,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n eval_performance=True,\n use_cache=True,\n )\n for i, env_info_maker in enumerate(env_info_makers)\n ]\n\n def trainer_model_fn():\n actor = get_actor_from_args(args.model, args.pretrain, lora_rank=args.lora_rank).half().cuda()\n critic = get_critic_from_args(args.model, args.critic_pretrain, lora_rank=args.lora_rank).half().cuda()\n return actor, critic\n\n # configure Trainer\n trainer_refs = [\n DetachedPPOTrainer.options(name=f\"trainer{i}\", num_gpus=1, max_concurrency=2).remote(\n experience_maker_holder_name_list=[\n f\"maker{x}\"\n for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True)\n ],\n strategy_fn=partial(get_strategy_from_args, args.trainer_strategy),\n model_fn=trainer_model_fn,\n env_info=env_info_trainer,\n train_batch_size=args.train_batch_size,\n buffer_limit=16,\n eval_performance=True,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n )\n for i, env_info_trainer in enumerate(env_info_trainers)\n ]\n\n dataset_size = args.experience_batch_size * 4\n\n def build_dataloader():\n\n def tokenize_fn(texts):\n batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)\n return {k: v.cuda() for k, v in batch.items()}\n\n dataset = pd.read_csv(args.prompt_path)['prompt']\n dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn)\n return dataloader\n\n # uncomment this function if sync_models_from_trainers is True\n # ray.get([\n # trainer_ref.sync_models_to_remote_makers.remote()\n # for trainer_ref in trainer_refs\n # ])\n\n wait_tasks = []\n\n for experience_holder_ref in experience_holder_refs:\n wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps))\n\n total_steps = args.experience_batch_size * args.experience_steps * \\\n args.num_makers // (args.num_trainers * args.train_batch_size)\n for trainer_ref in trainer_refs:\n wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs))\n\n ray.get(wait_tasks)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prompt_path', type=str, default=None)\n parser.add_argument('--num_makers', type=int, default=1)\n parser.add_argument('--num_trainers', type=int, default=1)\n 
parser.add_argument(\n '--trainer_strategy',\n choices=['ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', 'colossalai_zero2_cpu'],\n default='ddp')\n parser.add_argument('--maker_strategy', choices=['naive'], default='naive')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--critic_pretrain', type=str, default=None)\n parser.add_argument('--experience_steps', type=int, default=4)\n parser.add_argument('--experience_batch_size', type=int, default=8)\n parser.add_argument('--train_epochs', type=int, default=1)\n parser.add_argument('--update_steps', type=int, default=2)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation matrices rank\")\n\n parser.add_argument('--initial_model_quant_ckpt', type=str, default=None)\n parser.add_argument('--quant_bits', type=int, default=4)\n parser.add_argument('--quant_group_size', type=int, default=128)\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n ray.init(namespace=os.environ[\"RAY_NAMESPACE\"], runtime_env={\"env_vars\": dict(os.environ)})\n main(args)\n", "path": "applications/Chat/examples/ray/mmmt_prompt.py"}]}
2,561
408
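The record above closes with the maker/trainer bookkeeping from `mmmt_prompt.py`, whose `total_steps` expression is easy to misread. As a hedged aside (the helper and the assert below are illustrative only, not code from the repository), the arithmetic can be checked on its own using the argparse defaults shown in the record:

```python
# Illustrative sketch only: mirrors the total_steps formula from the record above.
def total_trainer_steps(experience_batch_size, experience_steps, num_makers,
                        num_trainers, train_batch_size):
    # Experiences produced by all makers across their working loops, divided by
    # the number of samples all trainers consume per update step.
    produced = experience_batch_size * experience_steps * num_makers
    consumed_per_step = num_trainers * train_batch_size
    return produced // consumed_per_step


# With the argparse defaults from the record (8, 4, 1, 1, 8) each trainer runs 4 steps.
assert total_trainer_steps(8, 4, 1, 1, 8) == 4
```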
gh_patches_debug_12493
rasdani/github-patches
git_diff
streamlink__streamlink-1082
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unable to open Smashcast streams I get the following error whenever I try to open a Smashcast stream ``` streamlink smashcast.tv/greatvaluesmash best [cli][info] Found matching plugin hitbox for URL smashcast.tv/greatvaluesmash Traceback (most recent call last): File "C:\Program Files (x86)\Streamlink\bin\streamlink-script.py", line 15, in <module> main() File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 103 8, in main handle_url() File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 482 , in handle_url streams = fetch_streams(plugin) File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 394 , in fetch_streams sorting_excludes=args.stream_sorting_excludes) File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugin\plugin.py", lin e 345, in get_streams return self.streams(*args, **kwargs) File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugin\plugin.py", lin e 248, in streams ostreams = self._get_streams() File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugins\hitbox.py", li ne 181, in _get_streams if livestream["media_hosted_media"]: KeyError: 'media_hosted_media' ``` Tried with multiple different streams and using a hitbox url instead of smashcast and it still occurs. It still works perfectly fine on Livestreamer, so I'm not really sure what's up. Asked a friend to test it out to see if it's not just me and he had the same error. Using the latest version (0.7.0) on Windows 7, and my friend was using Windows 8.1. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/hitbox.py` Content: ``` 1 import re 2 3 from itertools import chain 4 5 from streamlink.compat import urlparse 6 from streamlink.plugin import Plugin 7 from streamlink.plugin.api import StreamMapper, http, validate 8 from streamlink.stream import HLSStream, HTTPStream, RTMPStream 9 from streamlink.utils import absolute_url 10 11 HLS_PLAYLIST_BASE = "http://www.smashcast.tv{0}" 12 LIVE_API = "http://www.smashcast.tv/api/media/live/{0}?showHidden=true&liveonly=false" 13 PLAYER_API = "http://www.smashcast.tv/api/player/config/{0}/{1}?embed=false&showHidden=true" 14 SWF_BASE = "http://edge.vie.hitbox.tv/static/player/flowplayer/" 15 SWF_URL = SWF_BASE + "flowplayer.commercial-3.2.16.swf" 16 VOD_BASE_URL = "http://www.smashcast.tv/" 17 18 _quality_re = re.compile(r"(\d+p)$") 19 _url_re = re.compile(r""" 20 http(s)?://(www\.)?(hitbox|smashcast).tv 21 /(?P<channel>[^/]+) 22 (?: 23 (?:/videos)?/(?P<media_id>[^/]+) 24 )? 
25 """, re.VERBOSE) 26 27 _live_schema = validate.Schema( 28 { 29 "livestream": [{ 30 "media_user_name": validate.text, 31 validate.optional("media_hosted_media"): object, 32 "media_is_live": validate.all( 33 validate.text, 34 validate.transform(int), 35 validate.transform(bool) 36 ), 37 "media_id": validate.text 38 }], 39 }, 40 validate.get("livestream"), 41 validate.length(1), 42 validate.get(0) 43 ) 44 _player_schema = validate.Schema( 45 { 46 "clip": { 47 "baseUrl": validate.any(None, validate.text), 48 "bitrates": validate.all( 49 validate.filter(lambda b: b.get("url") and b.get("label")), 50 [{ 51 "label": validate.text, 52 "url": validate.text, 53 }], 54 ) 55 }, 56 validate.optional("playlist"): [{ 57 validate.optional("connectionProvider"): validate.text, 58 validate.optional("netConnectionUrl"): validate.text, 59 validate.optional("bitrates"): [{ 60 "label": validate.text, 61 "url": validate.text, 62 "provider": validate.text 63 }] 64 }], 65 "plugins": validate.all( 66 dict, 67 validate.filter(lambda k, v: k in ["rtmp", "rtmpHitbox", "hls"]), 68 { 69 validate.text: { 70 validate.optional("netConnectionUrl"): validate.text, 71 "url": validate.text 72 } 73 } 74 ) 75 } 76 ) 77 78 79 class Hitbox(Plugin): 80 @classmethod 81 def can_handle_url(cls, url): 82 return _url_re.match(url) 83 84 def _get_quality(self, label): 85 match = _quality_re.search(label) 86 if match: 87 return match.group(1) 88 89 return "live" 90 91 def _create_hls_streams(self, bitrate): 92 url = bitrate["url"] 93 quality = self._get_quality(bitrate["label"]) 94 95 if not url.startswith("http"): 96 url = HLS_PLAYLIST_BASE.format(url) 97 98 if bitrate["label"] == "Auto": 99 try: 100 streams = HLSStream.parse_variant_playlist(self.session, url) 101 return streams.items() 102 except IOError as err: 103 self.logger.warning("Failed to extract HLS streams: {0}", err) 104 else: 105 return quality, HLSStream(self.session, url) 106 107 def _create_rtmp_stream(self, rtmp, swf_url, bitrate): 108 quality = self._get_quality(bitrate["label"]) 109 url = bitrate["url"] 110 stream = RTMPStream(self.session, { 111 "rtmp": rtmp, 112 "pageUrl": self.url, 113 "playpath": url, 114 "swfVfy": swf_url, 115 "live": True 116 }) 117 118 return quality, stream 119 120 def _get_live_streams(self, player): 121 mappers = [] 122 swf_url = SWF_URL 123 for playlist in player.get("playlist", []): 124 bitrates = playlist.get("bitrates") 125 provider = playlist.get("connectionProvider") 126 rtmp = None 127 128 if bitrates: 129 rtmp = playlist.get("netConnectionUrl") 130 elif provider and provider in player["plugins"]: 131 provider = player["plugins"][provider] 132 swf_name = provider["url"] 133 swf_url = SWF_BASE + swf_name 134 rtmp = provider["netConnectionUrl"] 135 bitrates = player["clip"]["bitrates"] 136 else: 137 continue 138 139 mapper = StreamMapper( 140 cmp=lambda provider, bitrate: bitrate["provider"].startswith(provider) 141 ) 142 mapper.map("hls", self._create_hls_streams) 143 mapper.map("rtmp", self._create_rtmp_stream, rtmp, swf_url) 144 mappers.append(mapper(bitrates)) 145 146 return chain.from_iterable(mappers) 147 148 def _create_video_stream(self, cls, base_url, bitrate): 149 url = absolute_url(base_url, bitrate["url"]) 150 if bitrate["label"].lower() == "auto": 151 try: 152 return cls.parse_variant_playlist(self.session, url).items() 153 except IOError as err: 154 self.logger.warning("Failed to extract HLS streams: {0}", err) 155 return 156 157 quality = self._get_quality(bitrate["label"]) 158 return quality, cls(self.session, url) 
159 160 def _get_video_streams(self, player): 161 base_url = player["clip"]["baseUrl"] or VOD_BASE_URL 162 mapper = StreamMapper( 163 cmp=lambda ext, bitrate: urlparse(bitrate["url"]).path.endswith(ext) 164 ) 165 mapper.map(".m3u8", self._create_video_stream, HLSStream, base_url) 166 mapper.map(".mp4", self._create_video_stream, HTTPStream, base_url) 167 mapper.map(".flv", self._create_video_stream, HTTPStream, base_url) 168 169 return mapper(player["clip"]["bitrates"]) 170 171 def _get_streams(self): 172 match = _url_re.match(self.url) 173 if not match: 174 return 175 176 channel, media_id = match.group("channel", "media_id") 177 self.logger.debug("Matched URL: channel={0}, media_id={1}".format(channel, media_id)) 178 if not media_id: 179 res = http.get(LIVE_API.format(channel)) 180 livestream = http.json(res, schema=_live_schema) 181 if livestream["media_hosted_media"]: 182 hosted = _live_schema.validate(livestream["media_hosted_media"]) 183 self.logger.info("{0} is hosting {1}", livestream["media_user_name"], hosted["media_user_name"]) 184 livestream = hosted 185 186 if not livestream["media_is_live"]: 187 return 188 189 media_id = livestream["media_id"] 190 media_type = "live" 191 else: 192 media_type = "video" 193 194 res = http.get(PLAYER_API.format(media_type, media_id)) 195 player = http.json(res, schema=_player_schema) 196 197 if media_type == "live": 198 return self._get_live_streams(player) 199 else: 200 return self._get_video_streams(player) 201 202 203 __plugin__ = Hitbox 204 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/hitbox.py b/src/streamlink/plugins/hitbox.py --- a/src/streamlink/plugins/hitbox.py +++ b/src/streamlink/plugins/hitbox.py @@ -178,7 +178,7 @@ if not media_id: res = http.get(LIVE_API.format(channel)) livestream = http.json(res, schema=_live_schema) - if livestream["media_hosted_media"]: + if livestream.get("media_hosted_media"): hosted = _live_schema.validate(livestream["media_hosted_media"]) self.logger.info("{0} is hosting {1}", livestream["media_user_name"], hosted["media_user_name"]) livestream = hosted
{"golden_diff": "diff --git a/src/streamlink/plugins/hitbox.py b/src/streamlink/plugins/hitbox.py\n--- a/src/streamlink/plugins/hitbox.py\n+++ b/src/streamlink/plugins/hitbox.py\n@@ -178,7 +178,7 @@\n if not media_id:\n res = http.get(LIVE_API.format(channel))\n livestream = http.json(res, schema=_live_schema)\n- if livestream[\"media_hosted_media\"]:\n+ if livestream.get(\"media_hosted_media\"):\n hosted = _live_schema.validate(livestream[\"media_hosted_media\"])\n self.logger.info(\"{0} is hosting {1}\", livestream[\"media_user_name\"], hosted[\"media_user_name\"])\n livestream = hosted\n", "issue": "Unable to open Smashcast streams\nI get the following error whenever I try to open a Smashcast stream \r\n\r\n```\r\nstreamlink smashcast.tv/greatvaluesmash best\r\n[cli][info] Found matching plugin hitbox for URL smashcast.tv/greatvaluesmash\r\nTraceback (most recent call last):\r\n File \"C:\\Program Files (x86)\\Streamlink\\bin\\streamlink-script.py\", line 15, in\r\n <module>\r\n main()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 103\r\n8, in main\r\n handle_url()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 482\r\n, in handle_url\r\n streams = fetch_streams(plugin)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 394\r\n, in fetch_streams\r\n sorting_excludes=args.stream_sorting_excludes)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugin\\plugin.py\", lin\r\ne 345, in get_streams\r\n return self.streams(*args, **kwargs)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugin\\plugin.py\", lin\r\ne 248, in streams\r\n ostreams = self._get_streams()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugins\\hitbox.py\", li\r\nne 181, in _get_streams\r\n if livestream[\"media_hosted_media\"]:\r\nKeyError: 'media_hosted_media'\r\n```\r\n\r\nTried with multiple different streams and using a hitbox url instead of smashcast and it still occurs. It still works perfectly fine on Livestreamer, so I'm not really sure what's up. Asked a friend to test it out to see if it's not just me and he had the same error. \r\n\r\nUsing the latest version (0.7.0) on Windows 7, and my friend was using Windows 8.1. 
\n", "before_files": [{"content": "import re\n\nfrom itertools import chain\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import StreamMapper, http, validate\nfrom streamlink.stream import HLSStream, HTTPStream, RTMPStream\nfrom streamlink.utils import absolute_url\n\nHLS_PLAYLIST_BASE = \"http://www.smashcast.tv{0}\"\nLIVE_API = \"http://www.smashcast.tv/api/media/live/{0}?showHidden=true&liveonly=false\"\nPLAYER_API = \"http://www.smashcast.tv/api/player/config/{0}/{1}?embed=false&showHidden=true\"\nSWF_BASE = \"http://edge.vie.hitbox.tv/static/player/flowplayer/\"\nSWF_URL = SWF_BASE + \"flowplayer.commercial-3.2.16.swf\"\nVOD_BASE_URL = \"http://www.smashcast.tv/\"\n\n_quality_re = re.compile(r\"(\\d+p)$\")\n_url_re = re.compile(r\"\"\"\n http(s)?://(www\\.)?(hitbox|smashcast).tv\n /(?P<channel>[^/]+)\n (?:\n (?:/videos)?/(?P<media_id>[^/]+)\n )?\n\"\"\", re.VERBOSE)\n\n_live_schema = validate.Schema(\n {\n \"livestream\": [{\n \"media_user_name\": validate.text,\n validate.optional(\"media_hosted_media\"): object,\n \"media_is_live\": validate.all(\n validate.text,\n validate.transform(int),\n validate.transform(bool)\n ),\n \"media_id\": validate.text\n }],\n },\n validate.get(\"livestream\"),\n validate.length(1),\n validate.get(0)\n)\n_player_schema = validate.Schema(\n {\n \"clip\": {\n \"baseUrl\": validate.any(None, validate.text),\n \"bitrates\": validate.all(\n validate.filter(lambda b: b.get(\"url\") and b.get(\"label\")),\n [{\n \"label\": validate.text,\n \"url\": validate.text,\n }],\n )\n },\n validate.optional(\"playlist\"): [{\n validate.optional(\"connectionProvider\"): validate.text,\n validate.optional(\"netConnectionUrl\"): validate.text,\n validate.optional(\"bitrates\"): [{\n \"label\": validate.text,\n \"url\": validate.text,\n \"provider\": validate.text\n }]\n }],\n \"plugins\": validate.all(\n dict,\n validate.filter(lambda k, v: k in [\"rtmp\", \"rtmpHitbox\", \"hls\"]),\n {\n validate.text: {\n validate.optional(\"netConnectionUrl\"): validate.text,\n \"url\": validate.text\n }\n }\n )\n }\n)\n\n\nclass Hitbox(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_quality(self, label):\n match = _quality_re.search(label)\n if match:\n return match.group(1)\n\n return \"live\"\n\n def _create_hls_streams(self, bitrate):\n url = bitrate[\"url\"]\n quality = self._get_quality(bitrate[\"label\"])\n\n if not url.startswith(\"http\"):\n url = HLS_PLAYLIST_BASE.format(url)\n\n if bitrate[\"label\"] == \"Auto\":\n try:\n streams = HLSStream.parse_variant_playlist(self.session, url)\n return streams.items()\n except IOError as err:\n self.logger.warning(\"Failed to extract HLS streams: {0}\", err)\n else:\n return quality, HLSStream(self.session, url)\n\n def _create_rtmp_stream(self, rtmp, swf_url, bitrate):\n quality = self._get_quality(bitrate[\"label\"])\n url = bitrate[\"url\"]\n stream = RTMPStream(self.session, {\n \"rtmp\": rtmp,\n \"pageUrl\": self.url,\n \"playpath\": url,\n \"swfVfy\": swf_url,\n \"live\": True\n })\n\n return quality, stream\n\n def _get_live_streams(self, player):\n mappers = []\n swf_url = SWF_URL\n for playlist in player.get(\"playlist\", []):\n bitrates = playlist.get(\"bitrates\")\n provider = playlist.get(\"connectionProvider\")\n rtmp = None\n\n if bitrates:\n rtmp = playlist.get(\"netConnectionUrl\")\n elif provider and provider in player[\"plugins\"]:\n provider = player[\"plugins\"][provider]\n swf_name = provider[\"url\"]\n 
swf_url = SWF_BASE + swf_name\n rtmp = provider[\"netConnectionUrl\"]\n bitrates = player[\"clip\"][\"bitrates\"]\n else:\n continue\n\n mapper = StreamMapper(\n cmp=lambda provider, bitrate: bitrate[\"provider\"].startswith(provider)\n )\n mapper.map(\"hls\", self._create_hls_streams)\n mapper.map(\"rtmp\", self._create_rtmp_stream, rtmp, swf_url)\n mappers.append(mapper(bitrates))\n\n return chain.from_iterable(mappers)\n\n def _create_video_stream(self, cls, base_url, bitrate):\n url = absolute_url(base_url, bitrate[\"url\"])\n if bitrate[\"label\"].lower() == \"auto\":\n try:\n return cls.parse_variant_playlist(self.session, url).items()\n except IOError as err:\n self.logger.warning(\"Failed to extract HLS streams: {0}\", err)\n return\n\n quality = self._get_quality(bitrate[\"label\"])\n return quality, cls(self.session, url)\n\n def _get_video_streams(self, player):\n base_url = player[\"clip\"][\"baseUrl\"] or VOD_BASE_URL\n mapper = StreamMapper(\n cmp=lambda ext, bitrate: urlparse(bitrate[\"url\"]).path.endswith(ext)\n )\n mapper.map(\".m3u8\", self._create_video_stream, HLSStream, base_url)\n mapper.map(\".mp4\", self._create_video_stream, HTTPStream, base_url)\n mapper.map(\".flv\", self._create_video_stream, HTTPStream, base_url)\n\n return mapper(player[\"clip\"][\"bitrates\"])\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n if not match:\n return\n\n channel, media_id = match.group(\"channel\", \"media_id\")\n self.logger.debug(\"Matched URL: channel={0}, media_id={1}\".format(channel, media_id))\n if not media_id:\n res = http.get(LIVE_API.format(channel))\n livestream = http.json(res, schema=_live_schema)\n if livestream[\"media_hosted_media\"]:\n hosted = _live_schema.validate(livestream[\"media_hosted_media\"])\n self.logger.info(\"{0} is hosting {1}\", livestream[\"media_user_name\"], hosted[\"media_user_name\"])\n livestream = hosted\n\n if not livestream[\"media_is_live\"]:\n return\n\n media_id = livestream[\"media_id\"]\n media_type = \"live\"\n else:\n media_type = \"video\"\n\n res = http.get(PLAYER_API.format(media_type, media_id))\n player = http.json(res, schema=_player_schema)\n\n if media_type == \"live\":\n return self._get_live_streams(player)\n else:\n return self._get_video_streams(player)\n\n\n__plugin__ = Hitbox\n", "path": "src/streamlink/plugins/hitbox.py"}], "after_files": [{"content": "import re\n\nfrom itertools import chain\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import StreamMapper, http, validate\nfrom streamlink.stream import HLSStream, HTTPStream, RTMPStream\nfrom streamlink.utils import absolute_url\n\nHLS_PLAYLIST_BASE = \"http://www.smashcast.tv{0}\"\nLIVE_API = \"http://www.smashcast.tv/api/media/live/{0}?showHidden=true&liveonly=false\"\nPLAYER_API = \"http://www.smashcast.tv/api/player/config/{0}/{1}?embed=false&showHidden=true\"\nSWF_BASE = \"http://edge.vie.hitbox.tv/static/player/flowplayer/\"\nSWF_URL = SWF_BASE + \"flowplayer.commercial-3.2.16.swf\"\nVOD_BASE_URL = \"http://www.smashcast.tv/\"\n\n_quality_re = re.compile(r\"(\\d+p)$\")\n_url_re = re.compile(r\"\"\"\n http(s)?://(www\\.)?(hitbox|smashcast).tv\n /(?P<channel>[^/]+)\n (?:\n (?:/videos)?/(?P<media_id>[^/]+)\n )?\n\"\"\", re.VERBOSE)\n\n_live_schema = validate.Schema(\n {\n \"livestream\": [{\n \"media_user_name\": validate.text,\n validate.optional(\"media_hosted_media\"): object,\n \"media_is_live\": validate.all(\n validate.text,\n validate.transform(int),\n validate.transform(bool)\n 
),\n \"media_id\": validate.text\n }],\n },\n validate.get(\"livestream\"),\n validate.length(1),\n validate.get(0)\n)\n_player_schema = validate.Schema(\n {\n \"clip\": {\n \"baseUrl\": validate.any(None, validate.text),\n \"bitrates\": validate.all(\n validate.filter(lambda b: b.get(\"url\") and b.get(\"label\")),\n [{\n \"label\": validate.text,\n \"url\": validate.text,\n }],\n )\n },\n validate.optional(\"playlist\"): [{\n validate.optional(\"connectionProvider\"): validate.text,\n validate.optional(\"netConnectionUrl\"): validate.text,\n validate.optional(\"bitrates\"): [{\n \"label\": validate.text,\n \"url\": validate.text,\n \"provider\": validate.text\n }]\n }],\n \"plugins\": validate.all(\n dict,\n validate.filter(lambda k, v: k in [\"rtmp\", \"rtmpHitbox\", \"hls\"]),\n {\n validate.text: {\n validate.optional(\"netConnectionUrl\"): validate.text,\n \"url\": validate.text\n }\n }\n )\n }\n)\n\n\nclass Hitbox(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_quality(self, label):\n match = _quality_re.search(label)\n if match:\n return match.group(1)\n\n return \"live\"\n\n def _create_hls_streams(self, bitrate):\n url = bitrate[\"url\"]\n quality = self._get_quality(bitrate[\"label\"])\n\n if not url.startswith(\"http\"):\n url = HLS_PLAYLIST_BASE.format(url)\n\n if bitrate[\"label\"] == \"Auto\":\n try:\n streams = HLSStream.parse_variant_playlist(self.session, url)\n return streams.items()\n except IOError as err:\n self.logger.warning(\"Failed to extract HLS streams: {0}\", err)\n else:\n return quality, HLSStream(self.session, url)\n\n def _create_rtmp_stream(self, rtmp, swf_url, bitrate):\n quality = self._get_quality(bitrate[\"label\"])\n url = bitrate[\"url\"]\n stream = RTMPStream(self.session, {\n \"rtmp\": rtmp,\n \"pageUrl\": self.url,\n \"playpath\": url,\n \"swfVfy\": swf_url,\n \"live\": True\n })\n\n return quality, stream\n\n def _get_live_streams(self, player):\n mappers = []\n swf_url = SWF_URL\n for playlist in player.get(\"playlist\", []):\n bitrates = playlist.get(\"bitrates\")\n provider = playlist.get(\"connectionProvider\")\n rtmp = None\n\n if bitrates:\n rtmp = playlist.get(\"netConnectionUrl\")\n elif provider and provider in player[\"plugins\"]:\n provider = player[\"plugins\"][provider]\n swf_name = provider[\"url\"]\n swf_url = SWF_BASE + swf_name\n rtmp = provider[\"netConnectionUrl\"]\n bitrates = player[\"clip\"][\"bitrates\"]\n else:\n continue\n\n mapper = StreamMapper(\n cmp=lambda provider, bitrate: bitrate[\"provider\"].startswith(provider)\n )\n mapper.map(\"hls\", self._create_hls_streams)\n mapper.map(\"rtmp\", self._create_rtmp_stream, rtmp, swf_url)\n mappers.append(mapper(bitrates))\n\n return chain.from_iterable(mappers)\n\n def _create_video_stream(self, cls, base_url, bitrate):\n url = absolute_url(base_url, bitrate[\"url\"])\n if bitrate[\"label\"].lower() == \"auto\":\n try:\n return cls.parse_variant_playlist(self.session, url).items()\n except IOError as err:\n self.logger.warning(\"Failed to extract HLS streams: {0}\", err)\n return\n\n quality = self._get_quality(bitrate[\"label\"])\n return quality, cls(self.session, url)\n\n def _get_video_streams(self, player):\n base_url = player[\"clip\"][\"baseUrl\"] or VOD_BASE_URL\n mapper = StreamMapper(\n cmp=lambda ext, bitrate: urlparse(bitrate[\"url\"]).path.endswith(ext)\n )\n mapper.map(\".m3u8\", self._create_video_stream, HLSStream, base_url)\n mapper.map(\".mp4\", self._create_video_stream, HTTPStream, base_url)\n 
mapper.map(\".flv\", self._create_video_stream, HTTPStream, base_url)\n\n return mapper(player[\"clip\"][\"bitrates\"])\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n if not match:\n return\n\n channel, media_id = match.group(\"channel\", \"media_id\")\n self.logger.debug(\"Matched URL: channel={0}, media_id={1}\".format(channel, media_id))\n if not media_id:\n res = http.get(LIVE_API.format(channel))\n livestream = http.json(res, schema=_live_schema)\n if livestream.get(\"media_hosted_media\"):\n hosted = _live_schema.validate(livestream[\"media_hosted_media\"])\n self.logger.info(\"{0} is hosting {1}\", livestream[\"media_user_name\"], hosted[\"media_user_name\"])\n livestream = hosted\n\n if not livestream[\"media_is_live\"]:\n return\n\n media_id = livestream[\"media_id\"]\n media_type = \"live\"\n else:\n media_type = \"video\"\n\n res = http.get(PLAYER_API.format(media_type, media_id))\n player = http.json(res, schema=_player_schema)\n\n if media_type == \"live\":\n return self._get_live_streams(player)\n else:\n return self._get_video_streams(player)\n\n\n__plugin__ = Hitbox\n", "path": "src/streamlink/plugins/hitbox.py"}]}
2,799
159
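The golden diff in the record above swaps a direct key lookup for `dict.get`, which is the whole fix for the reported `KeyError: 'media_hosted_media'`. A minimal, self-contained sketch of that defensive-access pattern follows; the payload is invented for illustration and is not real Smashcast API output:

```python
# Hypothetical API payload: the 'media_hosted_media' field is simply absent.
livestream = {"media_user_name": "example", "media_is_live": True, "media_id": "123"}

# livestream["media_hosted_media"] would raise KeyError here; .get() returns
# None instead, so the hosting branch is skipped and the stream loads normally.
hosted = livestream.get("media_hosted_media")
if hosted:
    livestream = hosted

assert livestream["media_id"] == "123"
```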
gh_patches_debug_29324
rasdani/github-patches
git_diff
dask__distributed-228
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- No such file or directory "bokeh" - in release 1.9.2 (conda install) Get this error in my logs: ``` scheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786 scheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI scheduler_1 | Traceback (most recent call last): scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args) scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 950, in __init__ scheduler_1 | restore_signals, start_new_session) scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 1544, in _execute_child scheduler_1 | raise child_exception_type(errno_num, err_msg) scheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh' scheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler scheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700 scheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/cli/dscheduler.py` Content: ``` 1 from __future__ import print_function, division, absolute_import 2 3 import logging 4 import os 5 import socket 6 import subprocess 7 from sys import argv, exit 8 from time import sleep 9 10 import click 11 12 import distributed 13 from distributed import Scheduler 14 from distributed.utils import get_ip 15 from distributed.http import HTTPScheduler 16 from distributed.cli.utils import check_python_3 17 from tornado.ioloop import IOLoop 18 19 logger = logging.getLogger('distributed.scheduler') 20 21 ip = get_ip() 22 23 import signal 24 25 bokeh_proc = [False] 26 27 28 def handle_signal(sig, frame): 29 if bokeh_proc[0]: 30 bokeh_proc[0].terminate() 31 IOLoop.instance().add_callback(IOLoop.instance().stop) 32 33 signal.signal(signal.SIGINT, handle_signal) 34 signal.signal(signal.SIGTERM, handle_signal) 35 36 37 @click.command() 38 @click.argument('center', type=str, default='') 39 @click.option('--port', type=int, default=8786, help="Serving port") 40 @click.option('--http-port', type=int, default=9786, help="HTTP port") 41 @click.option('--bokeh-port', type=int, default=8787, help="HTTP port") 42 @click.option('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True, 43 required=False, help="Launch Bokeh Web UI") 44 @click.option('--host', type=str, default=ip, 45 help="Serving host defaults to %s" % ip) 46 @click.option('--show/--no-show', default=False, help="Show web UI") 47 def main(center, host, port, http_port, bokeh_port, show, _bokeh): 48 ip = socket.gethostbyname(host) 49 loop = IOLoop.current() 50 scheduler = Scheduler(center, ip=ip, 51 services={('http', http_port): HTTPScheduler}) 52 if center: 53 loop.run_sync(scheduler.sync_center) 54 scheduler.start(port) 55 56 if _bokeh: 57 try: 58 import bokeh 59 import distributed.bokeh 60 hosts = ['%s:%d' % (h, bokeh_port) for h in 61 ['localhost', '127.0.0.1', ip, socket.gethostname(), host]] 62 dirname = os.path.dirname(distributed.__file__) 63 paths = [os.path.join(dirname, 'bokeh', name) 64 for name in ['status', 'tasks']] 65 args = (['bokeh', 'serve'] + paths + 66 ['--log-level', 'warning', 67 '--check-unused-sessions=50', 68 '--unused-session-lifetime=1', 69 
'--port', str(bokeh_port)] + 70 sum([['--host', host] for host in hosts], [])) 71 if show: 72 args.append('--show') 73 bokeh_proc[0] = subprocess.Popen(args) 74 75 logger.info(" Start Bokeh UI at: http://%s:%d/status/" 76 % (ip, bokeh_port)) 77 except ImportError: 78 logger.info("Please install Bokeh to get Web UI") 79 except Exception as e: 80 logger.warn("Could not start Bokeh web UI", exc_info=True) 81 82 loop.start() 83 loop.close() 84 scheduler.stop() 85 bokeh_proc[0].terminate() 86 87 logger.info("End scheduler at %s:%d", ip, port) 88 89 90 def go(): 91 check_python_3() 92 main() 93 94 95 if __name__ == '__main__': 96 go() 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py --- a/distributed/cli/dscheduler.py +++ b/distributed/cli/dscheduler.py @@ -1,9 +1,9 @@ from __future__ import print_function, division, absolute_import import logging +import multiprocessing import os import socket -import subprocess from sys import argv, exit from time import sleep @@ -22,12 +22,8 @@ import signal -bokeh_proc = [False] - def handle_signal(sig, frame): - if bokeh_proc[0]: - bokeh_proc[0].terminate() IOLoop.instance().add_callback(IOLoop.instance().stop) signal.signal(signal.SIGINT, handle_signal) @@ -70,7 +66,10 @@ sum([['--host', host] for host in hosts], [])) if show: args.append('--show') - bokeh_proc[0] = subprocess.Popen(args) + from bokeh.command.bootstrap import main + proc = multiprocessing.Process(target=main, args=(args,)) + proc.daemon = True + proc.start() logger.info(" Start Bokeh UI at: http://%s:%d/status/" % (ip, bokeh_port)) @@ -82,7 +81,7 @@ loop.start() loop.close() scheduler.stop() - bokeh_proc[0].terminate() + proc.terminate() logger.info("End scheduler at %s:%d", ip, port)
{"golden_diff": "diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py\n--- a/distributed/cli/dscheduler.py\n+++ b/distributed/cli/dscheduler.py\n@@ -1,9 +1,9 @@\n from __future__ import print_function, division, absolute_import\n \n import logging\n+import multiprocessing\n import os\n import socket\n-import subprocess\n from sys import argv, exit\n from time import sleep\n \n@@ -22,12 +22,8 @@\n \n import signal\n \n-bokeh_proc = [False]\n-\n \n def handle_signal(sig, frame):\n- if bokeh_proc[0]:\n- bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n \n signal.signal(signal.SIGINT, handle_signal)\n@@ -70,7 +66,10 @@\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n- bokeh_proc[0] = subprocess.Popen(args)\n+ from bokeh.command.bootstrap import main\n+ proc = multiprocessing.Process(target=main, args=(args,))\n+ proc.daemon = True\n+ proc.start()\n \n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n@@ -82,7 +81,7 @@\n loop.start()\n loop.close()\n scheduler.stop()\n- bokeh_proc[0].terminate()\n+ proc.terminate()\n \n logger.info(\"End scheduler at %s:%d\", ip, port)\n", "issue": "No such file or directory \"bokeh\" - in release 1.9.2 (conda install)\nGet this error in my logs:\n\n```\nscheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786\nscheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI\nscheduler_1 | Traceback (most recent call last):\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py\", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 950, in __init__\nscheduler_1 | restore_signals, start_new_session)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 1544, in _execute_child\nscheduler_1 | raise child_exception_type(errno_num, err_msg)\nscheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh'\nscheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler\nscheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700\nscheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700\n```\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport os\nimport socket\nimport subprocess\nfrom sys import argv, exit\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nip = get_ip()\n\nimport signal\n\nbokeh_proc = [False]\n\n\ndef handle_signal(sig, frame):\n if bokeh_proc[0]:\n bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,\n required=False, 
help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=ip,\n help=\"Serving host defaults to %s\" % ip)\[email protected]('--show/--no-show', default=False, help=\"Show web UI\")\ndef main(center, host, port, http_port, bokeh_port, show, _bokeh):\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n if _bokeh:\n try:\n import bokeh\n import distributed.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n args = (['bokeh', 'serve'] + paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n '--unused-session-lifetime=1',\n '--port', str(bokeh_port)] +\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n bokeh_proc[0] = subprocess.Popen(args)\n\n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n except ImportError:\n logger.info(\"Please install Bokeh to get Web UI\")\n except Exception as e:\n logger.warn(\"Could not start Bokeh web UI\", exc_info=True)\n\n loop.start()\n loop.close()\n scheduler.stop()\n bokeh_proc[0].terminate()\n\n logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dscheduler.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport multiprocessing\nimport os\nimport socket\nfrom sys import argv, exit\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nip = get_ip()\n\nimport signal\n\n\ndef handle_signal(sig, frame):\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,\n required=False, help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=ip,\n help=\"Serving host defaults to %s\" % ip)\[email protected]('--show/--no-show', default=False, help=\"Show web UI\")\ndef main(center, host, port, http_port, bokeh_port, show, _bokeh):\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n if _bokeh:\n try:\n import bokeh\n import distributed.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n args = (['bokeh', 'serve'] 
+ paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n '--unused-session-lifetime=1',\n '--port', str(bokeh_port)] +\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n from bokeh.command.bootstrap import main\n proc = multiprocessing.Process(target=main, args=(args,))\n proc.daemon = True\n proc.start()\n\n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n except ImportError:\n logger.info(\"Please install Bokeh to get Web UI\")\n except Exception as e:\n logger.warn(\"Could not start Bokeh web UI\", exc_info=True)\n\n loop.start()\n loop.close()\n scheduler.stop()\n proc.terminate()\n\n logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dscheduler.py"}]}
1,523
337
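The patch above replaces `subprocess.Popen(['bokeh', ...])` — which raises `FileNotFoundError` when no `bokeh` executable is on `PATH` — with an in-process `multiprocessing.Process` wrapping `bokeh.command.bootstrap.main`. A rough sketch of that general pattern is below; the target function is a stand-in so the snippet runs without Bokeh installed:

```python
import multiprocessing


def cli_main(argv):
    # Stand-in for bokeh.command.bootstrap.main(argv); calling a Python entry
    # point directly avoids depending on a 'bokeh' executable being on PATH.
    print("would serve with args:", argv)


if __name__ == "__main__":
    proc = multiprocessing.Process(target=cli_main,
                                   args=(["bokeh", "serve", "--port", "8787"],))
    proc.daemon = True  # dies with the parent, mirroring the scheduler's cleanup
    proc.start()
    proc.join()
```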
gh_patches_debug_12036
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-354
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix TFExample codec --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `record_codec/__init__.py` Content: ``` ``` Path: `record_codec/tf_example_codec.py` Content: ``` 1 from enum import Enum 2 import tensorflow as tf 3 4 5 class TFExampleCodec(object): 6 def __init__(self, feature_columns): 7 self._f_name2type = {} 8 self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns) 9 for f_col in feature_columns: 10 self._f_name2type[f_col.key] = f_col.dtype 11 12 def encode(self, example): 13 f_dict = {} 14 for f_name, f_value in example: 15 f_type = self._f_name2type[f_name] 16 if f_type == tf.string: 17 f_dict[f_name] = tf.train.Feature( 18 bytes_list=tf.train.BytesList(value=f_value) 19 ) 20 elif f_type == tf.float32: 21 f_dict[f_name] = tf.train.Feature( 22 float_list=tf.train.FloatList(value=f_value.flatten()) 23 ) 24 elif f_type == tf.int64: 25 f_dict[f_name] = tf.train.Feature( 26 int64_list=tf.train.Int64List(value=f_value.flatten()) 27 ) 28 else: 29 raise ValueError( 30 "not supported tensorflow data type: " + str(f_type) 31 ) 32 33 example = tf.train.Example(features=tf.train.Features(feature=f_dict)) 34 return example.SerializeToString() 35 36 def decode(self, raw): 37 return tf.parse_single_example(raw, self._example_spec) 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/record_codec/__init__.py b/record_codec/__init__.py --- a/record_codec/__init__.py +++ b/record_codec/__init__.py @@ -0,0 +1,3 @@ +from record_codec.tf_example_codec import TFExampleCodec + +__all__=[TFExampleCodec] diff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py --- a/record_codec/tf_example_codec.py +++ b/record_codec/tf_example_codec.py @@ -4,10 +4,8 @@ class TFExampleCodec(object): def __init__(self, feature_columns): - self._f_name2type = {} self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns) - for f_col in feature_columns: - self._f_name2type[f_col.key] = f_col.dtype + self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns} def encode(self, example): f_dict = {}
{"golden_diff": "diff --git a/record_codec/__init__.py b/record_codec/__init__.py\n--- a/record_codec/__init__.py\n+++ b/record_codec/__init__.py\n@@ -0,0 +1,3 @@\n+from record_codec.tf_example_codec import TFExampleCodec\n+\n+__all__=[TFExampleCodec]\ndiff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py\n--- a/record_codec/tf_example_codec.py\n+++ b/record_codec/tf_example_codec.py\n@@ -4,10 +4,8 @@\n \n class TFExampleCodec(object):\n def __init__(self, feature_columns):\n- self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n- for f_col in feature_columns:\n- self._f_name2type[f_col.key] = f_col.dtype\n+ self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}\n \n def encode(self, example):\n f_dict = {}\n", "issue": "Fix TFExample codec\n\n", "before_files": [{"content": "", "path": "record_codec/__init__.py"}, {"content": "from enum import Enum\nimport tensorflow as tf\n\n\nclass TFExampleCodec(object):\n def __init__(self, feature_columns):\n self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n for f_col in feature_columns:\n self._f_name2type[f_col.key] = f_col.dtype\n\n def encode(self, example):\n f_dict = {}\n for f_name, f_value in example:\n f_type = self._f_name2type[f_name]\n if f_type == tf.string:\n f_dict[f_name] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=f_value)\n )\n elif f_type == tf.float32:\n f_dict[f_name] = tf.train.Feature(\n float_list=tf.train.FloatList(value=f_value.flatten())\n )\n elif f_type == tf.int64:\n f_dict[f_name] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=f_value.flatten())\n )\n else:\n raise ValueError(\n \"not supported tensorflow data type: \" + str(f_type)\n )\n\n example = tf.train.Example(features=tf.train.Features(feature=f_dict))\n return example.SerializeToString()\n\n def decode(self, raw):\n return tf.parse_single_example(raw, self._example_spec)\n", "path": "record_codec/tf_example_codec.py"}], "after_files": [{"content": "from record_codec.tf_example_codec import TFExampleCodec\n\n__all__=[TFExampleCodec]\n", "path": "record_codec/__init__.py"}, {"content": "from enum import Enum\nimport tensorflow as tf\n\n\nclass TFExampleCodec(object):\n def __init__(self, feature_columns):\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}\n\n def encode(self, example):\n f_dict = {}\n for f_name, f_value in example:\n f_type = self._f_name2type[f_name]\n if f_type == tf.string:\n f_dict[f_name] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=f_value)\n )\n elif f_type == tf.float32:\n f_dict[f_name] = tf.train.Feature(\n float_list=tf.train.FloatList(value=f_value.flatten())\n )\n elif f_type == tf.int64:\n f_dict[f_name] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=f_value.flatten())\n )\n else:\n raise ValueError(\n \"not supported tensorflow data type: \" + str(f_type)\n )\n\n example = tf.train.Example(features=tf.train.Features(feature=f_dict))\n return example.SerializeToString()\n\n def decode(self, raw):\n return tf.parse_single_example(raw, self._example_spec)\n", "path": "record_codec/tf_example_codec.py"}]}
634
233
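Two small changes make up the golden diff above: exporting `TFExampleCodec` from `record_codec/__init__.py` and collapsing the name-to-dtype loop into a dict comprehension. The snippet below restates the second change in isolation with stand-in feature columns (illustrative only; it does not import TensorFlow):

```python
from collections import namedtuple

# Stand-in for tf.feature_column objects, which expose .key and .dtype.
FeatureColumn = namedtuple("FeatureColumn", ["key", "dtype"])
feature_columns = [FeatureColumn("age", "int64"), FeatureColumn("label", "float32")]

# Before the patch: an explicit loop filling the mapping.
f_name2type = {}
for f_col in feature_columns:
    f_name2type[f_col.key] = f_col.dtype

# After the patch: the equivalent dict comprehension.
assert f_name2type == {f_col.key: f_col.dtype for f_col in feature_columns}
```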
gh_patches_debug_18359
rasdani/github-patches
git_diff
pantsbuild__pants-6361
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `pants_requirement` should include the `; python_version<'3'` environment marker Until pants can run on python 3, the `pants_requirement` should add an environment marker to restict the python environment it applies to. After pants only supports `>=3.5` a change should be made to the environment marker. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/backend/python/pants_requirement.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 3 # Licensed under the Apache License, Version 2.0 (see LICENSE). 4 5 from __future__ import absolute_import, division, print_function, unicode_literals 6 7 import os 8 from builtins import object 9 10 from pants.backend.python.python_requirement import PythonRequirement 11 from pants.base.build_environment import pants_version 12 13 14 class PantsRequirement(object): 15 """Exports a `python_requirement_library` pointing at the active pants' corresponding sdist. 16 17 This requirement is useful for custom plugin authors who want to build and test their plugin with 18 pants itself. Using the resulting target as a dependency of their plugin target ensures the 19 dependency stays true to the surrounding repo's version of pants. 20 21 NB: The requirement generated is for official pants releases on pypi; so may not be appropriate 22 for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists. 23 24 :API: public 25 """ 26 27 def __init__(self, parse_context): 28 self._parse_context = parse_context 29 30 def __call__(self, name=None): 31 """ 32 :param string name: The name to use for the target, defaults to the parent dir name. 33 """ 34 name = name or os.path.basename(self._parse_context.rel_path) 35 requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version())) 36 self._parse_context.create_object('python_requirement_library', 37 name=name, 38 requirements=[requirement]) 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py --- a/src/python/pants/backend/python/pants_requirement.py +++ b/src/python/pants/backend/python/pants_requirement.py @@ -32,7 +32,14 @@ :param string name: The name to use for the target, defaults to the parent dir name. """ name = name or os.path.basename(self._parse_context.rel_path) - requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version())) + + # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of + # https://github.com/pantsbuild/pants/issues/6062 + env_marker = "python_version>='2.7' and python_version<'3'" + + requirement = PythonRequirement(requirement="pantsbuild.pants=={version} ; {env_marker}" + .format(version=pants_version(), env_marker=env_marker)) + self._parse_context.create_object('python_requirement_library', name=name, requirements=[requirement])
{"golden_diff": "diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py\n--- a/src/python/pants/backend/python/pants_requirement.py\n+++ b/src/python/pants/backend/python/pants_requirement.py\n@@ -32,7 +32,14 @@\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n- requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n+\n+ # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of\n+ # https://github.com/pantsbuild/pants/issues/6062\n+ env_marker = \"python_version>='2.7' and python_version<'3'\"\n+\n+ requirement = PythonRequirement(requirement=\"pantsbuild.pants=={version} ; {env_marker}\"\n+ .format(version=pants_version(), env_marker=env_marker))\n+\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "issue": "`pants_requirement` should include the `; python_version<'3'` environment marker\nUntil pants can run on python 3, the `pants_requirement` should add an environment marker to restict the python environment it applies to. After pants only supports `>=3.5` a change should be made to the environment marker.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import object\n\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.base.build_environment import pants_version\n\n\nclass PantsRequirement(object):\n \"\"\"Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.\n\n This requirement is useful for custom plugin authors who want to build and test their plugin with\n pants itself. 
Using the resulting target as a dependency of their plugin target ensures the\n dependency stays true to the surrounding repo's version of pants.\n\n NB: The requirement generated is for official pants releases on pypi; so may not be appropriate\n for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.\n\n :API: public\n \"\"\"\n\n def __init__(self, parse_context):\n self._parse_context = parse_context\n\n def __call__(self, name=None):\n \"\"\"\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "path": "src/python/pants/backend/python/pants_requirement.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import object\n\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.base.build_environment import pants_version\n\n\nclass PantsRequirement(object):\n \"\"\"Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.\n\n This requirement is useful for custom plugin authors who want to build and test their plugin with\n pants itself. Using the resulting target as a dependency of their plugin target ensures the\n dependency stays true to the surrounding repo's version of pants.\n\n NB: The requirement generated is for official pants releases on pypi; so may not be appropriate\n for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.\n\n :API: public\n \"\"\"\n\n def __init__(self, parse_context):\n self._parse_context = parse_context\n\n def __call__(self, name=None):\n \"\"\"\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n\n # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of\n # https://github.com/pantsbuild/pants/issues/6062\n env_marker = \"python_version>='2.7' and python_version<'3'\"\n\n requirement = PythonRequirement(requirement=\"pantsbuild.pants=={version} ; {env_marker}\"\n .format(version=pants_version(), env_marker=env_marker))\n\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "path": "src/python/pants/backend/python/pants_requirement.py"}]}
733
253
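The requirement string produced after the patch above carries a PEP 508 environment marker. As a hedged illustration (the version number is a placeholder and the use of the `packaging` library is an assumption — pants itself just formats the string by hand), the marker can be parsed and evaluated like this:

```python
from packaging.requirements import Requirement

req = Requirement(
    "pantsbuild.pants==1.9.0 ; python_version>='2.7' and python_version<'3'"
)
print(req.name, req.specifier, req.marker)

# On a Python 3 interpreter the marker evaluates to False, so pip/pex would
# skip installing this requirement entirely.
print(req.marker.evaluate())
```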
gh_patches_debug_19531
rasdani/github-patches
git_diff
fidals__shopelectro-733
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Adapt page usage to the new refarm pages interface At https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces. Adapt site code to the new interface. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/context.py` Content: ``` 1 from functools import partial 2 3 from django.conf import settings 4 from django.shortcuts import get_object_or_404 5 6 from catalog import newcontext 7 from images.models import Image 8 from pages import models as pages_models, newcontext as pages_newcontext 9 from shopelectro import models, request_data 10 11 12 # @todo #255:60m Improve `SortingOption` interface. 13 # Now it's located in context and this is wrong. 14 # Maybe refactor `CATEGORY_SORTING_OPTIONS`. 15 class SortingOption: 16 def __init__(self, index=0): 17 options = settings.CATEGORY_SORTING_OPTIONS[index] 18 self.label = options['label'] 19 self.field = options['field'] 20 self.direction = options['direction'] 21 22 @property 23 def directed_field(self): 24 return self.direction + self.field 25 26 27 class Page(newcontext.Context): 28 29 def __init__(self, page, tags: newcontext.Tags): 30 self._page = page 31 self._tags = tags 32 33 def context(self): 34 def template_context(page, tag_titles, tags): 35 return { 36 'page': page, 37 'tag_titles': tag_titles, 38 'tags': tags, 39 } 40 41 tags_qs = self._tags.qs() 42 self._page.get_template_render_context = partial( 43 template_context, self._page, tags_qs.as_title(), tags_qs 44 ) 45 46 return { 47 'page': self._page, 48 } 49 50 51 class Catalog(newcontext.Context): 52 53 def __init__(self, request_data_: request_data.Catalog): 54 self.request_data = request_data_ 55 56 @property 57 def page(self): 58 return get_object_or_404( 59 pages_models.ModelPage, 60 slug=self.request_data.slug 61 ) 62 63 @property 64 def category(self): 65 return self.page.model 66 67 def context(self) -> dict: 68 all_tags = newcontext.Tags(models.Tag.objects.all()) 69 70 selected_tags = newcontext.tags.ParsedTags( 71 tags=all_tags, 72 raw_tags=self.request_data.tags, 73 ) 74 if self.request_data.tags: 75 selected_tags = newcontext.tags.Checked404Tags(selected_tags) 76 77 products = ( 78 models.Product.objects.active() 79 .filter_descendants(self.category) 80 .tagged_or_all(selected_tags.qs()) 81 .order_by(SortingOption(index=self.request_data.sorting_index).directed_field) 82 ) 83 84 """ 85 We have to use separated variable for pagination. 86 87 Because paginated QuerySet can not used as QuerySet. 88 It's not the most strong place of Django ORM, of course. 89 :return: ProductsContext with paginated QuerySet inside 90 """ 91 # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes. 92 # Rename Checked404Tags to ExistingOr404. 
93 paginated = newcontext.products.PaginatedProducts( 94 products=products, 95 url=self.request_data.request.path, 96 page_number=self.request_data.pagination_page_number, 97 per_page=self.request_data.pagination_per_page, 98 ) 99 100 images = newcontext.products.ProductImages(paginated.products, Image.objects.all()) 101 brands = newcontext.products.ProductBrands(paginated.products, all_tags) 102 grouped_tags = newcontext.tags.GroupedTags( 103 tags=newcontext.tags.TagsByProducts(all_tags, products) 104 ) 105 page = Page(self.page, selected_tags) 106 category = newcontext.category.Context(self.category) 107 params = { 108 'view_type': self.request_data.get_view_type(), 109 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(), 110 'limits': settings.CATEGORY_STEP_MULTIPLIERS, 111 'sort': self.request_data.sorting_index, 112 } 113 114 return { 115 **params, 116 **pages_newcontext.Contexts([ 117 page, category, paginated, 118 images, brands, grouped_tags 119 ]).context() 120 } 121 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/context.py b/shopelectro/context.py --- a/shopelectro/context.py +++ b/shopelectro/context.py @@ -1,5 +1,3 @@ -from functools import partial - from django.conf import settings from django.shortcuts import get_object_or_404 @@ -31,18 +29,15 @@ self._tags = tags def context(self): - def template_context(page, tag_titles, tags): - return { - 'page': page, - 'tag_titles': tag_titles, - 'tags': tags, - } - tags_qs = self._tags.qs() - self._page.get_template_render_context = partial( - template_context, self._page, tags_qs.as_title(), tags_qs - ) - + # use dirty patch here, because it's the most simple method + # to make shared templates work. + # For example `templates/layout/metadata.html`. + self._page.display = { + 'page': self._page, + 'tag_titles': tags_qs.as_title(), + 'tags': tags_qs, + } return { 'page': self._page, }
{"golden_diff": "diff --git a/shopelectro/context.py b/shopelectro/context.py\n--- a/shopelectro/context.py\n+++ b/shopelectro/context.py\n@@ -1,5 +1,3 @@\n-from functools import partial\n-\n from django.conf import settings\n from django.shortcuts import get_object_or_404\n \n@@ -31,18 +29,15 @@\n self._tags = tags\n \n def context(self):\n- def template_context(page, tag_titles, tags):\n- return {\n- 'page': page,\n- 'tag_titles': tag_titles,\n- 'tags': tags,\n- }\n-\n tags_qs = self._tags.qs()\n- self._page.get_template_render_context = partial(\n- template_context, self._page, tags_qs.as_title(), tags_qs\n- )\n-\n+ # use dirty patch here, because it's the most simple method\n+ # to make shared templates work.\n+ # For example `templates/layout/metadata.html`.\n+ self._page.display = {\n+ 'page': self._page,\n+ 'tag_titles': tags_qs.as_title(),\n+ 'tags': tags_qs,\n+ }\n return {\n 'page': self._page,\n }\n", "issue": "Adapt page usage to the new refarm pages interface\nAt https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces.\r\n\r\nAdapt site code to the new interface.\r\n\n", "before_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom catalog import newcontext\nfrom images.models import Image\nfrom pages import models as pages_models, newcontext as pages_newcontext\nfrom shopelectro import models, request_data\n\n\n# @todo #255:60m Improve `SortingOption` interface.\n# Now it's located in context and this is wrong.\n# Maybe refactor `CATEGORY_SORTING_OPTIONS`.\nclass SortingOption:\n def __init__(self, index=0):\n options = settings.CATEGORY_SORTING_OPTIONS[index]\n self.label = options['label']\n self.field = options['field']\n self.direction = options['direction']\n\n @property\n def directed_field(self):\n return self.direction + self.field\n\n\nclass Page(newcontext.Context):\n\n def __init__(self, page, tags: newcontext.Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n }\n\n\nclass Catalog(newcontext.Context):\n\n def __init__(self, request_data_: request_data.Catalog):\n self.request_data = request_data_\n\n @property\n def page(self):\n return get_object_or_404(\n pages_models.ModelPage,\n slug=self.request_data.slug\n )\n\n @property\n def category(self):\n return self.page.model\n\n def context(self) -> dict:\n all_tags = newcontext.Tags(models.Tag.objects.all())\n\n selected_tags = newcontext.tags.ParsedTags(\n tags=all_tags,\n raw_tags=self.request_data.tags,\n )\n if self.request_data.tags:\n selected_tags = newcontext.tags.Checked404Tags(selected_tags)\n\n products = (\n models.Product.objects.active()\n .filter_descendants(self.category)\n .tagged_or_all(selected_tags.qs())\n .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)\n )\n\n \"\"\"\n We have to use separated variable for pagination.\n\n Because paginated QuerySet can not used as QuerySet.\n It's not the most strong place of Django ORM, of course.\n :return: ProductsContext with paginated QuerySet inside\n \"\"\"\n # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.\n # Rename Checked404Tags 
to ExistingOr404.\n paginated = newcontext.products.PaginatedProducts(\n products=products,\n url=self.request_data.request.path,\n page_number=self.request_data.pagination_page_number,\n per_page=self.request_data.pagination_per_page,\n )\n\n images = newcontext.products.ProductImages(paginated.products, Image.objects.all())\n brands = newcontext.products.ProductBrands(paginated.products, all_tags)\n grouped_tags = newcontext.tags.GroupedTags(\n tags=newcontext.tags.TagsByProducts(all_tags, products)\n )\n page = Page(self.page, selected_tags)\n category = newcontext.category.Context(self.category)\n params = {\n 'view_type': self.request_data.get_view_type(),\n 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': self.request_data.sorting_index,\n }\n\n return {\n **params,\n **pages_newcontext.Contexts([\n page, category, paginated,\n images, brands, grouped_tags\n ]).context()\n }\n", "path": "shopelectro/context.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom catalog import newcontext\nfrom images.models import Image\nfrom pages import models as pages_models, newcontext as pages_newcontext\nfrom shopelectro import models, request_data\n\n\n# @todo #255:60m Improve `SortingOption` interface.\n# Now it's located in context and this is wrong.\n# Maybe refactor `CATEGORY_SORTING_OPTIONS`.\nclass SortingOption:\n def __init__(self, index=0):\n options = settings.CATEGORY_SORTING_OPTIONS[index]\n self.label = options['label']\n self.field = options['field']\n self.direction = options['direction']\n\n @property\n def directed_field(self):\n return self.direction + self.field\n\n\nclass Page(newcontext.Context):\n\n def __init__(self, page, tags: newcontext.Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n tags_qs = self._tags.qs()\n # use dirty patch here, because it's the most simple method\n # to make shared templates work.\n # For example `templates/layout/metadata.html`.\n self._page.display = {\n 'page': self._page,\n 'tag_titles': tags_qs.as_title(),\n 'tags': tags_qs,\n }\n return {\n 'page': self._page,\n }\n\n\nclass Catalog(newcontext.Context):\n\n def __init__(self, request_data_: request_data.Catalog):\n self.request_data = request_data_\n\n @property\n def page(self):\n return get_object_or_404(\n pages_models.ModelPage,\n slug=self.request_data.slug\n )\n\n @property\n def category(self):\n return self.page.model\n\n def context(self) -> dict:\n all_tags = newcontext.Tags(models.Tag.objects.all())\n\n selected_tags = newcontext.tags.ParsedTags(\n tags=all_tags,\n raw_tags=self.request_data.tags,\n )\n if self.request_data.tags:\n selected_tags = newcontext.tags.Checked404Tags(selected_tags)\n\n products = (\n models.Product.objects.active()\n .filter_descendants(self.category)\n .tagged_or_all(selected_tags.qs())\n .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)\n )\n\n \"\"\"\n We have to use separated variable for pagination.\n\n Because paginated QuerySet can not used as QuerySet.\n It's not the most strong place of Django ORM, of course.\n :return: ProductsContext with paginated QuerySet inside\n \"\"\"\n # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.\n # Rename Checked404Tags to ExistingOr404.\n paginated = newcontext.products.PaginatedProducts(\n products=products,\n url=self.request_data.request.path,\n 
page_number=self.request_data.pagination_page_number,\n per_page=self.request_data.pagination_per_page,\n )\n\n images = newcontext.products.ProductImages(paginated.products, Image.objects.all())\n brands = newcontext.products.ProductBrands(paginated.products, all_tags)\n grouped_tags = newcontext.tags.GroupedTags(\n tags=newcontext.tags.TagsByProducts(all_tags, products)\n )\n page = Page(self.page, selected_tags)\n category = newcontext.category.Context(self.category)\n params = {\n 'view_type': self.request_data.get_view_type(),\n 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': self.request_data.sorting_index,\n }\n\n return {\n **params,\n **pages_newcontext.Contexts([\n page, category, paginated,\n images, brands, grouped_tags\n ]).context()\n }\n", "path": "shopelectro/context.py"}]}
num_tokens: 1,397
num_tokens_diff: 271
problem_id: gh_patches_debug_41380
source: rasdani/github-patches
task_type: git_diff
in_source_id: scoutapp__scout_apm_python-462
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix Jinja2 async integration Since https://github.com/pallets/jinja/issues/765 , Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398). We need to update to only instrument at the point the `render_async` method becomes available. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/instruments/jinja2.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import logging 5 6 import wrapt 7 8 from scout_apm.core.tracked_request import TrackedRequest 9 10 try: 11 from jinja2 import Template 12 except ImportError: # pragma: no cover 13 Template = None 14 15 # The async_ module can only be shipped on Python 3.6+ 16 try: 17 from scout_apm.async_.instruments.jinja2 import wrapped_render_async 18 except ImportError: 19 wrapped_render_async = None 20 21 22 logger = logging.getLogger(__name__) 23 24 25 have_patched_template_render = False 26 have_patched_template_render_async = False 27 28 29 def ensure_installed(): 30 global have_patched_template_render 31 global have_patched_template_render_async 32 33 logger.info("Ensuring Jinja2 instrumentation is installed.") 34 35 if Template is None: 36 logger.info("Unable to import jinja2.Template") 37 return 38 39 if not have_patched_template_render: 40 try: 41 Template.render = wrapped_render(Template.render) 42 except Exception as exc: 43 logger.warning( 44 "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc 45 ) 46 else: 47 have_patched_template_render = True 48 49 if not have_patched_template_render_async and wrapped_render_async is not None: 50 try: 51 Template.render_async = wrapped_render_async(Template.render_async) 52 except Exception as exc: 53 logger.warning( 54 "Unable to instrument jinja2.Template.render_async: %r", 55 exc, 56 exc_info=exc, 57 ) 58 else: 59 have_patched_template_render_async = True 60 61 62 @wrapt.decorator 63 def wrapped_render(wrapped, instance, args, kwargs): 64 tracked_request = TrackedRequest.instance() 65 span = tracked_request.start_span(operation="Template/Render") 66 span.tag("name", instance.name) 67 try: 68 return wrapped(*args, **kwargs) 69 finally: 70 tracked_request.stop_span() 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py --- a/src/scout_apm/instruments/jinja2.py +++ b/src/scout_apm/instruments/jinja2.py @@ -2,11 +2,17 @@ from __future__ import absolute_import, division, print_function, unicode_literals import logging +import sys import wrapt from scout_apm.core.tracked_request import TrackedRequest +try: + from jinja2 import Environment +except ImportError: # pragma: no cover + Environment = None + try: from jinja2 import Template except ImportError: # pragma: no cover @@ -22,13 +28,14 @@ logger = logging.getLogger(__name__) +have_patched_environment_init = False have_patched_template_render = False have_patched_template_render_async = False def ensure_installed(): + global have_patched_environment_init global have_patched_template_render - global have_patched_template_render_async logger.info("Ensuring Jinja2 instrumentation is installed.") @@ -36,27 +43,27 @@ logger.info("Unable to import jinja2.Template") return - if not have_patched_template_render: + if not have_patched_environment_init: try: - Template.render = wrapped_render(Template.render) + Environment.__init__ = wrapped_environment_init(Environment.__init__) except Exception as exc: logger.warning( - "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc + "Unable to instrument jinja2.Environment.__init__: %r", + exc, + exc_info=exc, ) else: - have_patched_template_render = True + have_patched_environment_init = True - if not have_patched_template_render_async and wrapped_render_async is not None: + if not have_patched_template_render: try: - Template.render_async = wrapped_render_async(Template.render_async) + Template.render = wrapped_render(Template.render) except Exception as exc: logger.warning( - "Unable to instrument jinja2.Template.render_async: %r", - exc, - exc_info=exc, + "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc ) else: - have_patched_template_render_async = True + have_patched_template_render = True @wrapt.decorator @@ -68,3 +75,33 @@ return wrapped(*args, **kwargs) finally: tracked_request.stop_span() + + [email protected] +def wrapped_environment_init(wrapped, instance, args, kwargs): + """ + Delayed wrapping of render_async(), since Template won't have this method + until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is + done conditionally in Environment.__init__: + https://github.com/pallets/jinja/issues/765 + """ + global have_patched_template_render_async + result = wrapped(*args, **kwargs) + + if ( + wrapped_render_async is not None + and not have_patched_template_render_async + and "jinja2.asyncsupport" in sys.modules + ): + try: + Template.render_async = wrapped_render_async(Template.render_async) + except Exception as exc: + logger.warning( + "Unable to instrument jinja2.Template.render_async: %r", + exc, + exc_info=exc, + ) + else: + have_patched_template_render_async = True + + return result
{"golden_diff": "diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py\n--- a/src/scout_apm/instruments/jinja2.py\n+++ b/src/scout_apm/instruments/jinja2.py\n@@ -2,11 +2,17 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import logging\n+import sys\n \n import wrapt\n \n from scout_apm.core.tracked_request import TrackedRequest\n \n+try:\n+ from jinja2 import Environment\n+except ImportError: # pragma: no cover\n+ Environment = None\n+\n try:\n from jinja2 import Template\n except ImportError: # pragma: no cover\n@@ -22,13 +28,14 @@\n logger = logging.getLogger(__name__)\n \n \n+have_patched_environment_init = False\n have_patched_template_render = False\n have_patched_template_render_async = False\n \n \n def ensure_installed():\n+ global have_patched_environment_init\n global have_patched_template_render\n- global have_patched_template_render_async\n \n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n \n@@ -36,27 +43,27 @@\n logger.info(\"Unable to import jinja2.Template\")\n return\n \n- if not have_patched_template_render:\n+ if not have_patched_environment_init:\n try:\n- Template.render = wrapped_render(Template.render)\n+ Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ \"Unable to instrument jinja2.Environment.__init__: %r\",\n+ exc,\n+ exc_info=exc,\n )\n else:\n- have_patched_template_render = True\n+ have_patched_environment_init = True\n \n- if not have_patched_template_render_async and wrapped_render_async is not None:\n+ if not have_patched_template_render:\n try:\n- Template.render_async = wrapped_render_async(Template.render_async)\n+ Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render_async: %r\",\n- exc,\n- exc_info=exc,\n+ \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n- have_patched_template_render_async = True\n+ have_patched_template_render = True\n \n \n @wrapt.decorator\n@@ -68,3 +75,33 @@\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n+\n+\[email protected]\n+def wrapped_environment_init(wrapped, instance, args, kwargs):\n+ \"\"\"\n+ Delayed wrapping of render_async(), since Template won't have this method\n+ until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n+ done conditionally in Environment.__init__:\n+ https://github.com/pallets/jinja/issues/765\n+ \"\"\"\n+ global have_patched_template_render_async\n+ result = wrapped(*args, **kwargs)\n+\n+ if (\n+ wrapped_render_async is not None\n+ and not have_patched_template_render_async\n+ and \"jinja2.asyncsupport\" in sys.modules\n+ ):\n+ try:\n+ Template.render_async = wrapped_render_async(Template.render_async)\n+ except Exception as exc:\n+ logger.warning(\n+ \"Unable to instrument jinja2.Template.render_async: %r\",\n+ exc,\n+ exc_info=exc,\n+ )\n+ else:\n+ have_patched_template_render_async = True\n+\n+ return result\n", "issue": "Fix Jinja2 async integration\nSince https://github.com/pallets/jinja/issues/765 , Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398).\r\n\r\nWe need to update to only instrument at the point the `render_async` method becomes available.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, 
division, print_function, unicode_literals\n\nimport logging\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_template_render\n global have_patched_template_render_async\n\n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n\n if Template is None:\n logger.info(\"Unable to import jinja2.Template\")\n return\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n if not have_patched_template_render_async and wrapped_render_async is not None:\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Template/Render\")\n span.tag(\"name\", instance.name)\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n", "path": "src/scout_apm/instruments/jinja2.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Environment\nexcept ImportError: # pragma: no cover\n Environment = None\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_environment_init = False\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_environment_init\n global have_patched_template_render\n\n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n\n if Template is None:\n logger.info(\"Unable to import jinja2.Template\")\n return\n\n if not have_patched_environment_init:\n try:\n Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Environment.__init__: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_environment_init = True\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n span = 
tracked_request.start_span(operation=\"Template/Render\")\n span.tag(\"name\", instance.name)\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n\n\[email protected]\ndef wrapped_environment_init(wrapped, instance, args, kwargs):\n \"\"\"\n Delayed wrapping of render_async(), since Template won't have this method\n until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n done conditionally in Environment.__init__:\n https://github.com/pallets/jinja/issues/765\n \"\"\"\n global have_patched_template_render_async\n result = wrapped(*args, **kwargs)\n\n if (\n wrapped_render_async is not None\n and not have_patched_template_render_async\n and \"jinja2.asyncsupport\" in sys.modules\n ):\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n return result\n", "path": "src/scout_apm/instruments/jinja2.py"}]}
num_tokens: 893
num_tokens_diff: 836
problem_id: gh_patches_debug_2580
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__setuptools-4127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Setuptools 69.0.0 breaks Astropy's setup ### setuptools version setuptools==69.0.0 ### Python version 3.12 ### OS Ubuntu ### Additional environment information _No response_ ### Description About 15h ago, Astropy's CI started failing to build with ``` ImportError: cannot import name 'newer_group' from 'setuptools.dep_util' ``` This seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features). Nonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended ? ### Expected behavior a deprecation warning instead of a hard error ? ### How to Reproduce ```shell $ python -c "from setuptools.dep_util import newer_group" ``` ### Output ```console Traceback (most recent call last): File "<string>", line 1, in <module> ImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/dep_util.py` Content: ``` 1 import warnings 2 3 from ._distutils import _modified 4 5 6 def __getattr__(name): 7 if name not in ['newer_pairwise_group']: 8 raise AttributeError(name) 9 warnings.warn( 10 "dep_util is Deprecated. Use functions from setuptools.modified instead.", 11 DeprecationWarning, 12 stacklevel=2, 13 ) 14 return getattr(_modified, name) 15 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py --- a/setuptools/dep_util.py +++ b/setuptools/dep_util.py @@ -4,7 +4,7 @@ def __getattr__(name): - if name not in ['newer_pairwise_group']: + if name not in ['newer_group', 'newer_pairwise_group']: raise AttributeError(name) warnings.warn( "dep_util is Deprecated. Use functions from setuptools.modified instead.",
{"golden_diff": "diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py\n--- a/setuptools/dep_util.py\n+++ b/setuptools/dep_util.py\n@@ -4,7 +4,7 @@\n \n \n def __getattr__(name):\n- if name not in ['newer_pairwise_group']:\n+ if name not in ['newer_group', 'newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n", "issue": "[BUG] Setuptools 69.0.0 breaks Astropy's setup\n### setuptools version\n\nsetuptools==69.0.0\n\n### Python version\n\n3.12\n\n### OS\n\nUbuntu\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nAbout 15h ago, Astropy's CI started failing to build with\r\n```\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util'\r\n```\r\nThis seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features).\r\nNonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended ?\n\n### Expected behavior\n\na deprecation warning instead of a hard error ?\n\n### How to Reproduce\n\n```shell\r\n$ python -c \"from setuptools.dep_util import newer_group\"\r\n```\n\n### Output\n\n```console\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py)\r\n```\r\n\n", "before_files": [{"content": "import warnings\n\nfrom ._distutils import _modified\n\n\ndef __getattr__(name):\n if name not in ['newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return getattr(_modified, name)\n", "path": "setuptools/dep_util.py"}], "after_files": [{"content": "import warnings\n\nfrom ._distutils import _modified\n\n\ndef __getattr__(name):\n if name not in ['newer_group', 'newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return getattr(_modified, name)\n", "path": "setuptools/dep_util.py"}]}
num_tokens: 638
num_tokens_diff: 107
problem_id: gh_patches_debug_21137
source: rasdani/github-patches
task_type: git_diff
in_source_id: localstack__localstack-1746
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- describe-stream vs describe-stream-summary on non-existent streams **Problem** For *non-existent* Kinesis streams: * the command `awslocal kinesis describe-stream` returns a ResourceNotFoundException (as expected) * whereas `describe-stream-summary` returns stream details (as if it existed). **Expected behaviour** It's expected that both commands return a ResourceNotFoundException for non-existent streams. It has been verified that this is the case with AWS CLI with real AWS. **Reproduction** ``` ✗ docker run -d -e SERVICES=s3,iam,kinesis localstack/localstack:0.10.2 513dcffc9a7e8f7b4c12b20972119c5fd797f269ae4cbfe62d8bc9f66a6116eb ✗ docker container ls | head -n2 CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 513dcffc9a7e localstack/localstack:0.10.2 "docker-entrypoint.sh" 44 seconds ago Up 42 seconds 4567-4597/tcp, 8080/tcp nostalgic_mcclintock ✗ docker exec -it nostalgic_mcclintock sh /opt/code/localstack # awslocal kinesis describe-stream --stream-name idontexist An error occurred (ResourceNotFoundException) when calling the DescribeStream operation: Stream idontexist under account 000000000000 not found. /opt/code/localstack # awslocal kinesis describe-stream-summary --stream-name idontexist { "StreamDescriptionSummary": { "StreamName": "idontexist", "StreamARN": "idontexist", (...) "OpenShardCount": 0, "ConsumerCount": 0 } } /opt/code/localstack # awslocal kinesis list-streams { "StreamNames": [] } /opt/code/localstack # ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `localstack/services/kinesis/kinesis_listener.py` Content: ``` 1 import json 2 import random 3 from requests.models import Response 4 from localstack import config 5 from localstack.utils.common import to_str 6 from localstack.utils.analytics import event_publisher 7 from localstack.services.awslambda import lambda_api 8 from localstack.services.generic_proxy import ProxyListener 9 10 # action headers 11 ACTION_PREFIX = 'Kinesis_20131202' 12 ACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX 13 ACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX 14 ACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX 15 ACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX 16 ACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX 17 18 19 class ProxyListenerKinesis(ProxyListener): 20 21 def forward_request(self, method, path, data, headers): 22 data = json.loads(to_str(data)) 23 action = headers.get('X-Amz-Target') 24 25 if action == '%s.DescribeStreamSummary' % ACTION_PREFIX: 26 stream_arn = data.get('StreamARN') or data['StreamName'] 27 # TODO fix values below 28 result = { 29 'StreamDescriptionSummary': { 30 'ConsumerCount': 0, 31 'EnhancedMonitoring': [], 32 'KeyId': 'string', 33 'OpenShardCount': 0, 34 'RetentionPeriodHours': 1, 35 'StreamARN': stream_arn, 36 # 'StreamCreationTimestamp': number, 37 'StreamName': data['StreamName'], 38 'StreamStatus': 'ACTIVE' 39 } 40 } 41 return result 42 if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX: 43 consumer_arn = data.get('ConsumerARN') or data['ConsumerName'] 44 consumer_name = data.get('ConsumerName') or data['ConsumerARN'] 45 result = { 46 'ConsumerDescription': { 47 'ConsumerARN': consumer_arn, 48 # 'ConsumerCreationTimestamp': number, 49 'ConsumerName': consumer_name, 50 'ConsumerStatus': 'ACTIVE', 51 'StreamARN': data.get('StreamARN') 52 } 
53 } 54 return result 55 56 if random.random() < config.KINESIS_ERROR_PROBABILITY: 57 action = headers.get('X-Amz-Target') 58 if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]: 59 return kinesis_error_response(data, action) 60 return True 61 62 def return_response(self, method, path, data, headers, response): 63 action = headers.get('X-Amz-Target') 64 data = json.loads(to_str(data)) 65 66 records = [] 67 if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM): 68 event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM 69 else event_publisher.EVENT_KINESIS_DELETE_STREAM) 70 payload = {'n': event_publisher.get_hash(data.get('StreamName'))} 71 if action == ACTION_CREATE_STREAM: 72 payload['s'] = data.get('ShardCount') 73 event_publisher.fire_event(event_type, payload=payload) 74 elif action == ACTION_PUT_RECORD: 75 response_body = json.loads(to_str(response.content)) 76 event_record = { 77 'data': data['Data'], 78 'partitionKey': data['PartitionKey'], 79 'sequenceNumber': response_body.get('SequenceNumber') 80 } 81 event_records = [event_record] 82 stream_name = data['StreamName'] 83 lambda_api.process_kinesis_records(event_records, stream_name) 84 elif action == ACTION_PUT_RECORDS: 85 event_records = [] 86 response_body = json.loads(to_str(response.content)) 87 if 'Records' in response_body: 88 response_records = response_body['Records'] 89 records = data['Records'] 90 for index in range(0, len(records)): 91 record = records[index] 92 event_record = { 93 'data': record['Data'], 94 'partitionKey': record['PartitionKey'], 95 'sequenceNumber': response_records[index].get('SequenceNumber') 96 } 97 event_records.append(event_record) 98 stream_name = data['StreamName'] 99 lambda_api.process_kinesis_records(event_records, stream_name) 100 elif action == ACTION_UPDATE_SHARD_COUNT: 101 # Currently kinesalite, which backs the Kinesis implementation for localstack, does 102 # not support UpdateShardCount: 103 # https://github.com/mhart/kinesalite/issues/61 104 # 105 # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it 106 # applies Kinesis resources. A Terraform run fails when this is not present. 107 # 108 # The code that follows just returns a successful response, bypassing the 400 109 # response that kinesalite returns. 110 # 111 response = Response() 112 response.status_code = 200 113 content = { 114 'CurrentShardCount': 1, 115 'StreamName': data['StreamName'], 116 'TargetShardCount': data['TargetShardCount'] 117 } 118 response.encoding = 'UTF-8' 119 response._content = json.dumps(content) 120 return response 121 122 123 # instantiate listener 124 UPDATE_KINESIS = ProxyListenerKinesis() 125 126 127 def kinesis_error_response(data, action): 128 error_response = Response() 129 130 if action == ACTION_PUT_RECORD: 131 error_response.status_code = 400 132 content = { 133 'ErrorCode': 'ProvisionedThroughputExceededException', 134 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.' 135 } 136 else: 137 error_response.status_code = 200 138 content = {'FailedRecordCount': 1, 'Records': []} 139 for record in data.get('Records', []): 140 content['Records'].append({ 141 'ErrorCode': 'ProvisionedThroughputExceededException', 142 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.' 
143 }) 144 145 error_response._content = json.dumps(content) 146 return error_response 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py --- a/localstack/services/kinesis/kinesis_listener.py +++ b/localstack/services/kinesis/kinesis_listener.py @@ -22,23 +22,6 @@ data = json.loads(to_str(data)) action = headers.get('X-Amz-Target') - if action == '%s.DescribeStreamSummary' % ACTION_PREFIX: - stream_arn = data.get('StreamARN') or data['StreamName'] - # TODO fix values below - result = { - 'StreamDescriptionSummary': { - 'ConsumerCount': 0, - 'EnhancedMonitoring': [], - 'KeyId': 'string', - 'OpenShardCount': 0, - 'RetentionPeriodHours': 1, - 'StreamARN': stream_arn, - # 'StreamCreationTimestamp': number, - 'StreamName': data['StreamName'], - 'StreamStatus': 'ACTIVE' - } - } - return result if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX: consumer_arn = data.get('ConsumerARN') or data['ConsumerName'] consumer_name = data.get('ConsumerName') or data['ConsumerARN']
{"golden_diff": "diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py\n--- a/localstack/services/kinesis/kinesis_listener.py\n+++ b/localstack/services/kinesis/kinesis_listener.py\n@@ -22,23 +22,6 @@\n data = json.loads(to_str(data))\n action = headers.get('X-Amz-Target')\n \n- if action == '%s.DescribeStreamSummary' % ACTION_PREFIX:\n- stream_arn = data.get('StreamARN') or data['StreamName']\n- # TODO fix values below\n- result = {\n- 'StreamDescriptionSummary': {\n- 'ConsumerCount': 0,\n- 'EnhancedMonitoring': [],\n- 'KeyId': 'string',\n- 'OpenShardCount': 0,\n- 'RetentionPeriodHours': 1,\n- 'StreamARN': stream_arn,\n- # 'StreamCreationTimestamp': number,\n- 'StreamName': data['StreamName'],\n- 'StreamStatus': 'ACTIVE'\n- }\n- }\n- return result\n if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n", "issue": "describe-stream vs describe-stream-summary on non-existent streams\n**Problem**\r\nFor *non-existent* Kinesis streams:\r\n* the command `awslocal kinesis describe-stream` returns a ResourceNotFoundException (as expected) \r\n* whereas `describe-stream-summary` returns stream details (as if it existed).\r\n\r\n**Expected behaviour**\r\nIt's expected that both commands return a ResourceNotFoundException for non-existent streams. It has been verified that this is the case with AWS CLI with real AWS.\r\n\r\n**Reproduction**\r\n```\r\n\u2717 docker run -d -e SERVICES=s3,iam,kinesis localstack/localstack:0.10.2\r\n513dcffc9a7e8f7b4c12b20972119c5fd797f269ae4cbfe62d8bc9f66a6116eb\r\n\r\n\u2717 docker container ls | head -n2\r\nCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\r\n513dcffc9a7e localstack/localstack:0.10.2 \"docker-entrypoint.sh\" 44 seconds ago Up 42 seconds 4567-4597/tcp, 8080/tcp nostalgic_mcclintock\r\n\r\n\u2717 docker exec -it nostalgic_mcclintock sh\r\n/opt/code/localstack # awslocal kinesis describe-stream --stream-name idontexist\r\nAn error occurred (ResourceNotFoundException) when calling the DescribeStream operation: Stream idontexist under account 000000000000 not found.\r\n\r\n/opt/code/localstack # awslocal kinesis describe-stream-summary --stream-name idontexist\r\n{\r\n \"StreamDescriptionSummary\": {\r\n \"StreamName\": \"idontexist\",\r\n \"StreamARN\": \"idontexist\",\r\n (...)\r\n \"OpenShardCount\": 0,\r\n \"ConsumerCount\": 0\r\n }\r\n}\r\n/opt/code/localstack # awslocal kinesis list-streams\r\n{\r\n \"StreamNames\": []\r\n}\r\n/opt/code/localstack # \r\n```\n", "before_files": [{"content": "import json\nimport random\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.utils.common import to_str\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# action headers\nACTION_PREFIX = 'Kinesis_20131202'\nACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX\nACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX\nACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX\nACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX\nACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX\n\n\nclass ProxyListenerKinesis(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n data = json.loads(to_str(data))\n action = headers.get('X-Amz-Target')\n\n if action == '%s.DescribeStreamSummary' 
% ACTION_PREFIX:\n stream_arn = data.get('StreamARN') or data['StreamName']\n # TODO fix values below\n result = {\n 'StreamDescriptionSummary': {\n 'ConsumerCount': 0,\n 'EnhancedMonitoring': [],\n 'KeyId': 'string',\n 'OpenShardCount': 0,\n 'RetentionPeriodHours': 1,\n 'StreamARN': stream_arn,\n # 'StreamCreationTimestamp': number,\n 'StreamName': data['StreamName'],\n 'StreamStatus': 'ACTIVE'\n }\n }\n return result\n if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n result = {\n 'ConsumerDescription': {\n 'ConsumerARN': consumer_arn,\n # 'ConsumerCreationTimestamp': number,\n 'ConsumerName': consumer_name,\n 'ConsumerStatus': 'ACTIVE',\n 'StreamARN': data.get('StreamARN')\n }\n }\n return result\n\n if random.random() < config.KINESIS_ERROR_PROBABILITY:\n action = headers.get('X-Amz-Target')\n if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:\n return kinesis_error_response(data, action)\n return True\n\n def return_response(self, method, path, data, headers, response):\n action = headers.get('X-Amz-Target')\n data = json.loads(to_str(data))\n\n records = []\n if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):\n event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM\n else event_publisher.EVENT_KINESIS_DELETE_STREAM)\n payload = {'n': event_publisher.get_hash(data.get('StreamName'))}\n if action == ACTION_CREATE_STREAM:\n payload['s'] = data.get('ShardCount')\n event_publisher.fire_event(event_type, payload=payload)\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n 'data': data['Data'],\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': response_body.get('SequenceNumber')\n }\n event_records = [event_record]\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_PUT_RECORDS:\n event_records = []\n response_body = json.loads(to_str(response.content))\n if 'Records' in response_body:\n response_records = response_body['Records']\n records = data['Records']\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n 'data': record['Data'],\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n event_records.append(event_record)\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_UPDATE_SHARD_COUNT:\n # Currently kinesalite, which backs the Kinesis implementation for localstack, does\n # not support UpdateShardCount:\n # https://github.com/mhart/kinesalite/issues/61\n #\n # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it\n # applies Kinesis resources. 
A Terraform run fails when this is not present.\n #\n # The code that follows just returns a successful response, bypassing the 400\n # response that kinesalite returns.\n #\n response = Response()\n response.status_code = 200\n content = {\n 'CurrentShardCount': 1,\n 'StreamName': data['StreamName'],\n 'TargetShardCount': data['TargetShardCount']\n }\n response.encoding = 'UTF-8'\n response._content = json.dumps(content)\n return response\n\n\n# instantiate listener\nUPDATE_KINESIS = ProxyListenerKinesis()\n\n\ndef kinesis_error_response(data, action):\n error_response = Response()\n\n if action == ACTION_PUT_RECORD:\n error_response.status_code = 400\n content = {\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n }\n else:\n error_response.status_code = 200\n content = {'FailedRecordCount': 1, 'Records': []}\n for record in data.get('Records', []):\n content['Records'].append({\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n })\n\n error_response._content = json.dumps(content)\n return error_response\n", "path": "localstack/services/kinesis/kinesis_listener.py"}], "after_files": [{"content": "import json\nimport random\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.utils.common import to_str\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# action headers\nACTION_PREFIX = 'Kinesis_20131202'\nACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX\nACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX\nACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX\nACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX\nACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX\n\n\nclass ProxyListenerKinesis(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n data = json.loads(to_str(data))\n action = headers.get('X-Amz-Target')\n\n if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n result = {\n 'ConsumerDescription': {\n 'ConsumerARN': consumer_arn,\n # 'ConsumerCreationTimestamp': number,\n 'ConsumerName': consumer_name,\n 'ConsumerStatus': 'ACTIVE',\n 'StreamARN': data.get('StreamARN')\n }\n }\n return result\n\n if random.random() < config.KINESIS_ERROR_PROBABILITY:\n action = headers.get('X-Amz-Target')\n if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:\n return kinesis_error_response(data, action)\n return True\n\n def return_response(self, method, path, data, headers, response):\n action = headers.get('X-Amz-Target')\n data = json.loads(to_str(data))\n\n records = []\n if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):\n event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM\n else event_publisher.EVENT_KINESIS_DELETE_STREAM)\n payload = {'n': event_publisher.get_hash(data.get('StreamName'))}\n if action == ACTION_CREATE_STREAM:\n payload['s'] = data.get('ShardCount')\n event_publisher.fire_event(event_type, payload=payload)\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n 'data': data['Data'],\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': 
response_body.get('SequenceNumber')\n }\n event_records = [event_record]\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_PUT_RECORDS:\n event_records = []\n response_body = json.loads(to_str(response.content))\n if 'Records' in response_body:\n response_records = response_body['Records']\n records = data['Records']\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n 'data': record['Data'],\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n event_records.append(event_record)\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_UPDATE_SHARD_COUNT:\n # Currently kinesalite, which backs the Kinesis implementation for localstack, does\n # not support UpdateShardCount:\n # https://github.com/mhart/kinesalite/issues/61\n #\n # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it\n # applies Kinesis resources. A Terraform run fails when this is not present.\n #\n # The code that follows just returns a successful response, bypassing the 400\n # response that kinesalite returns.\n #\n response = Response()\n response.status_code = 200\n content = {\n 'CurrentShardCount': 1,\n 'StreamName': data['StreamName'],\n 'TargetShardCount': data['TargetShardCount']\n }\n response.encoding = 'UTF-8'\n response._content = json.dumps(content)\n return response\n\n\n# instantiate listener\nUPDATE_KINESIS = ProxyListenerKinesis()\n\n\ndef kinesis_error_response(data, action):\n error_response = Response()\n\n if action == ACTION_PUT_RECORD:\n error_response.status_code = 400\n content = {\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n }\n else:\n error_response.status_code = 200\n content = {'FailedRecordCount': 1, 'Records': []}\n for record in data.get('Records', []):\n content['Records'].append({\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n })\n\n error_response._content = json.dumps(content)\n return error_response\n", "path": "localstack/services/kinesis/kinesis_listener.py"}]}
num_tokens: 2,293
num_tokens_diff: 283
problem_id: gh_patches_debug_10442
source: rasdani/github-patches
task_type: git_diff
in_source_id: hpcaitech__ColossalAI-4975
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/language/bert/finetune.py` Content: ``` 1 import argparse 2 from typing import Callable, List, Union 3 4 import evaluate 5 import torch 6 import torch.distributed as dist 7 import torch.nn as nn 8 from data import GLUEDataBuilder 9 from torch.optim import Optimizer 10 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler 11 from torch.utils.data import DataLoader 12 from tqdm import tqdm 13 from transformers import ( 14 AlbertForSequenceClassification, 15 AutoConfig, 16 BertForSequenceClassification, 17 get_linear_schedule_with_warmup, 18 ) 19 20 import colossalai 21 from colossalai.booster import Booster 22 from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin 23 from colossalai.cluster import DistCoordinator 24 from colossalai.nn.optimizer import HybridAdam 25 from colossalai.utils import get_current_device 26 27 # ============================== 28 # Prepare Hyperparameters 29 # ============================== 30 NUM_EPOCHS = 3 31 BATCH_SIZE = 32 32 LEARNING_RATE = 2.4e-5 33 WEIGHT_DECAY = 0.01 34 WARMUP_FRACTION = 0.1 35 36 output_transform_fn = lambda x: x 37 criterion = lambda x: x.loss 38 39 40 def move_to_cuda(batch): 41 return {k: v.cuda() for k, v in batch.items()} 42 43 44 @torch.no_grad() 45 def evaluate_model( 46 model: nn.Module, 47 criterion, 48 test_dataloader: Union[DataLoader, List[DataLoader]], 49 num_labels: int, 50 task_name: str, 51 eval_splits: List[str], 52 booster: Booster, 53 coordinator: DistCoordinator, 54 ): 55 metric = evaluate.load("glue", task_name, process_id=coordinator.rank, num_process=coordinator.world_size) 56 model.eval() 57 58 def evaluate_subset(dataloader: DataLoader): 59 use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1 60 is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage() 61 62 accum_loss = torch.zeros(1, device=get_current_device()) 63 for batch in dataloader: 64 batch = move_to_cuda(batch) 65 labels = batch["labels"] 66 if use_pipeline: 67 pg_mesh = booster.plugin.pg_mesh 68 pp_group = booster.plugin.pp_group 69 current_pp_group_ranks = pg_mesh.get_ranks_in_group(pp_group) 70 current_rank = dist.get_rank() 71 batch = iter([batch]) 72 outputs = booster.execute_pipeline(batch, model, criterion, return_loss=True, return_outputs=True) 73 74 if is_pp_last_stage: 75 logits = outputs["outputs"]["logits"] 76 val_loss = outputs["loss"] 77 accum_loss.add_(val_loss) 78 79 if num_labels > 1: 80 preds = torch.argmax(logits, axis=1) 81 elif num_labels == 1: 82 preds = logits.squeeze() 83 84 dist.broadcast_object_list([preds, val_loss], src=current_pp_group_ranks[-1], group=pp_group) 85 86 metric.add_batch(predictions=preds, references=labels) 87 elif current_rank in current_pp_group_ranks: 88 object_list = [None, None] 89 dist.broadcast_object_list(object_list, src=current_pp_group_ranks[-1], group=pp_group) 90 91 metric.add_batch(predictions=object_list[0].to(get_current_device()), references=labels) 92 accum_loss.add_(object_list[1].to(get_current_device())) 93 94 else: 95 batch = move_to_cuda(batch) 96 outputs = model(**batch) 97 val_loss, logits = outputs[:2] 98 
accum_loss.add_(val_loss) 99 100 if num_labels > 1: 101 preds = torch.argmax(logits, axis=1) 102 elif num_labels == 1: 103 preds = logits.squeeze() 104 105 metric.add_batch(predictions=preds, references=labels) 106 107 results = metric.compute() 108 dist.all_reduce(accum_loss.div_(len(dataloader))) 109 if coordinator.is_master() and results is not None: 110 results["loss"] = accum_loss.item() / coordinator.world_size 111 112 return results 113 114 if isinstance(test_dataloader, DataLoader): 115 return evaluate_subset(test_dataloader) 116 else: 117 assert len(test_dataloader) == len(eval_splits) 118 final_results = {} 119 for split, sub_loader in zip(eval_splits, test_dataloader): 120 results = evaluate_subset(sub_loader) 121 final_results.update({f"{k}_{split}": v for k, v in results.items()}) 122 return final_results 123 124 125 def train_epoch( 126 epoch: int, 127 model: nn.Module, 128 optimizer: Optimizer, 129 _criterion: Callable, 130 lr_scheduler: LRScheduler, 131 train_dataloader: DataLoader, 132 booster: Booster, 133 coordinator: DistCoordinator, 134 ): 135 use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1 136 is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage() 137 print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage) 138 total_step = len(train_dataloader) 139 140 model.train() 141 optimizer.zero_grad() 142 train_dataloader_iter = iter(train_dataloader) 143 with tqdm(range(total_step), desc=f"Epoch [{epoch + 1}/{NUM_EPOCHS}]", disable=not print_flag) as pbar: 144 # Forward pass 145 for _ in pbar: 146 if use_pipeline: 147 outputs = booster.execute_pipeline( 148 train_dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True 149 ) 150 # Backward and optimize 151 if is_pp_last_stage: 152 loss = outputs["loss"] 153 pbar.set_postfix({"loss": loss.item()}) 154 else: 155 data = next(train_dataloader_iter) 156 data = move_to_cuda(data) 157 outputs = model(**data) 158 loss = _criterion(outputs, None) 159 # Backward 160 booster.backward(loss, optimizer) 161 pbar.set_postfix({"loss": loss.item()}) 162 163 optimizer.step() 164 optimizer.zero_grad() 165 lr_scheduler.step() 166 167 168 def main(): 169 # ============================== 170 # Parse Arguments 171 # ============================== 172 parser = argparse.ArgumentParser() 173 parser.add_argument("-t", "--task", default="mrpc", help="GLUE task to run") 174 parser.add_argument( 175 "-p", 176 "--plugin", 177 type=str, 178 default="torch_ddp", 179 choices=["torch_ddp", "torch_ddp_fp16", "gemini", "low_level_zero", "hybrid_parallel"], 180 help="plugin to use", 181 ) 182 parser.add_argument( 183 "--model_type", 184 type=str, 185 default="bert", 186 help="bert or albert", 187 ) 188 parser.add_argument("--target_f1", type=float, default=None, help="target f1 score. 
Raise exception if not reached") 189 parser.add_argument("--use_lazy_init", type=bool, default=False, help="for initiating lazy init context") 190 args = parser.parse_args() 191 192 if args.model_type == "bert": 193 model_name = "bert-base-uncased" 194 elif args.model_type == "albert": 195 model_name = "albert-xxlarge-v2" 196 else: 197 raise RuntimeError 198 199 # ============================== 200 # Launch Distributed Environment 201 # ============================== 202 colossalai.launch_from_torch(config={}, seed=42) 203 coordinator = DistCoordinator() 204 205 lr = LEARNING_RATE * coordinator.world_size 206 207 # ============================== 208 # Instantiate Plugin and Booster 209 # ============================== 210 booster_kwargs = {} 211 if args.plugin == "torch_ddp_fp16": 212 booster_kwargs["mixed_precision"] = "fp16" 213 if args.plugin.startswith("torch_ddp"): 214 plugin = TorchDDPPlugin() 215 elif args.plugin == "gemini": 216 plugin = GeminiPlugin(initial_scale=2**5) 217 elif args.plugin == "low_level_zero": 218 plugin = LowLevelZeroPlugin(initial_scale=2**5) 219 elif args.plugin == "hybrid_parallel": 220 # modify the param accordingly for finetuning test cases 221 plugin = HybridParallelPlugin( 222 tp_size=1, 223 pp_size=2, 224 num_microbatches=None, 225 microbatch_size=1, 226 enable_all_optimization=True, 227 zero_stage=1, 228 precision="fp16", 229 initial_scale=1, 230 ) 231 232 booster = Booster(plugin=plugin, **booster_kwargs) 233 234 # ============================== 235 # Prepare Dataloader 236 # ============================== 237 data_builder = GLUEDataBuilder( 238 model_name, plugin, args.task, train_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE 239 ) 240 train_dataloader = data_builder.train_dataloader() 241 test_dataloader = data_builder.test_dataloader() 242 243 # ==================================== 244 # Prepare model, optimizer 245 # ==================================== 246 # bert pretrained model 247 248 cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels) 249 250 if model_name == "bert-base-uncased": 251 model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda() 252 elif model_name == "albert-xxlarge-v2": 253 model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg) 254 else: 255 raise RuntimeError 256 257 # optimizer 258 no_decay = ["bias", "LayerNorm.weight"] 259 optimizer_grouped_parameters = [ 260 { 261 "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 262 "weight_decay": WEIGHT_DECAY, 263 }, 264 { 265 "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 266 "weight_decay": 0.0, 267 }, 268 ] 269 270 optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8) 271 272 # lr scheduler 273 total_steps = len(train_dataloader) * NUM_EPOCHS 274 num_warmup_steps = int(WARMUP_FRACTION * total_steps) 275 lr_scheduler = get_linear_schedule_with_warmup( 276 optimizer, 277 num_warmup_steps=num_warmup_steps, 278 num_training_steps=total_steps, 279 ) 280 281 def _criterion(outputs, inputs): 282 outputs = output_transform_fn(outputs) 283 loss = criterion(outputs) 284 return loss 285 286 # ============================== 287 # Boost with ColossalAI 288 # ============================== 289 model, optimizer, _criterion, _, lr_scheduler = booster.boost( 290 model, optimizer, criterion=_criterion, lr_scheduler=lr_scheduler 291 ) 292 293 # ============================== 294 # Train model 295 # 
============================== 296 for epoch in range(NUM_EPOCHS): 297 train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator) 298 299 results = evaluate_model( 300 model, 301 _criterion, 302 test_dataloader, 303 data_builder.num_labels, 304 args.task, 305 data_builder.eval_splits, 306 booster, 307 coordinator, 308 ) 309 310 if coordinator.is_master(): 311 print(results) 312 if args.target_f1 is not None and "f1" in results: 313 assert results["f1"] >= args.target_f1, f'f1 score {results["f1"]} is lower than target {args.target_f1}' 314 315 316 if __name__ == "__main__": 317 main() 318 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/language/bert/finetune.py b/examples/language/bert/finetune.py --- a/examples/language/bert/finetune.py +++ b/examples/language/bert/finetune.py @@ -248,7 +248,7 @@ cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels) if model_name == "bert-base-uncased": - model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda() + model = BertForSequenceClassification.from_pretrained(model_name, config=cfg, cache_dir="./").cuda() elif model_name == "albert-xxlarge-v2": model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg) else:
{"golden_diff": "diff --git a/examples/language/bert/finetune.py b/examples/language/bert/finetune.py\n--- a/examples/language/bert/finetune.py\n+++ b/examples/language/bert/finetune.py\n@@ -248,7 +248,7 @@\n cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)\n \n if model_name == \"bert-base-uncased\":\n- model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda()\n+ model = BertForSequenceClassification.from_pretrained(model_name, config=cfg, cache_dir=\"./\").cuda()\n elif model_name == \"albert-xxlarge-v2\":\n model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)\n else:\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nfrom typing import Callable, List, Union\n\nimport evaluate\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom data import GLUEDataBuilder\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom transformers import (\n AlbertForSequenceClassification,\n AutoConfig,\n BertForSequenceClassification,\n get_linear_schedule_with_warmup,\n)\n\nimport colossalai\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.nn.optimizer import HybridAdam\nfrom colossalai.utils import get_current_device\n\n# ==============================\n# Prepare Hyperparameters\n# ==============================\nNUM_EPOCHS = 3\nBATCH_SIZE = 32\nLEARNING_RATE = 2.4e-5\nWEIGHT_DECAY = 0.01\nWARMUP_FRACTION = 0.1\n\noutput_transform_fn = lambda x: x\ncriterion = lambda x: x.loss\n\n\ndef move_to_cuda(batch):\n return {k: v.cuda() for k, v in batch.items()}\n\n\[email protected]_grad()\ndef evaluate_model(\n model: nn.Module,\n criterion,\n test_dataloader: Union[DataLoader, List[DataLoader]],\n num_labels: int,\n task_name: str,\n eval_splits: List[str],\n booster: Booster,\n coordinator: DistCoordinator,\n):\n metric = evaluate.load(\"glue\", task_name, process_id=coordinator.rank, num_process=coordinator.world_size)\n model.eval()\n\n def evaluate_subset(dataloader: DataLoader):\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()\n\n accum_loss = torch.zeros(1, device=get_current_device())\n for batch in dataloader:\n batch = move_to_cuda(batch)\n labels = batch[\"labels\"]\n if use_pipeline:\n pg_mesh = booster.plugin.pg_mesh\n pp_group = booster.plugin.pp_group\n current_pp_group_ranks = pg_mesh.get_ranks_in_group(pp_group)\n current_rank = dist.get_rank()\n batch = iter([batch])\n outputs = booster.execute_pipeline(batch, model, criterion, return_loss=True, return_outputs=True)\n\n if is_pp_last_stage:\n logits = outputs[\"outputs\"][\"logits\"]\n val_loss = outputs[\"loss\"]\n accum_loss.add_(val_loss)\n\n if num_labels > 1:\n preds = torch.argmax(logits, axis=1)\n elif num_labels == 1:\n preds = logits.squeeze()\n\n dist.broadcast_object_list([preds, val_loss], src=current_pp_group_ranks[-1], group=pp_group)\n\n metric.add_batch(predictions=preds, references=labels)\n elif current_rank in current_pp_group_ranks:\n object_list = [None, None]\n dist.broadcast_object_list(object_list, 
src=current_pp_group_ranks[-1], group=pp_group)\n\n metric.add_batch(predictions=object_list[0].to(get_current_device()), references=labels)\n accum_loss.add_(object_list[1].to(get_current_device()))\n\n else:\n batch = move_to_cuda(batch)\n outputs = model(**batch)\n val_loss, logits = outputs[:2]\n accum_loss.add_(val_loss)\n\n if num_labels > 1:\n preds = torch.argmax(logits, axis=1)\n elif num_labels == 1:\n preds = logits.squeeze()\n\n metric.add_batch(predictions=preds, references=labels)\n\n results = metric.compute()\n dist.all_reduce(accum_loss.div_(len(dataloader)))\n if coordinator.is_master() and results is not None:\n results[\"loss\"] = accum_loss.item() / coordinator.world_size\n\n return results\n\n if isinstance(test_dataloader, DataLoader):\n return evaluate_subset(test_dataloader)\n else:\n assert len(test_dataloader) == len(eval_splits)\n final_results = {}\n for split, sub_loader in zip(eval_splits, test_dataloader):\n results = evaluate_subset(sub_loader)\n final_results.update({f\"{k}_{split}\": v for k, v in results.items()})\n return final_results\n\n\ndef train_epoch(\n epoch: int,\n model: nn.Module,\n optimizer: Optimizer,\n _criterion: Callable,\n lr_scheduler: LRScheduler,\n train_dataloader: DataLoader,\n booster: Booster,\n coordinator: DistCoordinator,\n):\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()\n print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)\n total_step = len(train_dataloader)\n\n model.train()\n optimizer.zero_grad()\n train_dataloader_iter = iter(train_dataloader)\n with tqdm(range(total_step), desc=f\"Epoch [{epoch + 1}/{NUM_EPOCHS}]\", disable=not print_flag) as pbar:\n # Forward pass\n for _ in pbar:\n if use_pipeline:\n outputs = booster.execute_pipeline(\n train_dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True\n )\n # Backward and optimize\n if is_pp_last_stage:\n loss = outputs[\"loss\"]\n pbar.set_postfix({\"loss\": loss.item()})\n else:\n data = next(train_dataloader_iter)\n data = move_to_cuda(data)\n outputs = model(**data)\n loss = _criterion(outputs, None)\n # Backward\n booster.backward(loss, optimizer)\n pbar.set_postfix({\"loss\": loss.item()})\n\n optimizer.step()\n optimizer.zero_grad()\n lr_scheduler.step()\n\n\ndef main():\n # ==============================\n # Parse Arguments\n # ==============================\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--task\", default=\"mrpc\", help=\"GLUE task to run\")\n parser.add_argument(\n \"-p\",\n \"--plugin\",\n type=str,\n default=\"torch_ddp\",\n choices=[\"torch_ddp\", \"torch_ddp_fp16\", \"gemini\", \"low_level_zero\", \"hybrid_parallel\"],\n help=\"plugin to use\",\n )\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=\"bert\",\n help=\"bert or albert\",\n )\n parser.add_argument(\"--target_f1\", type=float, default=None, help=\"target f1 score. 
Raise exception if not reached\")\n parser.add_argument(\"--use_lazy_init\", type=bool, default=False, help=\"for initiating lazy init context\")\n args = parser.parse_args()\n\n if args.model_type == \"bert\":\n model_name = \"bert-base-uncased\"\n elif args.model_type == \"albert\":\n model_name = \"albert-xxlarge-v2\"\n else:\n raise RuntimeError\n\n # ==============================\n # Launch Distributed Environment\n # ==============================\n colossalai.launch_from_torch(config={}, seed=42)\n coordinator = DistCoordinator()\n\n lr = LEARNING_RATE * coordinator.world_size\n\n # ==============================\n # Instantiate Plugin and Booster\n # ==============================\n booster_kwargs = {}\n if args.plugin == \"torch_ddp_fp16\":\n booster_kwargs[\"mixed_precision\"] = \"fp16\"\n if args.plugin.startswith(\"torch_ddp\"):\n plugin = TorchDDPPlugin()\n elif args.plugin == \"gemini\":\n plugin = GeminiPlugin(initial_scale=2**5)\n elif args.plugin == \"low_level_zero\":\n plugin = LowLevelZeroPlugin(initial_scale=2**5)\n elif args.plugin == \"hybrid_parallel\":\n # modify the param accordingly for finetuning test cases\n plugin = HybridParallelPlugin(\n tp_size=1,\n pp_size=2,\n num_microbatches=None,\n microbatch_size=1,\n enable_all_optimization=True,\n zero_stage=1,\n precision=\"fp16\",\n initial_scale=1,\n )\n\n booster = Booster(plugin=plugin, **booster_kwargs)\n\n # ==============================\n # Prepare Dataloader\n # ==============================\n data_builder = GLUEDataBuilder(\n model_name, plugin, args.task, train_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE\n )\n train_dataloader = data_builder.train_dataloader()\n test_dataloader = data_builder.test_dataloader()\n\n # ====================================\n # Prepare model, optimizer\n # ====================================\n # bert pretrained model\n\n cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)\n\n if model_name == \"bert-base-uncased\":\n model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda()\n elif model_name == \"albert-xxlarge-v2\":\n model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)\n else:\n raise RuntimeError\n\n # optimizer\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": WEIGHT_DECAY,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)\n\n # lr scheduler\n total_steps = len(train_dataloader) * NUM_EPOCHS\n num_warmup_steps = int(WARMUP_FRACTION * total_steps)\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=total_steps,\n )\n\n def _criterion(outputs, inputs):\n outputs = output_transform_fn(outputs)\n loss = criterion(outputs)\n return loss\n\n # ==============================\n # Boost with ColossalAI\n # ==============================\n model, optimizer, _criterion, _, lr_scheduler = booster.boost(\n model, optimizer, criterion=_criterion, lr_scheduler=lr_scheduler\n )\n\n # ==============================\n # Train model\n # ==============================\n for epoch in range(NUM_EPOCHS):\n train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)\n\n results = 
evaluate_model(\n model,\n _criterion,\n test_dataloader,\n data_builder.num_labels,\n args.task,\n data_builder.eval_splits,\n booster,\n coordinator,\n )\n\n if coordinator.is_master():\n print(results)\n if args.target_f1 is not None and \"f1\" in results:\n assert results[\"f1\"] >= args.target_f1, f'f1 score {results[\"f1\"]} is lower than target {args.target_f1}'\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/language/bert/finetune.py"}], "after_files": [{"content": "import argparse\nfrom typing import Callable, List, Union\n\nimport evaluate\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom data import GLUEDataBuilder\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom transformers import (\n AlbertForSequenceClassification,\n AutoConfig,\n BertForSequenceClassification,\n get_linear_schedule_with_warmup,\n)\n\nimport colossalai\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.nn.optimizer import HybridAdam\nfrom colossalai.utils import get_current_device\n\n# ==============================\n# Prepare Hyperparameters\n# ==============================\nNUM_EPOCHS = 3\nBATCH_SIZE = 32\nLEARNING_RATE = 2.4e-5\nWEIGHT_DECAY = 0.01\nWARMUP_FRACTION = 0.1\n\noutput_transform_fn = lambda x: x\ncriterion = lambda x: x.loss\n\n\ndef move_to_cuda(batch):\n return {k: v.cuda() for k, v in batch.items()}\n\n\[email protected]_grad()\ndef evaluate_model(\n model: nn.Module,\n criterion,\n test_dataloader: Union[DataLoader, List[DataLoader]],\n num_labels: int,\n task_name: str,\n eval_splits: List[str],\n booster: Booster,\n coordinator: DistCoordinator,\n):\n metric = evaluate.load(\"glue\", task_name, process_id=coordinator.rank, num_process=coordinator.world_size)\n model.eval()\n\n def evaluate_subset(dataloader: DataLoader):\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()\n\n accum_loss = torch.zeros(1, device=get_current_device())\n for batch in dataloader:\n batch = move_to_cuda(batch)\n labels = batch[\"labels\"]\n if use_pipeline:\n pg_mesh = booster.plugin.pg_mesh\n pp_group = booster.plugin.pp_group\n current_pp_group_ranks = pg_mesh.get_ranks_in_group(pp_group)\n current_rank = dist.get_rank()\n batch = iter([batch])\n outputs = booster.execute_pipeline(batch, model, criterion, return_loss=True, return_outputs=True)\n\n if is_pp_last_stage:\n logits = outputs[\"outputs\"][\"logits\"]\n val_loss = outputs[\"loss\"]\n accum_loss.add_(val_loss)\n\n if num_labels > 1:\n preds = torch.argmax(logits, axis=1)\n elif num_labels == 1:\n preds = logits.squeeze()\n\n dist.broadcast_object_list([preds, val_loss], src=current_pp_group_ranks[-1], group=pp_group)\n\n metric.add_batch(predictions=preds, references=labels)\n elif current_rank in current_pp_group_ranks:\n object_list = [None, None]\n dist.broadcast_object_list(object_list, src=current_pp_group_ranks[-1], group=pp_group)\n\n metric.add_batch(predictions=object_list[0].to(get_current_device()), references=labels)\n accum_loss.add_(object_list[1].to(get_current_device()))\n\n else:\n batch = move_to_cuda(batch)\n outputs = model(**batch)\n val_loss, logits = outputs[:2]\n 
accum_loss.add_(val_loss)\n\n if num_labels > 1:\n preds = torch.argmax(logits, axis=1)\n elif num_labels == 1:\n preds = logits.squeeze()\n\n metric.add_batch(predictions=preds, references=labels)\n\n results = metric.compute()\n dist.all_reduce(accum_loss.div_(len(dataloader)))\n if coordinator.is_master() and results is not None:\n results[\"loss\"] = accum_loss.item() / coordinator.world_size\n\n return results\n\n if isinstance(test_dataloader, DataLoader):\n return evaluate_subset(test_dataloader)\n else:\n assert len(test_dataloader) == len(eval_splits)\n final_results = {}\n for split, sub_loader in zip(eval_splits, test_dataloader):\n results = evaluate_subset(sub_loader)\n final_results.update({f\"{k}_{split}\": v for k, v in results.items()})\n return final_results\n\n\ndef train_epoch(\n epoch: int,\n model: nn.Module,\n optimizer: Optimizer,\n _criterion: Callable,\n lr_scheduler: LRScheduler,\n train_dataloader: DataLoader,\n booster: Booster,\n coordinator: DistCoordinator,\n):\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()\n print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)\n total_step = len(train_dataloader)\n\n model.train()\n optimizer.zero_grad()\n train_dataloader_iter = iter(train_dataloader)\n with tqdm(range(total_step), desc=f\"Epoch [{epoch + 1}/{NUM_EPOCHS}]\", disable=not print_flag) as pbar:\n # Forward pass\n for _ in pbar:\n if use_pipeline:\n outputs = booster.execute_pipeline(\n train_dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True\n )\n # Backward and optimize\n if is_pp_last_stage:\n loss = outputs[\"loss\"]\n pbar.set_postfix({\"loss\": loss.item()})\n else:\n data = next(train_dataloader_iter)\n data = move_to_cuda(data)\n outputs = model(**data)\n loss = _criterion(outputs, None)\n # Backward\n booster.backward(loss, optimizer)\n pbar.set_postfix({\"loss\": loss.item()})\n\n optimizer.step()\n optimizer.zero_grad()\n lr_scheduler.step()\n\n\ndef main():\n # ==============================\n # Parse Arguments\n # ==============================\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--task\", default=\"mrpc\", help=\"GLUE task to run\")\n parser.add_argument(\n \"-p\",\n \"--plugin\",\n type=str,\n default=\"torch_ddp\",\n choices=[\"torch_ddp\", \"torch_ddp_fp16\", \"gemini\", \"low_level_zero\", \"hybrid_parallel\"],\n help=\"plugin to use\",\n )\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=\"bert\",\n help=\"bert or albert\",\n )\n parser.add_argument(\"--target_f1\", type=float, default=None, help=\"target f1 score. 
Raise exception if not reached\")\n parser.add_argument(\"--use_lazy_init\", type=bool, default=False, help=\"for initiating lazy init context\")\n args = parser.parse_args()\n\n if args.model_type == \"bert\":\n model_name = \"bert-base-uncased\"\n elif args.model_type == \"albert\":\n model_name = \"albert-xxlarge-v2\"\n else:\n raise RuntimeError\n\n # ==============================\n # Launch Distributed Environment\n # ==============================\n colossalai.launch_from_torch(config={}, seed=42)\n coordinator = DistCoordinator()\n\n lr = LEARNING_RATE * coordinator.world_size\n\n # ==============================\n # Instantiate Plugin and Booster\n # ==============================\n booster_kwargs = {}\n if args.plugin == \"torch_ddp_fp16\":\n booster_kwargs[\"mixed_precision\"] = \"fp16\"\n if args.plugin.startswith(\"torch_ddp\"):\n plugin = TorchDDPPlugin()\n elif args.plugin == \"gemini\":\n plugin = GeminiPlugin(initial_scale=2**5)\n elif args.plugin == \"low_level_zero\":\n plugin = LowLevelZeroPlugin(initial_scale=2**5)\n elif args.plugin == \"hybrid_parallel\":\n # modify the param accordingly for finetuning test cases\n plugin = HybridParallelPlugin(\n tp_size=1,\n pp_size=2,\n num_microbatches=None,\n microbatch_size=1,\n enable_all_optimization=True,\n zero_stage=1,\n precision=\"fp16\",\n initial_scale=1,\n )\n\n booster = Booster(plugin=plugin, **booster_kwargs)\n\n # ==============================\n # Prepare Dataloader\n # ==============================\n data_builder = GLUEDataBuilder(\n model_name, plugin, args.task, train_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE\n )\n train_dataloader = data_builder.train_dataloader()\n test_dataloader = data_builder.test_dataloader()\n\n # ====================================\n # Prepare model, optimizer\n # ====================================\n # bert pretrained model\n\n cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)\n\n if model_name == \"bert-base-uncased\":\n model = BertForSequenceClassification.from_pretrained(model_name, config=cfg, cache_dir=\"./\").cuda()\n elif model_name == \"albert-xxlarge-v2\":\n model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)\n else:\n raise RuntimeError\n\n # optimizer\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": WEIGHT_DECAY,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)\n\n # lr scheduler\n total_steps = len(train_dataloader) * NUM_EPOCHS\n num_warmup_steps = int(WARMUP_FRACTION * total_steps)\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=total_steps,\n )\n\n def _criterion(outputs, inputs):\n outputs = output_transform_fn(outputs)\n loss = criterion(outputs)\n return loss\n\n # ==============================\n # Boost with ColossalAI\n # ==============================\n model, optimizer, _criterion, _, lr_scheduler = booster.boost(\n model, optimizer, criterion=_criterion, lr_scheduler=lr_scheduler\n )\n\n # ==============================\n # Train model\n # ==============================\n for epoch in range(NUM_EPOCHS):\n train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)\n\n 
results = evaluate_model(\n model,\n _criterion,\n test_dataloader,\n data_builder.num_labels,\n args.task,\n data_builder.eval_splits,\n booster,\n coordinator,\n )\n\n if coordinator.is_master():\n print(results)\n if args.target_f1 is not None and \"f1\" in results:\n assert results[\"f1\"] >= args.target_f1, f'f1 score {results[\"f1\"]} is lower than target {args.target_f1}'\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/language/bert/finetune.py"}]}
num_tokens: 3,546
num_tokens_diff: 171

problem_id: gh_patches_debug_21331
source: rasdani/github-patches
task_type: git_diff
in_source_id: rasterio__rasterio-426
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Switch to the canonical click plugins implementation The plugin functionality from `cligj.plugins` landed in its own click extension package called [click-plugins](https://github.com/click-contrib/click-plugins). The cligj implementation will be deprecated according to https://github.com/mapbox/cligj/issues/6. Switch to the canonical click plugins implementation The plugin functionality from `cligj.plugins` landed in its own click extension package called [click-plugins](https://github.com/click-contrib/click-plugins). The cligj implementation will be deprecated according to https://github.com/mapbox/cligj/issues/6. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # Two environmental variables influence this script. 4 # 5 # GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers, 6 # libraries, and data files. 7 # 8 # PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the 9 # source or binary distribution. This is essential when creating self-contained 10 # binary wheels. 11 12 import logging 13 import os 14 import pprint 15 import shutil 16 import subprocess 17 import sys 18 19 from setuptools import setup 20 from setuptools.extension import Extension 21 22 logging.basicConfig() 23 log = logging.getLogger() 24 25 # python -W all setup.py ... 26 if 'all' in sys.warnoptions: 27 log.level = logging.DEBUG 28 29 def check_output(cmd): 30 # since subprocess.check_output doesn't exist in 2.6 31 # we wrap it here. 32 try: 33 out = subprocess.check_output(cmd) 34 return out.decode('utf') 35 except AttributeError: 36 # For some reasone check_output doesn't exist 37 # So fall back on Popen 38 p = subprocess.Popen(cmd, stdout=subprocess.PIPE) 39 out, err = p.communicate() 40 return out 41 42 def copy_data_tree(datadir, destdir): 43 try: 44 shutil.rmtree(destdir) 45 except OSError: 46 pass 47 shutil.copytree(datadir, destdir) 48 49 # Parse the version from the rasterio module. 50 with open('rasterio/__init__.py') as f: 51 for line in f: 52 if line.find("__version__") >= 0: 53 version = line.split("=")[1].strip() 54 version = version.strip('"') 55 version = version.strip("'") 56 continue 57 58 with open('VERSION.txt', 'w') as f: 59 f.write(version) 60 61 # Use Cython if available. 62 try: 63 from Cython.Build import cythonize 64 except ImportError: 65 cythonize = None 66 67 # By default we'll try to get options via gdal-config. On systems without, 68 # options will need to be set in setup.cfg or on the setup command line. 69 include_dirs = [] 70 library_dirs = [] 71 libraries = [] 72 extra_link_args = [] 73 gdal_output = [None]*3 74 75 try: 76 import numpy 77 include_dirs.append(numpy.get_include()) 78 except ImportError: 79 log.critical("Numpy and its headers are required to run setup(). 
Exiting.") 80 sys.exit(1) 81 82 try: 83 gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config') 84 for i, flag in enumerate(("--cflags", "--libs", "--datadir")): 85 gdal_output[i] = check_output([gdal_config, flag]).strip() 86 87 for item in gdal_output[0].split(): 88 if item.startswith("-I"): 89 include_dirs.extend(item[2:].split(":")) 90 for item in gdal_output[1].split(): 91 if item.startswith("-L"): 92 library_dirs.extend(item[2:].split(":")) 93 elif item.startswith("-l"): 94 libraries.append(item[2:]) 95 else: 96 # e.g. -framework GDAL 97 extra_link_args.append(item) 98 99 except Exception as e: 100 if os.name == "nt": 101 log.info(("Building on Windows requires extra options to setup.py to locate needed GDAL files.\n" 102 "More information is available in the README.")) 103 else: 104 log.warning("Failed to get options via gdal-config: %s", str(e)) 105 106 107 # Conditionally copy the GDAL data. To be used in conjunction with 108 # the bdist_wheel command to make self-contained binary wheels. 109 if os.environ.get('PACKAGE_DATA'): 110 destdir = 'rasterio/gdal_data' 111 if gdal_output[2]: 112 log.info("Copying gdal data from %s" % gdal_output[2]) 113 copy_data_tree(gdal_output[2], destdir) 114 else: 115 # check to see if GDAL_DATA is defined 116 gdal_data = os.environ.get('GDAL_DATA', None) 117 if gdal_data: 118 log.info("Copying gdal_data from %s" % gdal_data) 119 copy_data_tree(gdal_data, destdir) 120 121 # Conditionally copy PROJ.4 data. 122 projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj') 123 if os.path.exists(projdatadir): 124 log.info("Copying proj_data from %s" % projdatadir) 125 copy_data_tree(projdatadir, 'rasterio/proj_data') 126 127 ext_options = dict( 128 include_dirs=include_dirs, 129 library_dirs=library_dirs, 130 libraries=libraries, 131 extra_link_args=extra_link_args) 132 133 if not os.name == "nt": 134 # These options fail on Windows if using Visual Studio 135 ext_options['extra_compile_args'] = ['-Wno-unused-parameter', 136 '-Wno-unused-function'] 137 138 log.debug('ext_options:\n%s', pprint.pformat(ext_options)) 139 140 # When building from a repo, Cython is required. 141 if os.path.exists("MANIFEST.in") and "clean" not in sys.argv: 142 log.info("MANIFEST.in found, presume a repo, cythonizing...") 143 if not cythonize: 144 log.critical( 145 "Cython.Build.cythonize not found. " 146 "Cython is required to build from a repo.") 147 sys.exit(1) 148 ext_modules = cythonize([ 149 Extension( 150 'rasterio._base', ['rasterio/_base.pyx'], **ext_options), 151 Extension( 152 'rasterio._io', ['rasterio/_io.pyx'], **ext_options), 153 Extension( 154 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options), 155 Extension( 156 'rasterio._features', ['rasterio/_features.pyx'], **ext_options), 157 Extension( 158 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options), 159 Extension( 160 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options), 161 Extension( 162 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options), 163 Extension( 164 'rasterio._err', ['rasterio/_err.pyx'], **ext_options), 165 Extension( 166 'rasterio._example', ['rasterio/_example.pyx'], **ext_options), 167 ], quiet=True) 168 169 # If there's no manifest template, as in an sdist, we just specify .c files. 
170 else: 171 ext_modules = [ 172 Extension( 173 'rasterio._base', ['rasterio/_base.c'], **ext_options), 174 Extension( 175 'rasterio._io', ['rasterio/_io.c'], **ext_options), 176 Extension( 177 'rasterio._copy', ['rasterio/_copy.c'], **ext_options), 178 Extension( 179 'rasterio._features', ['rasterio/_features.c'], **ext_options), 180 Extension( 181 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options), 182 Extension( 183 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options), 184 Extension( 185 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options), 186 Extension( 187 'rasterio._err', ['rasterio/_err.c'], **ext_options), 188 Extension( 189 'rasterio._example', ['rasterio/_example.c'], **ext_options), 190 ] 191 192 with open('README.rst') as f: 193 readme = f.read() 194 195 # Runtime requirements. 196 inst_reqs = [ 197 'affine>=1.0', 198 'cligj>=0.2.0', 199 'Numpy>=1.7', 200 'snuggs>=1.3.1'] 201 202 if sys.version_info < (3, 4): 203 inst_reqs.append('enum34') 204 205 setup_args = dict( 206 name='rasterio', 207 version=version, 208 description="Fast and direct raster I/O for use with Numpy and SciPy", 209 long_description=readme, 210 classifiers=[ 211 'Development Status :: 4 - Beta', 212 'Intended Audience :: Developers', 213 'Intended Audience :: Information Technology', 214 'Intended Audience :: Science/Research', 215 'License :: OSI Approved :: BSD License', 216 'Programming Language :: C', 217 'Programming Language :: Python :: 2.6', 218 'Programming Language :: Python :: 2.7', 219 'Programming Language :: Python :: 3.3', 220 'Programming Language :: Python :: 3.4', 221 'Topic :: Multimedia :: Graphics :: Graphics Conversion', 222 'Topic :: Scientific/Engineering :: GIS'], 223 keywords='raster gdal', 224 author='Sean Gillies', 225 author_email='[email protected]', 226 url='https://github.com/mapbox/rasterio', 227 license='BSD', 228 package_dir={'': '.'}, 229 packages=['rasterio', 'rasterio.rio'], 230 entry_points=''' 231 [console_scripts] 232 rio=rasterio.rio.main:main_group 233 234 [rasterio.rio_commands] 235 bounds=rasterio.rio.features:bounds 236 calc=rasterio.rio.calc:calc 237 convert=rasterio.rio.convert:convert 238 edit-info=rasterio.rio.info:edit 239 env=rasterio.rio.info:env 240 info=rasterio.rio.info:info 241 insp=rasterio.rio.info:insp 242 mask=rasterio.rio.features:mask 243 merge=rasterio.rio.merge:merge 244 overview=rasterio.rio.overview:overview 245 rasterize=rasterio.rio.features:rasterize 246 sample=rasterio.rio.sample:sample 247 shapes=rasterio.rio.features:shapes 248 stack=rasterio.rio.bands:stack 249 warp=rasterio.rio.warp:warp 250 transform=rasterio.rio.info:transform 251 ''', 252 include_package_data=True, 253 ext_modules=ext_modules, 254 zip_safe=False, 255 install_requires=inst_reqs, 256 extras_require={ 257 'ipython': ['ipython>=2.0']}) 258 259 if os.environ.get('PACKAGE_DATA'): 260 setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']} 261 262 setup(**setup_args) 263 ``` Path: `rasterio/rio/main.py` Content: ``` 1 """ 2 Main click group for CLI 3 """ 4 5 6 import logging 7 from pkg_resources import iter_entry_points 8 import sys 9 10 import click 11 import cligj 12 import cligj.plugins 13 14 import rasterio 15 16 17 def configure_logging(verbosity): 18 log_level = max(10, 30 - 10*verbosity) 19 logging.basicConfig(stream=sys.stderr, level=log_level) 20 21 22 @cligj.plugins.group(plugins=( 23 ep for ep in list(iter_entry_points('rasterio.rio_commands')) + 24 list(iter_entry_points('rasterio.rio_plugins')))) 
25 @cligj.verbose_opt 26 @cligj.quiet_opt 27 @click.version_option(version=rasterio.__version__, message='%(version)s') 28 @click.pass_context 29 def main_group(ctx, verbose, quiet): 30 31 """ 32 Rasterio command line interface. 33 """ 34 35 verbosity = verbose - quiet 36 configure_logging(verbosity) 37 ctx.obj = {} 38 ctx.obj['verbosity'] = verbosity 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/rio/main.py b/rasterio/rio/main.py --- a/rasterio/rio/main.py +++ b/rasterio/rio/main.py @@ -8,8 +8,8 @@ import sys import click +from click_plugins import with_plugins import cligj -import cligj.plugins import rasterio @@ -19,9 +19,9 @@ logging.basicConfig(stream=sys.stderr, level=log_level) [email protected](plugins=( - ep for ep in list(iter_entry_points('rasterio.rio_commands')) + - list(iter_entry_points('rasterio.rio_plugins')))) +@with_plugins(ep for ep in list(iter_entry_points('rasterio.rio_commands')) + + list(iter_entry_points('rasterio.rio_plugins'))) [email protected]() @cligj.verbose_opt @cligj.quiet_opt @click.version_option(version=rasterio.__version__, message='%(version)s') diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -197,7 +197,8 @@ 'affine>=1.0', 'cligj>=0.2.0', 'Numpy>=1.7', - 'snuggs>=1.3.1'] + 'snuggs>=1.3.1', + 'click-plugins'] if sys.version_info < (3, 4): inst_reqs.append('enum34')
{"golden_diff": "diff --git a/rasterio/rio/main.py b/rasterio/rio/main.py\n--- a/rasterio/rio/main.py\n+++ b/rasterio/rio/main.py\n@@ -8,8 +8,8 @@\n import sys\n \n import click\n+from click_plugins import with_plugins\n import cligj\n-import cligj.plugins\n \n import rasterio\n \n@@ -19,9 +19,9 @@\n logging.basicConfig(stream=sys.stderr, level=log_level)\n \n \[email protected](plugins=(\n- ep for ep in list(iter_entry_points('rasterio.rio_commands')) +\n- list(iter_entry_points('rasterio.rio_plugins'))))\n+@with_plugins(ep for ep in list(iter_entry_points('rasterio.rio_commands')) +\n+ list(iter_entry_points('rasterio.rio_plugins')))\[email protected]()\n @cligj.verbose_opt\n @cligj.quiet_opt\n @click.version_option(version=rasterio.__version__, message='%(version)s')\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -197,7 +197,8 @@\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n- 'snuggs>=1.3.1']\n+ 'snuggs>=1.3.1',\n+ 'click-plugins']\n \n if sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n", "issue": "Switch to the canonical click plugins implementation\nThe plugin functionality from `cligj.plugins` landed in its own click extension package called [click-plugins](https://github.com/click-contrib/click-plugins). The cligj implementation will be deprecated according to https://github.com/mapbox/cligj/issues/6.\n\nSwitch to the canonical click plugins implementation\nThe plugin functionality from `cligj.plugins` landed in its own click extension package called [click-plugins](https://github.com/click-contrib/click-plugins). The cligj implementation will be deprecated according to https://github.com/mapbox/cligj/issues/6.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. 
On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. 
\"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.features:bounds\n calc=rasterio.rio.calc:calc\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.info:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.info:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require={\n 'ipython': 
['ipython>=2.0']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}, {"content": "\"\"\"\nMain click group for CLI\n\"\"\"\n\n\nimport logging\nfrom pkg_resources import iter_entry_points\nimport sys\n\nimport click\nimport cligj\nimport cligj.plugins\n\nimport rasterio\n\n\ndef configure_logging(verbosity):\n log_level = max(10, 30 - 10*verbosity)\n logging.basicConfig(stream=sys.stderr, level=log_level)\n\n\[email protected](plugins=(\n ep for ep in list(iter_entry_points('rasterio.rio_commands')) +\n list(iter_entry_points('rasterio.rio_plugins'))))\[email protected]_opt\[email protected]_opt\[email protected]_option(version=rasterio.__version__, message='%(version)s')\[email protected]_context\ndef main_group(ctx, verbose, quiet):\n\n \"\"\"\n Rasterio command line interface.\n \"\"\"\n\n verbosity = verbose - quiet\n configure_logging(verbosity)\n ctx.obj = {}\n ctx.obj['verbosity'] = verbosity\n", "path": "rasterio/rio/main.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). 
Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. 
\"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1',\n 'click-plugins']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.features:bounds\n calc=rasterio.rio.calc:calc\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.info:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.info:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require={\n 
'ipython': ['ipython>=2.0']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}, {"content": "\"\"\"\nMain click group for CLI\n\"\"\"\n\n\nimport logging\nfrom pkg_resources import iter_entry_points\nimport sys\n\nimport click\nfrom click_plugins import with_plugins\nimport cligj\n\nimport rasterio\n\n\ndef configure_logging(verbosity):\n log_level = max(10, 30 - 10*verbosity)\n logging.basicConfig(stream=sys.stderr, level=log_level)\n\n\n@with_plugins(ep for ep in list(iter_entry_points('rasterio.rio_commands')) +\n list(iter_entry_points('rasterio.rio_plugins')))\[email protected]()\[email protected]_opt\[email protected]_opt\[email protected]_option(version=rasterio.__version__, message='%(version)s')\[email protected]_context\ndef main_group(ctx, verbose, quiet):\n\n \"\"\"\n Rasterio command line interface.\n \"\"\"\n\n verbosity = verbose - quiet\n configure_logging(verbosity)\n ctx.obj = {}\n ctx.obj['verbosity'] = verbosity\n", "path": "rasterio/rio/main.py"}]}
3,624
337
gh_patches_debug_23752
rasdani/github-patches
git_diff
Azure__azure-cli-extensions-3136
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Transition to GA: databox Command module `databox` has been released for a long time and is using stable sdk version `2019-09-01`. Please check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/databox/azext_databox/commands.py` Content: ``` 1 # -------------------------------------------------------------------------------------------- 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License. See License.txt in the project root for license information. 4 # -------------------------------------------------------------------------------------------- 5 6 # pylint: disable=line-too-long 7 # pylint: disable=too-many-lines 8 # pylint: disable=too-many-statements 9 # pylint: disable=too-many-locals 10 from azext_databox._validators import validate_create_input_parameters 11 from azure.cli.core.commands import CliCommandType 12 13 14 def load_command_table(self, _): 15 16 from azext_databox._client_factory import cf_jobs 17 databox_jobs = CliCommandType( 18 operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}', 19 client_factory=cf_jobs) 20 with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g: 21 g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters) 22 g.custom_command('update', 'update_databox_job') 23 g.custom_command('delete', 'delete_databox_job', confirmation=True) 24 g.custom_show_command('show', 'get_databox_job') 25 g.custom_command('list', 'list_databox_job') 26 g.custom_command('cancel', 'cancel_databox_job', confirmation=True) 27 g.custom_command('list-credentials', 'list_credentials_databox_job') 28 ``` Path: `src/databox/setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # -------------------------------------------------------------------------------------------- 4 # Copyright (c) Microsoft Corporation. All rights reserved. 5 # Licensed under the MIT License. See License.txt in the project root for license information. 6 # -------------------------------------------------------------------------------------------- 7 8 9 from codecs import open 10 from setuptools import setup, find_packages 11 try: 12 from azure_bdist_wheel import cmdclass 13 except ImportError: 14 from distutils import log as logger 15 logger.warn("Wheel is not available, disabling bdist_wheel hook") 16 17 # TODO: Confirm this is the right version number you want and it matches your 18 # HISTORY.rst entry. 
19 VERSION = '0.1.0' 20 21 # The full list of classifiers is available at 22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers 23 CLASSIFIERS = [ 24 'Development Status :: 4 - Beta', 25 'Intended Audience :: Developers', 26 'Intended Audience :: System Administrators', 27 'Programming Language :: Python', 28 'Programming Language :: Python :: 3', 29 'Programming Language :: Python :: 3.4', 30 'Programming Language :: Python :: 3.5', 31 'Programming Language :: Python :: 3.6', 32 'Programming Language :: Python :: 3.7', 33 'Programming Language :: Python :: 3.8', 34 'License :: OSI Approved :: MIT License', 35 ] 36 37 # TODO: Add any additional SDK dependencies here 38 DEPENDENCIES = [] 39 40 with open('README.md', 'r', encoding='utf-8') as f: 41 README = f.read() 42 with open('HISTORY.rst', 'r', encoding='utf-8') as f: 43 HISTORY = f.read() 44 45 setup( 46 name='databox', 47 version=VERSION, 48 description='Microsoft Azure Command-Line Tools DataBox Extension', 49 # TODO: Update author and email, if applicable 50 author='Microsoft Corporation', 51 author_email='[email protected]', 52 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox', 53 long_description=README + '\n\n' + HISTORY, 54 license='MIT', 55 classifiers=CLASSIFIERS, 56 packages=find_packages(), 57 install_requires=DEPENDENCIES, 58 package_data={'azext_databox': ['azext_metadata.json']}, 59 ) 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py --- a/src/databox/azext_databox/commands.py +++ b/src/databox/azext_databox/commands.py @@ -17,7 +17,7 @@ databox_jobs = CliCommandType( operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}', client_factory=cf_jobs) - with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g: + with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g: g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters) g.custom_command('update', 'update_databox_job') g.custom_command('delete', 'delete_databox_job', confirmation=True) diff --git a/src/databox/setup.py b/src/databox/setup.py --- a/src/databox/setup.py +++ b/src/databox/setup.py @@ -16,7 +16,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. -VERSION = '0.1.0' +VERSION = '0.1.1' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers
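A quick, hypothetical sanity check for this record's patch (not part of the repository; the script name and assertions are illustrative) could be run from the root of an azure-cli-extensions checkout after applying the diff above. It simply confirms the two changes the diff makes: the `is_experimental=True` flag is gone from the command group registration, and the extension version was bumped.

```python
# check_databox_ga.py -- hypothetical helper, assumes an azure-cli-extensions checkout
import re

with open("src/databox/azext_databox/commands.py") as f:
    commands_py = f.read()
with open("src/databox/setup.py") as f:
    setup_py = f.read()

# The GA transition removes the experimental marker from the command group.
assert "is_experimental" not in commands_py, "command group still marked experimental"

# The diff also bumps the extension version for the new release.
assert re.search(r"VERSION = '0\.1\.1'", setup_py), "setup.py version not bumped"

print("databox extension looks ready for GA")
```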
{"golden_diff": "diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py\n--- a/src/databox/azext_databox/commands.py\n+++ b/src/databox/azext_databox/commands.py\n@@ -17,7 +17,7 @@\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n- with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n+ with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\ndiff --git a/src/databox/setup.py b/src/databox/setup.py\n--- a/src/databox/setup.py\n+++ b/src/databox/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.1.0'\n+VERSION = '0.1.1'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "Transition to GA: databox\nCommand module `databox` has been released for a long time and is using stable sdk version `2019-09-01`.\r\n\r\nPlease check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\nfrom azext_databox._validators import validate_create_input_parameters\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_databox._client_factory import cf_jobs\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\n g.custom_show_command('show', 'get_databox_job')\n g.custom_command('list', 'list_databox_job')\n g.custom_command('cancel', 'cancel_databox_job', confirmation=True)\n g.custom_command('list-credentials', 'list_credentials_databox_job')\n", "path": "src/databox/azext_databox/commands.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.0'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='databox',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools DataBox Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_databox': ['azext_metadata.json']},\n)\n", "path": "src/databox/setup.py"}], "after_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\nfrom azext_databox._validators import validate_create_input_parameters\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_databox._client_factory import cf_jobs\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\n g.custom_show_command('show', 'get_databox_job')\n g.custom_command('list', 'list_databox_job')\n g.custom_command('cancel', 'cancel_databox_job', confirmation=True)\n g.custom_command('list-credentials', 'list_credentials_databox_job')\n", "path": "src/databox/azext_databox/commands.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.1'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='databox',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools DataBox Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_databox': ['azext_metadata.json']},\n)\n", "path": "src/databox/setup.py"}]}
1,315
347
gh_patches_debug_5162
rasdani/github-patches
git_diff
pretix__pretix-558
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Voucher not being counted correctly In the voucher by tag overview, the number sometimes just don't add up ;-) ![image](https://user-images.githubusercontent.com/654548/28206928-1c7ff030-6888-11e7-8891-14e5c794e730.png) So the progressbar is showing nothing, because it's class is set to 200%. Okay, that's kinda expected to not work ;-) Explanation for the numbers: - 2: Two vouchers are created with this tag - 4: Those two vouchers have been redeem twice each. So I guess we have to decide if the overview should show the amount of vouchers or the amount of possible voucher redemptions. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pretix/control/views/vouchers.py` Content: ``` 1 import csv 2 import io 3 4 from django.conf import settings 5 from django.contrib import messages 6 from django.core.urlresolvers import resolve, reverse 7 from django.db import transaction 8 from django.db.models import Count, Q, Sum 9 from django.http import ( 10 Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, 11 JsonResponse, 12 ) 13 from django.utils.timezone import now 14 from django.utils.translation import ugettext_lazy as _ 15 from django.views.generic import ( 16 CreateView, DeleteView, ListView, TemplateView, UpdateView, View, 17 ) 18 19 from pretix.base.models import Voucher 20 from pretix.base.models.vouchers import _generate_random_code 21 from pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm 22 from pretix.control.permissions import EventPermissionRequiredMixin 23 from pretix.control.signals import voucher_form_class 24 25 26 class VoucherList(EventPermissionRequiredMixin, ListView): 27 model = Voucher 28 context_object_name = 'vouchers' 29 paginate_by = 30 30 template_name = 'pretixcontrol/vouchers/index.html' 31 permission = 'can_view_vouchers' 32 33 def get_queryset(self): 34 qs = self.request.event.vouchers.all().select_related('item', 'variation') 35 if self.request.GET.get("search", "") != "": 36 s = self.request.GET.get("search", "").strip() 37 qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s)) 38 if self.request.GET.get("tag", "") != "": 39 s = self.request.GET.get("tag", "") 40 qs = qs.filter(tag__icontains=s) 41 if self.request.GET.get("status", "") != "": 42 s = self.request.GET.get("status", "") 43 if s == 'v': 44 qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=0) 45 elif s == 'r': 46 qs = qs.filter(redeemed__gt=0) 47 elif s == 'e': 48 qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=0) 49 if self.request.GET.get("subevent", "") != "": 50 s = self.request.GET.get("subevent", "") 51 qs = qs.filter(subevent_id=s) 52 return qs 53 54 def get(self, request, *args, **kwargs): 55 if request.GET.get("download", "") == "yes": 56 return self._download_csv() 57 return super().get(request, *args, **kwargs) 58 59 def _download_csv(self): 60 output = io.StringIO() 61 writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=",") 62 63 headers = [ 64 _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'), 65 _('Price effect'), _('Value'), _('Tag'), _('Redeemed'), _('Maximum usages') 66 ] 67 writer.writerow(headers) 68 69 for v in self.get_queryset(): 70 if v.item: 71 if v.variation: 72 prod = '%s – %s' 
% (str(v.item.name), str(v.variation.name)) 73 else: 74 prod = '%s' % str(v.item.name) 75 elif v.quota: 76 prod = _('Any product in quota "{quota}"').format(quota=str(v.quota.name)) 77 row = [ 78 v.code, 79 v.valid_until.isoformat() if v.valid_until else "", 80 prod, 81 _("Yes") if v.block_quota else _("No"), 82 _("Yes") if v.allow_ignore_quota else _("No"), 83 v.get_price_mode_display(), 84 str(v.value) if v.value is not None else "", 85 v.tag, 86 str(v.redeemed), 87 str(v.max_usages) 88 ] 89 writer.writerow(row) 90 91 r = HttpResponse(output.getvalue().encode("utf-8"), content_type='text/csv') 92 r['Content-Disposition'] = 'attachment; filename="vouchers.csv"' 93 return r 94 95 96 class VoucherTags(EventPermissionRequiredMixin, TemplateView): 97 template_name = 'pretixcontrol/vouchers/tags.html' 98 permission = 'can_view_vouchers' 99 100 def get_context_data(self, **kwargs): 101 ctx = super().get_context_data(**kwargs) 102 103 tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate( 104 total=Count('id'), 105 redeemed=Sum('redeemed') 106 ) 107 for t in tags: 108 t['percentage'] = int((t['redeemed'] / t['total']) * 100) 109 110 ctx['tags'] = tags 111 return ctx 112 113 114 class VoucherDelete(EventPermissionRequiredMixin, DeleteView): 115 model = Voucher 116 template_name = 'pretixcontrol/vouchers/delete.html' 117 permission = 'can_change_vouchers' 118 context_object_name = 'voucher' 119 120 def get_object(self, queryset=None) -> Voucher: 121 try: 122 return self.request.event.vouchers.get( 123 id=self.kwargs['voucher'] 124 ) 125 except Voucher.DoesNotExist: 126 raise Http404(_("The requested voucher does not exist.")) 127 128 def get(self, request, *args, **kwargs): 129 if self.get_object().redeemed > 0: 130 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.')) 131 return HttpResponseRedirect(self.get_success_url()) 132 return super().get(request, *args, **kwargs) 133 134 @transaction.atomic 135 def delete(self, request, *args, **kwargs): 136 self.object = self.get_object() 137 success_url = self.get_success_url() 138 139 if self.object.redeemed > 0: 140 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.')) 141 else: 142 self.object.log_action('pretix.voucher.deleted', user=self.request.user) 143 self.object.delete() 144 messages.success(request, _('The selected voucher has been deleted.')) 145 return HttpResponseRedirect(success_url) 146 147 def get_success_url(self) -> str: 148 return reverse('control:event.vouchers', kwargs={ 149 'organizer': self.request.event.organizer.slug, 150 'event': self.request.event.slug, 151 }) 152 153 154 class VoucherUpdate(EventPermissionRequiredMixin, UpdateView): 155 model = Voucher 156 template_name = 'pretixcontrol/vouchers/detail.html' 157 permission = 'can_change_vouchers' 158 context_object_name = 'voucher' 159 160 def get_form_class(self): 161 form_class = VoucherForm 162 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class): 163 if response: 164 form_class = response 165 return form_class 166 167 def get_object(self, queryset=None) -> VoucherForm: 168 url = resolve(self.request.path_info) 169 try: 170 return self.request.event.vouchers.get( 171 id=url.kwargs['voucher'] 172 ) 173 except Voucher.DoesNotExist: 174 raise Http404(_("The requested voucher does not exist.")) 175 176 @transaction.atomic 177 def form_valid(self, form): 178 messages.success(self.request, _('Your changes have been saved.')) 
179 if form.has_changed(): 180 self.object.log_action( 181 'pretix.voucher.changed', user=self.request.user, data={ 182 k: form.cleaned_data.get(k) for k in form.changed_data 183 } 184 ) 185 return super().form_valid(form) 186 187 def get_success_url(self) -> str: 188 return reverse('control:event.vouchers', kwargs={ 189 'organizer': self.request.event.organizer.slug, 190 'event': self.request.event.slug, 191 }) 192 193 194 class VoucherCreate(EventPermissionRequiredMixin, CreateView): 195 model = Voucher 196 template_name = 'pretixcontrol/vouchers/detail.html' 197 permission = 'can_change_vouchers' 198 context_object_name = 'voucher' 199 200 def get_form_class(self): 201 form_class = VoucherForm 202 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class): 203 if response: 204 form_class = response 205 return form_class 206 207 def get_success_url(self) -> str: 208 return reverse('control:event.vouchers', kwargs={ 209 'organizer': self.request.event.organizer.slug, 210 'event': self.request.event.slug, 211 }) 212 213 def get_form_kwargs(self): 214 kwargs = super().get_form_kwargs() 215 kwargs['instance'] = Voucher(event=self.request.event) 216 return kwargs 217 218 @transaction.atomic 219 def form_valid(self, form): 220 form.instance.event = self.request.event 221 messages.success(self.request, _('The new voucher has been created: {code}').format(code=form.instance.code)) 222 ret = super().form_valid(form) 223 form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user) 224 return ret 225 226 def post(self, request, *args, **kwargs): 227 # TODO: Transform this into an asynchronous call? 228 with request.event.lock(): 229 return super().post(request, *args, **kwargs) 230 231 232 class VoucherBulkCreate(EventPermissionRequiredMixin, CreateView): 233 model = Voucher 234 template_name = 'pretixcontrol/vouchers/bulk.html' 235 permission = 'can_change_vouchers' 236 context_object_name = 'voucher' 237 238 def get_success_url(self) -> str: 239 return reverse('control:event.vouchers', kwargs={ 240 'organizer': self.request.event.organizer.slug, 241 'event': self.request.event.slug, 242 }) 243 244 def get_form_kwargs(self): 245 kwargs = super().get_form_kwargs() 246 kwargs['instance'] = Voucher(event=self.request.event) 247 return kwargs 248 249 @transaction.atomic 250 def form_valid(self, form): 251 for o in form.save(self.request.event): 252 o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user) 253 messages.success(self.request, _('The new vouchers have been created.')) 254 return HttpResponseRedirect(self.get_success_url()) 255 256 def get_form_class(self): 257 form_class = VoucherBulkForm 258 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class): 259 if response: 260 form_class = response 261 return form_class 262 263 def get_context_data(self, **kwargs): 264 ctx = super().get_context_data(**kwargs) 265 ctx['code_length'] = settings.ENTROPY['voucher_code'] 266 return ctx 267 268 def post(self, request, *args, **kwargs): 269 # TODO: Transform this into an asynchronous call? 
270 with request.event.lock(): 271 return super().post(request, *args, **kwargs) 272 273 274 class VoucherRNG(EventPermissionRequiredMixin, View): 275 permission = 'can_change_vouchers' 276 277 def get(self, request, *args, **kwargs): 278 codes = set() 279 try: 280 num = int(request.GET.get('num', '5')) 281 except ValueError: # NOQA 282 return HttpResponseBadRequest() 283 284 prefix = request.GET.get('prefix') 285 while len(codes) < num: 286 new_codes = set() 287 for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER 288 new_codes.add(_generate_random_code(prefix=prefix)) 289 new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')]) 290 codes |= new_codes 291 292 return JsonResponse({ 293 'codes': list(codes) 294 }) 295 296 def get_success_url(self) -> str: 297 return reverse('control:event.vouchers', kwargs={ 298 'organizer': self.request.event.organizer.slug, 299 'event': self.request.event.slug, 300 }) 301 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py --- a/src/pretix/control/views/vouchers.py +++ b/src/pretix/control/views/vouchers.py @@ -101,7 +101,7 @@ ctx = super().get_context_data(**kwargs) tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate( - total=Count('id'), + total=Sum('max_usages'), redeemed=Sum('redeemed') ) for t in tags:
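A plain-Python illustration of why the one-line change above fixes the percentages (this is only the arithmetic; the real code uses Django's `Count` and `Sum` aggregates on the voucher queryset): with two vouchers under one tag, each allowing two redemptions and fully redeemed, counting voucher rows puts 4 redemptions over a total of 2, while summing `max_usages` puts them over 4.

```python
# Illustrative numbers taken from the issue: two vouchers with the same tag,
# each redeemable twice and redeemed twice.
vouchers = [
    {"tag": "demo", "max_usages": 2, "redeemed": 2},
    {"tag": "demo", "max_usages": 2, "redeemed": 2},
]

redeemed = sum(v["redeemed"] for v in vouchers)            # 4

# Old denominator, equivalent to Count('id'): number of voucher rows.
total_rows = len(vouchers)                                  # 2
print(int(redeemed / total_rows * 100))                     # 200 -> broken progress bar

# New denominator, equivalent to Sum('max_usages'): possible redemptions.
total_usages = sum(v["max_usages"] for v in vouchers)       # 4
print(int(redeemed / total_usages * 100))                   # 100
```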
{"golden_diff": "diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py\n--- a/src/pretix/control/views/vouchers.py\n+++ b/src/pretix/control/views/vouchers.py\n@@ -101,7 +101,7 @@\n ctx = super().get_context_data(**kwargs)\n \n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n- total=Count('id'),\n+ total=Sum('max_usages'),\n redeemed=Sum('redeemed')\n )\n for t in tags:\n", "issue": "Voucher not being counted correctly\nIn the voucher by tag overview, the number sometimes just don't add up ;-)\r\n\r\n![image](https://user-images.githubusercontent.com/654548/28206928-1c7ff030-6888-11e7-8891-14e5c794e730.png)\r\n\r\nSo the progressbar is showing nothing, because it's class is set to 200%. Okay, that's kinda expected to not work ;-)\r\n\r\nExplanation for the numbers:\r\n- 2: Two vouchers are created with this tag\r\n- 4: Those two vouchers have been redeem twice each.\r\n\r\nSo I guess we have to decide if the overview should show the amount of vouchers or the amount of possible voucher redemptions.\n", "before_files": [{"content": "import csv\nimport io\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.urlresolvers import resolve, reverse\nfrom django.db import transaction\nfrom django.db.models import Count, Q, Sum\nfrom django.http import (\n Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (\n CreateView, DeleteView, ListView, TemplateView, UpdateView, View,\n)\n\nfrom pretix.base.models import Voucher\nfrom pretix.base.models.vouchers import _generate_random_code\nfrom pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.control.signals import voucher_form_class\n\n\nclass VoucherList(EventPermissionRequiredMixin, ListView):\n model = Voucher\n context_object_name = 'vouchers'\n paginate_by = 30\n template_name = 'pretixcontrol/vouchers/index.html'\n permission = 'can_view_vouchers'\n\n def get_queryset(self):\n qs = self.request.event.vouchers.all().select_related('item', 'variation')\n if self.request.GET.get(\"search\", \"\") != \"\":\n s = self.request.GET.get(\"search\", \"\").strip()\n qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))\n if self.request.GET.get(\"tag\", \"\") != \"\":\n s = self.request.GET.get(\"tag\", \"\")\n qs = qs.filter(tag__icontains=s)\n if self.request.GET.get(\"status\", \"\") != \"\":\n s = self.request.GET.get(\"status\", \"\")\n if s == 'v':\n qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=0)\n elif s == 'r':\n qs = qs.filter(redeemed__gt=0)\n elif s == 'e':\n qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=0)\n if self.request.GET.get(\"subevent\", \"\") != \"\":\n s = self.request.GET.get(\"subevent\", \"\")\n qs = qs.filter(subevent_id=s)\n return qs\n\n def get(self, request, *args, **kwargs):\n if request.GET.get(\"download\", \"\") == \"yes\":\n return self._download_csv()\n return super().get(request, *args, **kwargs)\n\n def _download_csv(self):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=\",\")\n\n headers = [\n _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass 
quota'),\n _('Price effect'), _('Value'), _('Tag'), _('Redeemed'), _('Maximum usages')\n ]\n writer.writerow(headers)\n\n for v in self.get_queryset():\n if v.item:\n if v.variation:\n prod = '%s \u2013 %s' % (str(v.item.name), str(v.variation.name))\n else:\n prod = '%s' % str(v.item.name)\n elif v.quota:\n prod = _('Any product in quota \"{quota}\"').format(quota=str(v.quota.name))\n row = [\n v.code,\n v.valid_until.isoformat() if v.valid_until else \"\",\n prod,\n _(\"Yes\") if v.block_quota else _(\"No\"),\n _(\"Yes\") if v.allow_ignore_quota else _(\"No\"),\n v.get_price_mode_display(),\n str(v.value) if v.value is not None else \"\",\n v.tag,\n str(v.redeemed),\n str(v.max_usages)\n ]\n writer.writerow(row)\n\n r = HttpResponse(output.getvalue().encode(\"utf-8\"), content_type='text/csv')\n r['Content-Disposition'] = 'attachment; filename=\"vouchers.csv\"'\n return r\n\n\nclass VoucherTags(EventPermissionRequiredMixin, TemplateView):\n template_name = 'pretixcontrol/vouchers/tags.html'\n permission = 'can_view_vouchers'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n total=Count('id'),\n redeemed=Sum('redeemed')\n )\n for t in tags:\n t['percentage'] = int((t['redeemed'] / t['total']) * 100)\n\n ctx['tags'] = tags\n return ctx\n\n\nclass VoucherDelete(EventPermissionRequiredMixin, DeleteView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/delete.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_object(self, queryset=None) -> Voucher:\n try:\n return self.request.event.vouchers.get(\n id=self.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n def get(self, request, *args, **kwargs):\n if self.get_object().redeemed > 0:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n return HttpResponseRedirect(self.get_success_url())\n return super().get(request, *args, **kwargs)\n\n @transaction.atomic\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n\n if self.object.redeemed > 0:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n else:\n self.object.log_action('pretix.voucher.deleted', user=self.request.user)\n self.object.delete()\n messages.success(request, _('The selected voucher has been deleted.'))\n return HttpResponseRedirect(success_url)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherUpdate(EventPermissionRequiredMixin, UpdateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_object(self, queryset=None) -> VoucherForm:\n url = resolve(self.request.path_info)\n try:\n return self.request.event.vouchers.get(\n id=url.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n @transaction.atomic\n def form_valid(self, form):\n 
messages.success(self.request, _('Your changes have been saved.'))\n if form.has_changed():\n self.object.log_action(\n 'pretix.voucher.changed', user=self.request.user, data={\n k: form.cleaned_data.get(k) for k in form.changed_data\n }\n )\n return super().form_valid(form)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n messages.success(self.request, _('The new voucher has been created: {code}').format(code=form.instance.code))\n ret = super().form_valid(form)\n form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)\n return ret\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/bulk.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n for o in form.save(self.request.event):\n o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)\n messages.success(self.request, _('The new vouchers have been created.'))\n return HttpResponseRedirect(self.get_success_url())\n\n def get_form_class(self):\n form_class = VoucherBulkForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['code_length'] = settings.ENTROPY['voucher_code']\n return ctx\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherRNG(EventPermissionRequiredMixin, View):\n permission = 'can_change_vouchers'\n\n def get(self, request, *args, **kwargs):\n codes = set()\n try:\n num = int(request.GET.get('num', '5'))\n except ValueError: # NOQA\n return HttpResponseBadRequest()\n\n prefix = request.GET.get('prefix')\n while len(codes) < num:\n new_codes = set()\n for i in range(min(num - len(codes), 500)): # Work around SQLite's 
SQLITE_MAX_VARIABLE_NUMBER\n new_codes.add(_generate_random_code(prefix=prefix))\n new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])\n codes |= new_codes\n\n return JsonResponse({\n 'codes': list(codes)\n })\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n", "path": "src/pretix/control/views/vouchers.py"}], "after_files": [{"content": "import csv\nimport io\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.urlresolvers import resolve, reverse\nfrom django.db import transaction\nfrom django.db.models import Count, Q, Sum\nfrom django.http import (\n Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (\n CreateView, DeleteView, ListView, TemplateView, UpdateView, View,\n)\n\nfrom pretix.base.models import Voucher\nfrom pretix.base.models.vouchers import _generate_random_code\nfrom pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.control.signals import voucher_form_class\n\n\nclass VoucherList(EventPermissionRequiredMixin, ListView):\n model = Voucher\n context_object_name = 'vouchers'\n paginate_by = 30\n template_name = 'pretixcontrol/vouchers/index.html'\n permission = 'can_view_vouchers'\n\n def get_queryset(self):\n qs = self.request.event.vouchers.all().select_related('item', 'variation')\n if self.request.GET.get(\"search\", \"\") != \"\":\n s = self.request.GET.get(\"search\", \"\").strip()\n qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))\n if self.request.GET.get(\"tag\", \"\") != \"\":\n s = self.request.GET.get(\"tag\", \"\")\n qs = qs.filter(tag__icontains=s)\n if self.request.GET.get(\"status\", \"\") != \"\":\n s = self.request.GET.get(\"status\", \"\")\n if s == 'v':\n qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=0)\n elif s == 'r':\n qs = qs.filter(redeemed__gt=0)\n elif s == 'e':\n qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=0)\n if self.request.GET.get(\"subevent\", \"\") != \"\":\n s = self.request.GET.get(\"subevent\", \"\")\n qs = qs.filter(subevent_id=s)\n return qs\n\n def get(self, request, *args, **kwargs):\n if request.GET.get(\"download\", \"\") == \"yes\":\n return self._download_csv()\n return super().get(request, *args, **kwargs)\n\n def _download_csv(self):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=\",\")\n\n headers = [\n _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),\n _('Price effect'), _('Value'), _('Tag'), _('Redeemed'), _('Maximum usages')\n ]\n writer.writerow(headers)\n\n for v in self.get_queryset():\n if v.item:\n if v.variation:\n prod = '%s \u2013 %s' % (str(v.item.name), str(v.variation.name))\n else:\n prod = '%s' % str(v.item.name)\n elif v.quota:\n prod = _('Any product in quota \"{quota}\"').format(quota=str(v.quota.name))\n row = [\n v.code,\n v.valid_until.isoformat() if v.valid_until else \"\",\n prod,\n _(\"Yes\") if v.block_quota else _(\"No\"),\n _(\"Yes\") if v.allow_ignore_quota else _(\"No\"),\n v.get_price_mode_display(),\n str(v.value) if 
v.value is not None else \"\",\n v.tag,\n str(v.redeemed),\n str(v.max_usages)\n ]\n writer.writerow(row)\n\n r = HttpResponse(output.getvalue().encode(\"utf-8\"), content_type='text/csv')\n r['Content-Disposition'] = 'attachment; filename=\"vouchers.csv\"'\n return r\n\n\nclass VoucherTags(EventPermissionRequiredMixin, TemplateView):\n template_name = 'pretixcontrol/vouchers/tags.html'\n permission = 'can_view_vouchers'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n total=Sum('max_usages'),\n redeemed=Sum('redeemed')\n )\n for t in tags:\n t['percentage'] = int((t['redeemed'] / t['total']) * 100)\n\n ctx['tags'] = tags\n return ctx\n\n\nclass VoucherDelete(EventPermissionRequiredMixin, DeleteView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/delete.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_object(self, queryset=None) -> Voucher:\n try:\n return self.request.event.vouchers.get(\n id=self.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n def get(self, request, *args, **kwargs):\n if self.get_object().redeemed > 0:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n return HttpResponseRedirect(self.get_success_url())\n return super().get(request, *args, **kwargs)\n\n @transaction.atomic\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n\n if self.object.redeemed > 0:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n else:\n self.object.log_action('pretix.voucher.deleted', user=self.request.user)\n self.object.delete()\n messages.success(request, _('The selected voucher has been deleted.'))\n return HttpResponseRedirect(success_url)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherUpdate(EventPermissionRequiredMixin, UpdateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_object(self, queryset=None) -> VoucherForm:\n url = resolve(self.request.path_info)\n try:\n return self.request.event.vouchers.get(\n id=url.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n @transaction.atomic\n def form_valid(self, form):\n messages.success(self.request, _('Your changes have been saved.'))\n if form.has_changed():\n self.object.log_action(\n 'pretix.voucher.changed', user=self.request.user, data={\n k: form.cleaned_data.get(k) for k in form.changed_data\n }\n )\n return super().form_valid(form)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n 
context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n messages.success(self.request, _('The new voucher has been created: {code}').format(code=form.instance.code))\n ret = super().form_valid(form)\n form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)\n return ret\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/bulk.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n for o in form.save(self.request.event):\n o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)\n messages.success(self.request, _('The new vouchers have been created.'))\n return HttpResponseRedirect(self.get_success_url())\n\n def get_form_class(self):\n form_class = VoucherBulkForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['code_length'] = settings.ENTROPY['voucher_code']\n return ctx\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherRNG(EventPermissionRequiredMixin, View):\n permission = 'can_change_vouchers'\n\n def get(self, request, *args, **kwargs):\n codes = set()\n try:\n num = int(request.GET.get('num', '5'))\n except ValueError: # NOQA\n return HttpResponseBadRequest()\n\n prefix = request.GET.get('prefix')\n while len(codes) < num:\n new_codes = set()\n for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER\n new_codes.add(_generate_random_code(prefix=prefix))\n new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])\n codes |= new_codes\n\n return JsonResponse({\n 'codes': list(codes)\n })\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n", "path": "src/pretix/control/views/vouchers.py"}]}
3,711
134
gh_patches_debug_37030
rasdani/github-patches
git_diff
RedHatInsights__insights-core-2713
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- yum.repos.d parser is too tolerant The yum repos d parser will accept almost anything, including binary. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `insights/parsers/yum_repos_d.py` Content: ``` 1 from .. import Parser, parser, get_active_lines, LegacyItemAccess 2 from insights.specs import Specs 3 4 5 @parser(Specs.yum_repos_d) 6 class YumReposD(LegacyItemAccess, Parser): 7 """Class to parse the files under ``yum.repos.d`` """ 8 9 def get(self, key): 10 return self.data.get(key) 11 12 def parse_content(self, content): 13 ''' 14 Return an object contains a dict. 15 { 16 "rhel-source": { 17 "gpgcheck": "1", 18 "gpgkey": ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release", 19 "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak"] 20 "enabled": "0", 21 "name": "Red Hat Enterprise Linux $releasever - $basearch - Source", 22 "baseurl": "ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/" 23 } 24 } 25 ---------------------------------------------------- 26 There are several files in 'yum.repos.d' directory, which have the same 27 format. For example: 28 --------one of the files : rhel-source.repo--------- 29 [rhel-source] 30 name=Red Hat Enterprise Linux $releasever - $basearch - Source 31 baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/ 32 enabled=0 33 gpgcheck=1 34 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release 35 file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak 36 ''' 37 repos_dict = {} 38 section_dict = {} 39 key = None 40 for line in get_active_lines(content): 41 if line.startswith('['): 42 section_dict = {} 43 repos_dict[line[1:-1]] = section_dict 44 elif '=' in line: 45 key, value = [s.strip() for s in line.split("=", 1)] 46 if key in ('baseurl', 'gpgkey'): 47 section_dict[key] = [v.strip() for v in value.split(",")] 48 else: 49 section_dict[key] = value 50 else: 51 if key and isinstance(section_dict[key], list): 52 section_dict[key].extend(v.strip() for v in line.split(",")) 53 # Otherwise ignore line if no key or we don't store multiple values 54 self.data = repos_dict 55 56 def __iter__(self): 57 for repo in self.data: 58 yield repo 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py --- a/insights/parsers/yum_repos_d.py +++ b/insights/parsers/yum_repos_d.py @@ -1,5 +1,39 @@ -from .. import Parser, parser, get_active_lines, LegacyItemAccess +import re +import string + +from .. import Parser, parser, LegacyItemAccess from insights.specs import Specs +from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt, + skip_none, String, WithIndent, WS) + + +header_chars = (set(string.printable) - set(string.whitespace) - set("[]")) | set(" ") +sep_chars = set(":=") +key_chars = header_chars - sep_chars - set(" ") +value_chars = set(string.printable) - set("\n\r") + +LeftEnd = WS >> Char("[") << WS +RightEnd = WS >> Char("]") << WS +Header = LeftEnd >> String(header_chars) << RightEnd +Key = WS >> String(key_chars) << WS +Sep = InSet(sep_chars) +Value = WS >> HangingString(value_chars) +KVPair = WithIndent(Key + Opt(Sep >> Value)) +Comment = WS >> (OneLineComment("#") | OneLineComment(";")).map(lambda x: None) + +Line = Comment | KVPair.map(tuple) +Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple) +Doc = Many(Comment | Sect).map(skip_none).map(dict) +Top = Doc << WS << EOF + + +def parse_yum_repos(content): + doc = Top(content) + for k, v in doc.items(): + for special in ("baseurl", "gpgkey"): + if special in v: + v[special] = [i.strip() for i in re.split(",| ", v[special])] + return doc @parser(Specs.yum_repos_d) @@ -10,7 +44,7 @@ return self.data.get(key) def parse_content(self, content): - ''' + """ Return an object contains a dict. { "rhel-source": { @@ -33,25 +67,8 @@ gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak - ''' - repos_dict = {} - section_dict = {} - key = None - for line in get_active_lines(content): - if line.startswith('['): - section_dict = {} - repos_dict[line[1:-1]] = section_dict - elif '=' in line: - key, value = [s.strip() for s in line.split("=", 1)] - if key in ('baseurl', 'gpgkey'): - section_dict[key] = [v.strip() for v in value.split(",")] - else: - section_dict[key] = value - else: - if key and isinstance(section_dict[key], list): - section_dict[key].extend(v.strip() for v in line.split(",")) - # Otherwise ignore line if no key or we don't store multiple values - self.data = repos_dict + """ + self.data = parse_yum_repos("\n".join(content)) def __iter__(self): for repo in self.data:
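Assuming an insights-core checkout that already includes the patch above, the reworked parsr-based parser might be exercised roughly as in the sketch below. The sample repo text mirrors the format shown in the parser's docstring; the expected values in the comments are illustrative, not copied from the project's test suite.

```python
# Sketch only: requires insights-core (insights.parsr) with the patch applied.
from insights.parsers.yum_repos_d import parse_yum_repos

sample = """\
[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/
enabled=0
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
       file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
"""

repos = parse_yum_repos(sample)
print(repos["rhel-source"]["enabled"])   # expected: '0'
print(repos["rhel-source"]["gpgkey"])    # expected: a list with both key file URLs

# The point of the change: input that is not INI-style text is now rejected
# instead of being silently accepted by the old line-by-line loop.
try:
    parse_yum_repos("\x00\x01 definitely not a repo file")
except Exception as exc:
    print("rejected non-repo input:", type(exc).__name__)
```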
{"golden_diff": "diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py\n--- a/insights/parsers/yum_repos_d.py\n+++ b/insights/parsers/yum_repos_d.py\n@@ -1,5 +1,39 @@\n-from .. import Parser, parser, get_active_lines, LegacyItemAccess\n+import re\n+import string\n+\n+from .. import Parser, parser, LegacyItemAccess\n from insights.specs import Specs\n+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,\n+ skip_none, String, WithIndent, WS)\n+\n+\n+header_chars = (set(string.printable) - set(string.whitespace) - set(\"[]\")) | set(\" \")\n+sep_chars = set(\":=\")\n+key_chars = header_chars - sep_chars - set(\" \")\n+value_chars = set(string.printable) - set(\"\\n\\r\")\n+\n+LeftEnd = WS >> Char(\"[\") << WS\n+RightEnd = WS >> Char(\"]\") << WS\n+Header = LeftEnd >> String(header_chars) << RightEnd\n+Key = WS >> String(key_chars) << WS\n+Sep = InSet(sep_chars)\n+Value = WS >> HangingString(value_chars)\n+KVPair = WithIndent(Key + Opt(Sep >> Value))\n+Comment = WS >> (OneLineComment(\"#\") | OneLineComment(\";\")).map(lambda x: None)\n+\n+Line = Comment | KVPair.map(tuple)\n+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)\n+Doc = Many(Comment | Sect).map(skip_none).map(dict)\n+Top = Doc << WS << EOF\n+\n+\n+def parse_yum_repos(content):\n+ doc = Top(content)\n+ for k, v in doc.items():\n+ for special in (\"baseurl\", \"gpgkey\"):\n+ if special in v:\n+ v[special] = [i.strip() for i in re.split(\",| \", v[special])]\n+ return doc\n \n \n @parser(Specs.yum_repos_d)\n@@ -10,7 +44,7 @@\n return self.data.get(key)\n \n def parse_content(self, content):\n- '''\n+ \"\"\"\n Return an object contains a dict.\n {\n \"rhel-source\": {\n@@ -33,25 +67,8 @@\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n- '''\n- repos_dict = {}\n- section_dict = {}\n- key = None\n- for line in get_active_lines(content):\n- if line.startswith('['):\n- section_dict = {}\n- repos_dict[line[1:-1]] = section_dict\n- elif '=' in line:\n- key, value = [s.strip() for s in line.split(\"=\", 1)]\n- if key in ('baseurl', 'gpgkey'):\n- section_dict[key] = [v.strip() for v in value.split(\",\")]\n- else:\n- section_dict[key] = value\n- else:\n- if key and isinstance(section_dict[key], list):\n- section_dict[key].extend(v.strip() for v in line.split(\",\"))\n- # Otherwise ignore line if no key or we don't store multiple values\n- self.data = repos_dict\n+ \"\"\"\n+ self.data = parse_yum_repos(\"\\n\".join(content))\n \n def __iter__(self):\n for repo in self.data:\n", "issue": "yum.repos.d parser is too tolerant\nThe yum repos d parser will accept almost anything, including binary.\n", "before_files": [{"content": "from .. 
import Parser, parser, get_active_lines, LegacyItemAccess\nfrom insights.specs import Specs\n\n\n@parser(Specs.yum_repos_d)\nclass YumReposD(LegacyItemAccess, Parser):\n \"\"\"Class to parse the files under ``yum.repos.d`` \"\"\"\n\n def get(self, key):\n return self.data.get(key)\n\n def parse_content(self, content):\n '''\n Return an object contains a dict.\n {\n \"rhel-source\": {\n \"gpgcheck\": \"1\",\n \"gpgkey\": [\"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\",\n \"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\"]\n \"enabled\": \"0\",\n \"name\": \"Red Hat Enterprise Linux $releasever - $basearch - Source\",\n \"baseurl\": \"ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\"\n }\n }\n ----------------------------------------------------\n There are several files in 'yum.repos.d' directory, which have the same\n format. For example:\n --------one of the files : rhel-source.repo---------\n [rhel-source]\n name=Red Hat Enterprise Linux $releasever - $basearch - Source\n baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\n enabled=0\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n '''\n repos_dict = {}\n section_dict = {}\n key = None\n for line in get_active_lines(content):\n if line.startswith('['):\n section_dict = {}\n repos_dict[line[1:-1]] = section_dict\n elif '=' in line:\n key, value = [s.strip() for s in line.split(\"=\", 1)]\n if key in ('baseurl', 'gpgkey'):\n section_dict[key] = [v.strip() for v in value.split(\",\")]\n else:\n section_dict[key] = value\n else:\n if key and isinstance(section_dict[key], list):\n section_dict[key].extend(v.strip() for v in line.split(\",\"))\n # Otherwise ignore line if no key or we don't store multiple values\n self.data = repos_dict\n\n def __iter__(self):\n for repo in self.data:\n yield repo\n", "path": "insights/parsers/yum_repos_d.py"}], "after_files": [{"content": "import re\nimport string\n\nfrom .. 
import Parser, parser, LegacyItemAccess\nfrom insights.specs import Specs\nfrom insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,\n skip_none, String, WithIndent, WS)\n\n\nheader_chars = (set(string.printable) - set(string.whitespace) - set(\"[]\")) | set(\" \")\nsep_chars = set(\":=\")\nkey_chars = header_chars - sep_chars - set(\" \")\nvalue_chars = set(string.printable) - set(\"\\n\\r\")\n\nLeftEnd = WS >> Char(\"[\") << WS\nRightEnd = WS >> Char(\"]\") << WS\nHeader = LeftEnd >> String(header_chars) << RightEnd\nKey = WS >> String(key_chars) << WS\nSep = InSet(sep_chars)\nValue = WS >> HangingString(value_chars)\nKVPair = WithIndent(Key + Opt(Sep >> Value))\nComment = WS >> (OneLineComment(\"#\") | OneLineComment(\";\")).map(lambda x: None)\n\nLine = Comment | KVPair.map(tuple)\nSect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)\nDoc = Many(Comment | Sect).map(skip_none).map(dict)\nTop = Doc << WS << EOF\n\n\ndef parse_yum_repos(content):\n doc = Top(content)\n for k, v in doc.items():\n for special in (\"baseurl\", \"gpgkey\"):\n if special in v:\n v[special] = [i.strip() for i in re.split(\",| \", v[special])]\n return doc\n\n\n@parser(Specs.yum_repos_d)\nclass YumReposD(LegacyItemAccess, Parser):\n \"\"\"Class to parse the files under ``yum.repos.d`` \"\"\"\n\n def get(self, key):\n return self.data.get(key)\n\n def parse_content(self, content):\n \"\"\"\n Return an object contains a dict.\n {\n \"rhel-source\": {\n \"gpgcheck\": \"1\",\n \"gpgkey\": [\"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\",\n \"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\"]\n \"enabled\": \"0\",\n \"name\": \"Red Hat Enterprise Linux $releasever - $basearch - Source\",\n \"baseurl\": \"ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\"\n }\n }\n ----------------------------------------------------\n There are several files in 'yum.repos.d' directory, which have the same\n format. For example:\n --------one of the files : rhel-source.repo---------\n [rhel-source]\n name=Red Hat Enterprise Linux $releasever - $basearch - Source\n baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\n enabled=0\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n \"\"\"\n self.data = parse_yum_repos(\"\\n\".join(content))\n\n def __iter__(self):\n for repo in self.data:\n yield repo\n", "path": "insights/parsers/yum_repos_d.py"}]}
952
801
gh_patches_debug_16524
rasdani/github-patches
git_diff
keras-team__autokeras-325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Temp folder conflict <!--- **If you are reporting a bug:** * Verify that your issue is not being currently addressed by other issues or pull requests. * Please note that Auto-Keras is only compatible with **Python 3.6**. * Tag the issue with the `bug report` tag. --> ### Bug Description <!--- --> Temp folder generated will be conflict if one server has multiple users use the package. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `autokeras/utils.py` Content: ``` 1 import csv 2 import os 3 import pickle 4 import sys 5 import tempfile 6 import zipfile 7 8 import warnings 9 import imageio 10 import numpy 11 import requests 12 from skimage.transform import resize 13 import torch 14 import subprocess 15 16 from autokeras.constant import Constant 17 18 19 class NoImprovementError(Exception): 20 def __init__(self, message): 21 self.message = message 22 23 24 def ensure_dir(directory): 25 """Create directory if it does not exist.""" 26 if not os.path.exists(directory): 27 os.makedirs(directory) 28 29 30 def ensure_file_dir(path): 31 """Create path if it does not exist.""" 32 ensure_dir(os.path.dirname(path)) 33 34 35 def has_file(path): 36 """Check if the given path exists.""" 37 return os.path.exists(path) 38 39 40 def pickle_from_file(path): 41 """Load the pickle file from the provided path and returns the object.""" 42 return pickle.load(open(path, 'rb')) 43 44 45 def pickle_to_file(obj, path): 46 """Save the pickle file to the specified path.""" 47 pickle.dump(obj, open(path, 'wb')) 48 49 50 def get_device(): 51 """ If CUDA is available, use CUDA device, else use CPU device. 52 53 When choosing from CUDA devices, this function will choose the one with max memory available. 54 55 Returns: string device name. 56 """ 57 # TODO: could use gputil in the future 58 device = 'cpu' 59 if torch.cuda.is_available(): 60 try: 61 # smi_out= 62 # Free : xxxxxx MiB 63 # Free : xxxxxx MiB 64 # .... 65 smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True) 66 if isinstance(smi_out, bytes): 67 smi_out = smi_out.decode('utf-8') 68 print(smi_out) 69 except subprocess.SubprocessError: 70 warnings.warn('Cuda device successfully detected. 
However, nvidia-smi cannot be invoked') 71 return 'cpu' 72 visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',') 73 if len(visible_devices) == 1 and visible_devices[0] == '': 74 visible_devices = [] 75 visible_devices = [int(x) for x in visible_devices] 76 memory_available = [int(x.split()[2]) for x in smi_out.splitlines()] 77 for cuda_index, _ in enumerate(memory_available): 78 if cuda_index not in visible_devices and visible_devices: 79 memory_available[cuda_index] = 0 80 81 if memory_available: 82 if max(memory_available) != 0: 83 device = 'cuda:' + str(memory_available.index(max(memory_available))) 84 return device 85 86 87 def temp_folder_generator(): 88 """Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras).""" 89 sys_temp = tempfile.gettempdir() 90 path = os.path.join(sys_temp, 'autokeras') 91 ensure_dir(path) 92 return path 93 94 95 def download_file(file_link, file_path): 96 """Download the file specified in `file_link` and saves it in `file_path`.""" 97 if not os.path.exists(file_path): 98 with open(file_path, "wb") as f: 99 print("Downloading %s" % file_path) 100 response = requests.get(file_link, stream=True) 101 total_length = response.headers.get('content-length') 102 103 if total_length is None: # no content length header 104 f.write(response.content) 105 else: 106 dl = 0 107 total_length = int(total_length) 108 for data in response.iter_content(chunk_size=4096): 109 dl += len(data) 110 f.write(data) 111 done = int(50 * dl / total_length) 112 sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done))) 113 sys.stdout.flush() 114 115 116 def download_file_with_extract(file_link, file_path, extract_path): 117 """Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`.""" 118 if not os.path.exists(extract_path): 119 download_file(file_link, file_path) 120 zip_ref = zipfile.ZipFile(file_path, 'r') 121 print("extracting downloaded file...") 122 zip_ref.extractall(extract_path) 123 os.remove(file_path) 124 print("extracted and removed downloaded zip file") 125 print("file already extracted in the path %s" % extract_path) 126 127 128 def verbose_print(new_father_id, new_graph): 129 """Print information about the operation performed on father model to obtain current model and father's id.""" 130 cell_size = [24, 49] 131 header = ['Father Model ID', 'Added Operation'] 132 line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(header)) 133 print('\n' + '+' + '-' * len(line) + '+') 134 print('|' + line + '|') 135 print('+' + '-' * len(line) + '+') 136 for i in range(len(new_graph.operation_history)): 137 if i == len(new_graph.operation_history) // 2: 138 r = [new_father_id, new_graph.operation_history[i]] 139 else: 140 r = [' ', new_graph.operation_history[i]] 141 line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(r)) 142 print('|' + line + '|') 143 print('+' + '-' * len(line) + '+') 144 145 146 def validate_xy(x_train, y_train): 147 """Validate `x_train`'s type and the shape of `x_train`, `y_train`.""" 148 try: 149 x_train = x_train.astype('float64') 150 except ValueError: 151 raise ValueError('x_train should only contain numerical data.') 152 153 if len(x_train.shape) < 2: 154 raise ValueError('x_train should at least has 2 dimensions.') 155 156 if x_train.shape[0] != y_train.shape[0]: 157 raise ValueError('x_train and y_train should have the same number of instances.') 158 159 160 def read_csv_file(csv_file_path): 161 """Read the csv 
file and returns two separate list containing file names and their labels. 162 163 Args: 164 csv_file_path: Path to the CSV file. 165 166 Returns: 167 file_names: List containing files names. 168 file_label: List containing their respective labels. 169 """ 170 file_names = [] 171 file_labels = [] 172 with open(csv_file_path, 'r') as files_path: 173 path_list = csv.DictReader(files_path) 174 fieldnames = path_list.fieldnames 175 for path in path_list: 176 file_names.append(path[fieldnames[0]]) 177 file_labels.append(path[fieldnames[1]]) 178 return file_names, file_labels 179 180 181 def read_image(img_path): 182 """Read the image contained in the provided path `image_path`.""" 183 img = imageio.imread(uri=img_path) 184 return img 185 186 187 def compute_image_resize_params(data): 188 """Compute median height and width of all images in data. 189 190 These values are used to resize the images at later point. Number of channels do not change from the original 191 images. Currently, only 2-D images are supported. 192 193 Args: 194 data: 2-D Image data with shape N x H x W x C. 195 196 Returns: 197 median height: Median height of all images in the data. 198 median width: Median width of all images in the data. 199 """ 200 median_height, median_width = numpy.median(numpy.array(list(map(lambda x: x.shape, data))), axis=0)[:2] 201 202 if median_height * median_width > Constant.MAX_IMAGE_SIZE: 203 reduction_factor = numpy.sqrt(median_height * median_width / Constant.MAX_IMAGE_SIZE) 204 median_height = median_height / reduction_factor 205 median_width = median_width / reduction_factor 206 207 return int(median_height), int(median_width) 208 209 210 def resize_image_data(data, height, weight): 211 """Resize images to provided height and width. 212 213 Resize all images in data to size h x w x c, where h is the height, w is the width and c is the number of channels. 214 The number of channels c does not change from data. The function supports only 2-D image data. 215 216 Args: 217 data: 2-D Image data with shape N x H x W x C. 218 h: Image resize height. 219 w: Image resize width. 220 221 Returns: 222 data: Resize data. 223 """ 224 225 output_data = [] 226 for im in data: 227 if len(im.shape) != 3: 228 return data 229 output_data.append(resize(image=im, 230 output_shape=(height, weight, im.shape[-1]), 231 mode='edge', 232 preserve_range=True)) 233 234 return numpy.array(output_data) 235 236 237 def get_system(): 238 """Get the current system environment. If the current system is not supported, raise an exception. 239 240 Returns: 241 A string to represent the current OS name. 242 "posix" stands for Linux, Mac or Solaris architecture. 243 "nt" stands for Windows system. 244 """ 245 print(os.name) 246 if 'google.colab' in sys.modules: 247 return Constant.SYS_GOOGLE_COLAB 248 if os.name == 'posix': 249 return Constant.SYS_LINUX 250 if os.name == 'nt': 251 return Constant.SYS_WINDOWS 252 raise EnvironmentError('Unsupported environment') 253 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/autokeras/utils.py b/autokeras/utils.py --- a/autokeras/utils.py +++ b/autokeras/utils.py @@ -12,7 +12,8 @@ from skimage.transform import resize import torch import subprocess - +import string +import random from autokeras.constant import Constant @@ -86,8 +87,11 @@ def temp_folder_generator(): """Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras).""" + chars = string.ascii_uppercase + string.digits + size = 6 sys_temp = tempfile.gettempdir() - path = os.path.join(sys_temp, 'autokeras') + random_suffix = ''.join(random.choice(chars) for _ in range(size)) + path = os.path.join(sys_temp, 'autokeras_' + random_suffix) ensure_dir(path) return path
{"golden_diff": "diff --git a/autokeras/utils.py b/autokeras/utils.py\n--- a/autokeras/utils.py\n+++ b/autokeras/utils.py\n@@ -12,7 +12,8 @@\n from skimage.transform import resize\n import torch\n import subprocess\n-\n+import string\n+import random\n from autokeras.constant import Constant\n \n \n@@ -86,8 +87,11 @@\n \n def temp_folder_generator():\n \"\"\"Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras).\"\"\"\n+ chars = string.ascii_uppercase + string.digits\n+ size = 6\n sys_temp = tempfile.gettempdir()\n- path = os.path.join(sys_temp, 'autokeras')\n+ random_suffix = ''.join(random.choice(chars) for _ in range(size))\n+ path = os.path.join(sys_temp, 'autokeras_' + random_suffix)\n ensure_dir(path)\n return path\n", "issue": "Temp folder conflict\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\n<!---\r\n\r\n-->\r\nTemp folder generated will be conflict if one server has multiple users use the package.\r\n\r\n\n", "before_files": [{"content": "import csv\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport zipfile\n\nimport warnings\nimport imageio\nimport numpy\nimport requests\nfrom skimage.transform import resize\nimport torch\nimport subprocess\n\nfrom autokeras.constant import Constant\n\n\nclass NoImprovementError(Exception):\n def __init__(self, message):\n self.message = message\n\n\ndef ensure_dir(directory):\n \"\"\"Create directory if it does not exist.\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef ensure_file_dir(path):\n \"\"\"Create path if it does not exist.\"\"\"\n ensure_dir(os.path.dirname(path))\n\n\ndef has_file(path):\n \"\"\"Check if the given path exists.\"\"\"\n return os.path.exists(path)\n\n\ndef pickle_from_file(path):\n \"\"\"Load the pickle file from the provided path and returns the object.\"\"\"\n return pickle.load(open(path, 'rb'))\n\n\ndef pickle_to_file(obj, path):\n \"\"\"Save the pickle file to the specified path.\"\"\"\n pickle.dump(obj, open(path, 'wb'))\n\n\ndef get_device():\n \"\"\" If CUDA is available, use CUDA device, else use CPU device.\n\n When choosing from CUDA devices, this function will choose the one with max memory available.\n\n Returns: string device name.\n \"\"\"\n # TODO: could use gputil in the future\n device = 'cpu'\n if torch.cuda.is_available():\n try:\n # smi_out=\n # Free : xxxxxx MiB\n # Free : xxxxxx MiB\n # ....\n smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True)\n if isinstance(smi_out, bytes):\n smi_out = smi_out.decode('utf-8')\n print(smi_out)\n except subprocess.SubprocessError:\n warnings.warn('Cuda device successfully detected. 
However, nvidia-smi cannot be invoked')\n return 'cpu'\n visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',')\n if len(visible_devices) == 1 and visible_devices[0] == '':\n visible_devices = []\n visible_devices = [int(x) for x in visible_devices]\n memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]\n for cuda_index, _ in enumerate(memory_available):\n if cuda_index not in visible_devices and visible_devices:\n memory_available[cuda_index] = 0\n\n if memory_available:\n if max(memory_available) != 0:\n device = 'cuda:' + str(memory_available.index(max(memory_available)))\n return device\n\n\ndef temp_folder_generator():\n \"\"\"Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras).\"\"\"\n sys_temp = tempfile.gettempdir()\n path = os.path.join(sys_temp, 'autokeras')\n ensure_dir(path)\n return path\n\n\ndef download_file(file_link, file_path):\n \"\"\"Download the file specified in `file_link` and saves it in `file_path`.\"\"\"\n if not os.path.exists(file_path):\n with open(file_path, \"wb\") as f:\n print(\"Downloading %s\" % file_path)\n response = requests.get(file_link, stream=True)\n total_length = response.headers.get('content-length')\n\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n dl = 0\n total_length = int(total_length)\n for data in response.iter_content(chunk_size=4096):\n dl += len(data)\n f.write(data)\n done = int(50 * dl / total_length)\n sys.stdout.write(\"\\r[%s%s]\" % ('=' * done, ' ' * (50 - done)))\n sys.stdout.flush()\n\n\ndef download_file_with_extract(file_link, file_path, extract_path):\n \"\"\"Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`.\"\"\"\n if not os.path.exists(extract_path):\n download_file(file_link, file_path)\n zip_ref = zipfile.ZipFile(file_path, 'r')\n print(\"extracting downloaded file...\")\n zip_ref.extractall(extract_path)\n os.remove(file_path)\n print(\"extracted and removed downloaded zip file\")\n print(\"file already extracted in the path %s\" % extract_path)\n\n\ndef verbose_print(new_father_id, new_graph):\n \"\"\"Print information about the operation performed on father model to obtain current model and father's id.\"\"\"\n cell_size = [24, 49]\n header = ['Father Model ID', 'Added Operation']\n line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(header))\n print('\\n' + '+' + '-' * len(line) + '+')\n print('|' + line + '|')\n print('+' + '-' * len(line) + '+')\n for i in range(len(new_graph.operation_history)):\n if i == len(new_graph.operation_history) // 2:\n r = [new_father_id, new_graph.operation_history[i]]\n else:\n r = [' ', new_graph.operation_history[i]]\n line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(r))\n print('|' + line + '|')\n print('+' + '-' * len(line) + '+')\n\n\ndef validate_xy(x_train, y_train):\n \"\"\"Validate `x_train`'s type and the shape of `x_train`, `y_train`.\"\"\"\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')\n\n\ndef read_csv_file(csv_file_path):\n \"\"\"Read the csv file and returns two separate list containing file names and their labels.\n\n Args:\n csv_file_path: Path to the 
CSV file.\n\n Returns:\n file_names: List containing files names.\n file_label: List containing their respective labels.\n \"\"\"\n file_names = []\n file_labels = []\n with open(csv_file_path, 'r') as files_path:\n path_list = csv.DictReader(files_path)\n fieldnames = path_list.fieldnames\n for path in path_list:\n file_names.append(path[fieldnames[0]])\n file_labels.append(path[fieldnames[1]])\n return file_names, file_labels\n\n\ndef read_image(img_path):\n \"\"\"Read the image contained in the provided path `image_path`.\"\"\"\n img = imageio.imread(uri=img_path)\n return img\n\n\ndef compute_image_resize_params(data):\n \"\"\"Compute median height and width of all images in data.\n\n These values are used to resize the images at later point. Number of channels do not change from the original\n images. Currently, only 2-D images are supported.\n\n Args:\n data: 2-D Image data with shape N x H x W x C.\n\n Returns:\n median height: Median height of all images in the data.\n median width: Median width of all images in the data.\n \"\"\"\n median_height, median_width = numpy.median(numpy.array(list(map(lambda x: x.shape, data))), axis=0)[:2]\n\n if median_height * median_width > Constant.MAX_IMAGE_SIZE:\n reduction_factor = numpy.sqrt(median_height * median_width / Constant.MAX_IMAGE_SIZE)\n median_height = median_height / reduction_factor\n median_width = median_width / reduction_factor\n\n return int(median_height), int(median_width)\n\n\ndef resize_image_data(data, height, weight):\n \"\"\"Resize images to provided height and width.\n\n Resize all images in data to size h x w x c, where h is the height, w is the width and c is the number of channels.\n The number of channels c does not change from data. The function supports only 2-D image data.\n\n Args:\n data: 2-D Image data with shape N x H x W x C.\n h: Image resize height.\n w: Image resize width.\n\n Returns:\n data: Resize data.\n \"\"\"\n\n output_data = []\n for im in data:\n if len(im.shape) != 3:\n return data\n output_data.append(resize(image=im,\n output_shape=(height, weight, im.shape[-1]),\n mode='edge',\n preserve_range=True))\n\n return numpy.array(output_data)\n\n\ndef get_system():\n \"\"\"Get the current system environment. 
If the current system is not supported, raise an exception.\n\n Returns:\n A string to represent the current OS name.\n \"posix\" stands for Linux, Mac or Solaris architecture.\n \"nt\" stands for Windows system.\n \"\"\"\n print(os.name)\n if 'google.colab' in sys.modules:\n return Constant.SYS_GOOGLE_COLAB\n if os.name == 'posix':\n return Constant.SYS_LINUX\n if os.name == 'nt':\n return Constant.SYS_WINDOWS\n raise EnvironmentError('Unsupported environment')\n", "path": "autokeras/utils.py"}], "after_files": [{"content": "import csv\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport zipfile\n\nimport warnings\nimport imageio\nimport numpy\nimport requests\nfrom skimage.transform import resize\nimport torch\nimport subprocess\nimport string\nimport random\nfrom autokeras.constant import Constant\n\n\nclass NoImprovementError(Exception):\n def __init__(self, message):\n self.message = message\n\n\ndef ensure_dir(directory):\n \"\"\"Create directory if it does not exist.\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef ensure_file_dir(path):\n \"\"\"Create path if it does not exist.\"\"\"\n ensure_dir(os.path.dirname(path))\n\n\ndef has_file(path):\n \"\"\"Check if the given path exists.\"\"\"\n return os.path.exists(path)\n\n\ndef pickle_from_file(path):\n \"\"\"Load the pickle file from the provided path and returns the object.\"\"\"\n return pickle.load(open(path, 'rb'))\n\n\ndef pickle_to_file(obj, path):\n \"\"\"Save the pickle file to the specified path.\"\"\"\n pickle.dump(obj, open(path, 'wb'))\n\n\ndef get_device():\n \"\"\" If CUDA is available, use CUDA device, else use CPU device.\n\n When choosing from CUDA devices, this function will choose the one with max memory available.\n\n Returns: string device name.\n \"\"\"\n # TODO: could use gputil in the future\n device = 'cpu'\n if torch.cuda.is_available():\n try:\n # smi_out=\n # Free : xxxxxx MiB\n # Free : xxxxxx MiB\n # ....\n smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True)\n if isinstance(smi_out, bytes):\n smi_out = smi_out.decode('utf-8')\n print(smi_out)\n except subprocess.SubprocessError:\n warnings.warn('Cuda device successfully detected. 
However, nvidia-smi cannot be invoked')\n return 'cpu'\n visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',')\n if len(visible_devices) == 1 and visible_devices[0] == '':\n visible_devices = []\n visible_devices = [int(x) for x in visible_devices]\n memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]\n for cuda_index, _ in enumerate(memory_available):\n if cuda_index not in visible_devices and visible_devices:\n memory_available[cuda_index] = 0\n\n if memory_available:\n if max(memory_available) != 0:\n device = 'cuda:' + str(memory_available.index(max(memory_available)))\n return device\n\n\ndef temp_folder_generator():\n \"\"\"Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras).\"\"\"\n chars = string.ascii_uppercase + string.digits\n size = 6\n sys_temp = tempfile.gettempdir()\n random_suffix = ''.join(random.choice(chars) for _ in range(size))\n path = os.path.join(sys_temp, 'autokeras_' + random_suffix)\n ensure_dir(path)\n return path\n\n\ndef download_file(file_link, file_path):\n \"\"\"Download the file specified in `file_link` and saves it in `file_path`.\"\"\"\n if not os.path.exists(file_path):\n with open(file_path, \"wb\") as f:\n print(\"Downloading %s\" % file_path)\n response = requests.get(file_link, stream=True)\n total_length = response.headers.get('content-length')\n\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n dl = 0\n total_length = int(total_length)\n for data in response.iter_content(chunk_size=4096):\n dl += len(data)\n f.write(data)\n done = int(50 * dl / total_length)\n sys.stdout.write(\"\\r[%s%s]\" % ('=' * done, ' ' * (50 - done)))\n sys.stdout.flush()\n\n\ndef download_file_with_extract(file_link, file_path, extract_path):\n \"\"\"Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`.\"\"\"\n if not os.path.exists(extract_path):\n download_file(file_link, file_path)\n zip_ref = zipfile.ZipFile(file_path, 'r')\n print(\"extracting downloaded file...\")\n zip_ref.extractall(extract_path)\n os.remove(file_path)\n print(\"extracted and removed downloaded zip file\")\n print(\"file already extracted in the path %s\" % extract_path)\n\n\ndef verbose_print(new_father_id, new_graph):\n \"\"\"Print information about the operation performed on father model to obtain current model and father's id.\"\"\"\n cell_size = [24, 49]\n header = ['Father Model ID', 'Added Operation']\n line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(header))\n print('\\n' + '+' + '-' * len(line) + '+')\n print('|' + line + '|')\n print('+' + '-' * len(line) + '+')\n for i in range(len(new_graph.operation_history)):\n if i == len(new_graph.operation_history) // 2:\n r = [new_father_id, new_graph.operation_history[i]]\n else:\n r = [' ', new_graph.operation_history[i]]\n line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(r))\n print('|' + line + '|')\n print('+' + '-' * len(line) + '+')\n\n\ndef validate_xy(x_train, y_train):\n \"\"\"Validate `x_train`'s type and the shape of `x_train`, `y_train`.\"\"\"\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')\n\n\ndef 
read_csv_file(csv_file_path):\n \"\"\"Read the csv file and returns two separate list containing file names and their labels.\n\n Args:\n csv_file_path: Path to the CSV file.\n\n Returns:\n file_names: List containing files names.\n file_label: List containing their respective labels.\n \"\"\"\n file_names = []\n file_labels = []\n with open(csv_file_path, 'r') as files_path:\n path_list = csv.DictReader(files_path)\n fieldnames = path_list.fieldnames\n for path in path_list:\n file_names.append(path[fieldnames[0]])\n file_labels.append(path[fieldnames[1]])\n return file_names, file_labels\n\n\ndef read_image(img_path):\n \"\"\"Read the image contained in the provided path `image_path`.\"\"\"\n img = imageio.imread(uri=img_path)\n return img\n\n\ndef compute_image_resize_params(data):\n \"\"\"Compute median height and width of all images in data.\n\n These values are used to resize the images at later point. Number of channels do not change from the original\n images. Currently, only 2-D images are supported.\n\n Args:\n data: 2-D Image data with shape N x H x W x C.\n\n Returns:\n median height: Median height of all images in the data.\n median width: Median width of all images in the data.\n \"\"\"\n median_height, median_width = numpy.median(numpy.array(list(map(lambda x: x.shape, data))), axis=0)[:2]\n\n if median_height * median_width > Constant.MAX_IMAGE_SIZE:\n reduction_factor = numpy.sqrt(median_height * median_width / Constant.MAX_IMAGE_SIZE)\n median_height = median_height / reduction_factor\n median_width = median_width / reduction_factor\n\n return int(median_height), int(median_width)\n\n\ndef resize_image_data(data, height, weight):\n \"\"\"Resize images to provided height and width.\n\n Resize all images in data to size h x w x c, where h is the height, w is the width and c is the number of channels.\n The number of channels c does not change from data. The function supports only 2-D image data.\n\n Args:\n data: 2-D Image data with shape N x H x W x C.\n h: Image resize height.\n w: Image resize width.\n\n Returns:\n data: Resize data.\n \"\"\"\n\n output_data = []\n for im in data:\n if len(im.shape) != 3:\n return data\n output_data.append(resize(image=im,\n output_shape=(height, weight, im.shape[-1]),\n mode='edge',\n preserve_range=True))\n\n return numpy.array(output_data)\n\n\ndef get_system():\n \"\"\"Get the current system environment. If the current system is not supported, raise an exception.\n\n Returns:\n A string to represent the current OS name.\n \"posix\" stands for Linux, Mac or Solaris architecture.\n \"nt\" stands for Windows system.\n \"\"\"\n print(os.name)\n if 'google.colab' in sys.modules:\n return Constant.SYS_GOOGLE_COLAB\n if os.name == 'posix':\n return Constant.SYS_LINUX\n if os.name == 'nt':\n return Constant.SYS_WINDOWS\n raise EnvironmentError('Unsupported environment')\n", "path": "autokeras/utils.py"}]}
3006
219
gh_patches_debug_43147
rasdani/github-patches
git_diff
getsentry__sentry-3328
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- In-App for Cocoa needs to support Multiple Images Currently the in_app flag only covers a single image: the application. However for people with multiple frameworks we need to have a second mode where we detect the application images based on the path. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/lang/native/plugin.py` Content: ``` 1 from __future__ import absolute_import, print_function 2 3 import logging 4 import posixpath 5 6 from sentry.models import Project, EventError 7 from sentry.plugins import Plugin2 8 from sentry.lang.native.symbolizer import Symbolizer, have_symsynd 9 from sentry.models.dsymfile import SDK_MAPPING 10 11 12 logger = logging.getLogger(__name__) 13 14 15 def append_error(data, err): 16 data.setdefault('errors', []).append(err) 17 18 19 def exception_from_apple_error_or_diagnosis(error, diagnosis=None): 20 error = error or {} 21 22 if error: 23 nsexception = error.get('nsexception') 24 if nsexception: 25 return { 26 'type': nsexception['name'], 27 'value': error['reason'], 28 } 29 30 if diagnosis: 31 return { 32 'type': 'Error', 33 'value': diagnosis 34 } 35 36 37 def inject_apple_backtrace(data, frames, diagnosis=None, error=None, 38 system=None): 39 # TODO: 40 # user report stacktraces from unity 41 42 app_uuid = None 43 if system: 44 app_uuid = system.get('app_uuid') 45 if app_uuid is not None: 46 app_uuid = app_uuid.lower() 47 48 converted_frames = [] 49 longest_addr = 0 50 for frame in reversed(frames): 51 fn = frame.get('filename') 52 in_app = False 53 54 if app_uuid is not None: 55 frame_uuid = frame.get('uuid') 56 if frame_uuid == app_uuid: 57 in_app = True 58 59 # We only record the offset if we found a symbol but we did not 60 # find a line number. In that case it's the offset in bytes from 61 # the beginning of the symbol. 62 function = frame['symbol_name'] or '<unknown>' 63 lineno = frame.get('line') 64 offset = None 65 if not lineno: 66 offset = frame['instruction_addr'] - frame['symbol_addr'] 67 68 cframe = { 69 'in_app': in_app, 70 'abs_path': fn, 71 'filename': fn and posixpath.basename(fn) or None, 72 # This can come back as `None` from the symbolizer, in which 73 # case we need to fill something else in or we will fail 74 # later fulfill the interface requirements which say that a 75 # function needs to be provided. 76 'function': function, 77 'package': frame['object_name'], 78 'symbol_addr': '%x' % frame['symbol_addr'], 79 'instruction_addr': '%x' % frame['instruction_addr'], 80 'instruction_offset': offset, 81 'lineno': lineno, 82 } 83 converted_frames.append(cframe) 84 longest_addr = max(longest_addr, len(cframe['symbol_addr']), 85 len(cframe['instruction_addr'])) 86 87 # Pad out addresses to be of the same length and add prefix 88 for frame in converted_frames: 89 for key in 'symbol_addr', 'instruction_addr': 90 frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0') 91 92 stacktrace = {'frames': converted_frames} 93 94 if error or diagnosis: 95 error = error or {} 96 exc = exception_from_apple_error_or_diagnosis(error, diagnosis) 97 if exc is not None: 98 exc['stacktrace'] = stacktrace 99 data['sentry.interfaces.Exception'] = {'values': [exc]} 100 # Since we inject the exception late we need to make sure that 101 # we set the event type to error as it would be set to 102 # 'default' otherwise. 
103 data['type'] = 'error' 104 return 105 106 data['sentry.interfaces.Stacktrace'] = stacktrace 107 108 109 def inject_apple_device_data(data, system): 110 container = data.setdefault('device', {}) 111 try: 112 container['name'] = SDK_MAPPING[system['system_name']] 113 except LookupError: 114 container['name'] = system.get('system_name') or 'Generic Apple' 115 116 if 'system_version' in system: 117 container['version'] = system['system_version'] 118 if 'os_version' in system: 119 container['build'] = system['os_version'] 120 121 extra = container.setdefault('data', {}) 122 if 'cpu_arch' in system: 123 extra['cpu_arch'] = system['cpu_arch'] 124 if 'model' in system: 125 extra['device_model_id'] = system['model'] 126 if 'machine' in system: 127 extra['device_model'] = system['machine'] 128 if 'kernel_version' in system: 129 extra['kernel_version'] = system['kernel_version'] 130 131 132 def preprocess_apple_crash_event(data): 133 crash_report = data.get('sentry.interfaces.AppleCrashReport') 134 if crash_report is None: 135 return 136 137 project = Project.objects.get_from_cache( 138 id=data['project'], 139 ) 140 141 crash = crash_report['crash'] 142 crashed_thread = None 143 for thread in crash['threads']: 144 if thread['crashed']: 145 crashed_thread = thread 146 if crashed_thread is None: 147 append_error(data, { 148 'type': EventError.NATIVE_NO_CRASHED_THREAD, 149 }) 150 151 else: 152 system = crash_report.get('system') 153 try: 154 sym = Symbolizer(project, crash_report['binary_images'], 155 threads=[crashed_thread]) 156 with sym: 157 bt = sym.symbolize_backtrace( 158 crashed_thread['backtrace']['contents'], system) 159 inject_apple_backtrace(data, bt, crash.get('diagnosis'), 160 crash.get('error'), system) 161 except Exception as e: 162 logger.exception('Failed to symbolicate') 163 append_error(data, { 164 'type': EventError.NATIVE_INTERNAL_FAILURE, 165 'error': '%s: %s' % (e.__class__.__name__, str(e)), 166 }) 167 return 168 169 if system: 170 inject_apple_device_data(data, system) 171 172 return data 173 174 175 class NativePlugin(Plugin2): 176 can_disable = False 177 178 def get_event_preprocessors(self, **kwargs): 179 if not have_symsynd: 180 return [] 181 return [preprocess_apple_crash_event] 182 ``` Path: `src/sentry/lang/native/symbolizer.py` Content: ``` 1 try: 2 from symsynd.driver import Driver 3 from symsynd.report import ReportSymbolizer 4 from symsynd.macho.arch import get_cpu_name 5 from symsynd.demangle import demangle_symbol 6 have_symsynd = True 7 except ImportError: 8 have_symsynd = False 9 10 from sentry import options 11 from sentry.lang.native.dsymcache import dsymcache 12 from sentry.utils.safe import trim 13 from sentry.models import DSymSymbol 14 from sentry.models.dsymfile import MAX_SYM 15 16 17 def trim_frame(frame): 18 # This matches what's in stacktrace.py 19 frame['symbol_name'] = trim(frame.get('symbol_name'), MAX_SYM) 20 frame['filename'] = trim(frame.get('filename'), 256) 21 return frame 22 23 24 def find_system_symbol(img, instruction_addr, system_info=None): 25 """Finds a system symbol.""" 26 return DSymSymbol.objects.lookup_symbol( 27 instruction_addr=instruction_addr, 28 image_addr=img['image_addr'], 29 uuid=img['uuid'], 30 cpu_name=get_cpu_name(img['cpu_type'], 31 img['cpu_subtype']), 32 object_path=img['name'], 33 system_info=system_info 34 ) 35 36 37 def make_symbolizer(project, binary_images, threads=None): 38 """Creates a symbolizer for the given project and binary images. 
If a 39 list of threads is referenced (from an apple crash report) then only 40 images needed by those frames are loaded. 41 """ 42 if not have_symsynd: 43 raise RuntimeError('symsynd is unavailable. Install sentry with ' 44 'the dsym feature flag.') 45 driver = Driver(options.get('dsym.llvm-symbolizer-path') or None) 46 47 if threads is None: 48 to_load = [x['uuid'] for x in binary_images] 49 else: 50 image_map = {} 51 for image in binary_images: 52 image_map[image['image_addr']] = image['uuid'] 53 to_load = set() 54 for thread in threads: 55 for frame in thread['backtrace']['contents']: 56 img_uuid = image_map.get(frame['object_addr']) 57 if img_uuid is not None: 58 to_load.add(img_uuid) 59 to_load = list(to_load) 60 61 dsym_paths, loaded = dsymcache.fetch_dsyms(project, to_load) 62 return ReportSymbolizer(driver, dsym_paths, binary_images) 63 64 65 class Symbolizer(object): 66 67 def __init__(self, project, binary_images, threads=None): 68 self.symsynd_symbolizer = make_symbolizer(project, binary_images, 69 threads=threads) 70 self.images = dict((img['image_addr'], img) for img in binary_images) 71 72 def __enter__(self): 73 return self.symsynd_symbolizer.driver.__enter__() 74 75 def __exit__(self, *args): 76 return self.symsynd_symbolizer.driver.__exit__(*args) 77 78 def symbolize_frame(self, frame, system_info=None): 79 # Step one: try to symbolize with cached dsym files. 80 new_frame = self.symsynd_symbolizer.symbolize_frame(frame) 81 if new_frame is not None: 82 return trim_frame(new_frame) 83 84 # If that does not work, look up system symbols. 85 img = self.images.get(frame['object_addr']) 86 if img is not None: 87 symbol = find_system_symbol(img, frame['instruction_addr'], 88 system_info) 89 if symbol is not None: 90 symbol = demangle_symbol(symbol) or symbol 91 rv = dict(frame, symbol_name=symbol, filename=None, 92 line=0, column=0, uuid=img['uuid']) 93 return trim_frame(rv) 94 95 def symbolize_backtrace(self, backtrace, system_info=None): 96 rv = [] 97 for frame in backtrace: 98 new_frame = self.symbolize_frame(frame, system_info) 99 rv.append(new_frame or frame) 100 return rv 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/lang/native/plugin.py b/src/sentry/lang/native/plugin.py --- a/src/sentry/lang/native/plugin.py +++ b/src/sentry/lang/native/plugin.py @@ -11,6 +11,15 @@ logger = logging.getLogger(__name__) +APP_BUNDLE_PATHS = ( + '/var/containers/Bundle/Application/', + '/private/var/containers/Bundle/Application/', +) + +NON_APP_FRAMEWORKS = ( + '/Frameworks/libswiftCore.dylib', +) + def append_error(data, err): data.setdefault('errors', []).append(err) @@ -34,6 +43,19 @@ } +def is_in_app(frame, app_uuid=None): + if app_uuid is not None: + frame_uuid = frame.get('uuid') + if frame_uuid == app_uuid: + return True + object_name = frame.get('object_name', '') + if not object_name.startswith(APP_BUNDLE_PATHS): + return False + if object_name.endswith(NON_APP_FRAMEWORKS): + return False + return True + + def inject_apple_backtrace(data, frames, diagnosis=None, error=None, system=None): # TODO: @@ -49,12 +71,6 @@ longest_addr = 0 for frame in reversed(frames): fn = frame.get('filename') - in_app = False - - if app_uuid is not None: - frame_uuid = frame.get('uuid') - if frame_uuid == app_uuid: - in_app = True # We only record the offset if we found a symbol but we did not # find a line number. In that case it's the offset in bytes from @@ -66,7 +82,7 @@ offset = frame['instruction_addr'] - frame['symbol_addr'] cframe = { - 'in_app': in_app, + 'in_app': is_in_app(frame, app_uuid), 'abs_path': fn, 'filename': fn and posixpath.basename(fn) or None, # This can come back as `None` from the symbolizer, in which diff --git a/src/sentry/lang/native/symbolizer.py b/src/sentry/lang/native/symbolizer.py --- a/src/sentry/lang/native/symbolizer.py +++ b/src/sentry/lang/native/symbolizer.py @@ -75,26 +75,33 @@ def __exit__(self, *args): return self.symsynd_symbolizer.driver.__exit__(*args) + def _process_frame(self, frame, img): + rv = trim_frame(frame) + if img is not None: + rv['object_name'] = img['name'] + rv['uuid'] = img['uuid'] + return rv + def symbolize_frame(self, frame, system_info=None): + img = self.images.get(frame['object_addr']) + # Step one: try to symbolize with cached dsym files. new_frame = self.symsynd_symbolizer.symbolize_frame(frame) if new_frame is not None: - return trim_frame(new_frame) + return self._process_frame(new_frame, img) # If that does not work, look up system symbols. - img = self.images.get(frame['object_addr']) if img is not None: symbol = find_system_symbol(img, frame['instruction_addr'], system_info) if symbol is not None: symbol = demangle_symbol(symbol) or symbol rv = dict(frame, symbol_name=symbol, filename=None, - line=0, column=0, uuid=img['uuid']) - return trim_frame(rv) + line=0, column=0, uuid=img['uuid'], + object_name=img['object_name']) + return self._process_frame(rv, img) + + return self._process_frame(frame, img) def symbolize_backtrace(self, backtrace, system_info=None): - rv = [] - for frame in backtrace: - new_frame = self.symbolize_frame(frame, system_info) - rv.append(new_frame or frame) - return rv + return [self.symbolize_frame(frm, system_info) for frm in backtrace]
{"golden_diff": "diff --git a/src/sentry/lang/native/plugin.py b/src/sentry/lang/native/plugin.py\n--- a/src/sentry/lang/native/plugin.py\n+++ b/src/sentry/lang/native/plugin.py\n@@ -11,6 +11,15 @@\n \n logger = logging.getLogger(__name__)\n \n+APP_BUNDLE_PATHS = (\n+ '/var/containers/Bundle/Application/',\n+ '/private/var/containers/Bundle/Application/',\n+)\n+\n+NON_APP_FRAMEWORKS = (\n+ '/Frameworks/libswiftCore.dylib',\n+)\n+\n \n def append_error(data, err):\n data.setdefault('errors', []).append(err)\n@@ -34,6 +43,19 @@\n }\n \n \n+def is_in_app(frame, app_uuid=None):\n+ if app_uuid is not None:\n+ frame_uuid = frame.get('uuid')\n+ if frame_uuid == app_uuid:\n+ return True\n+ object_name = frame.get('object_name', '')\n+ if not object_name.startswith(APP_BUNDLE_PATHS):\n+ return False\n+ if object_name.endswith(NON_APP_FRAMEWORKS):\n+ return False\n+ return True\n+\n+\n def inject_apple_backtrace(data, frames, diagnosis=None, error=None,\n system=None):\n # TODO:\n@@ -49,12 +71,6 @@\n longest_addr = 0\n for frame in reversed(frames):\n fn = frame.get('filename')\n- in_app = False\n-\n- if app_uuid is not None:\n- frame_uuid = frame.get('uuid')\n- if frame_uuid == app_uuid:\n- in_app = True\n \n # We only record the offset if we found a symbol but we did not\n # find a line number. In that case it's the offset in bytes from\n@@ -66,7 +82,7 @@\n offset = frame['instruction_addr'] - frame['symbol_addr']\n \n cframe = {\n- 'in_app': in_app,\n+ 'in_app': is_in_app(frame, app_uuid),\n 'abs_path': fn,\n 'filename': fn and posixpath.basename(fn) or None,\n # This can come back as `None` from the symbolizer, in which\ndiff --git a/src/sentry/lang/native/symbolizer.py b/src/sentry/lang/native/symbolizer.py\n--- a/src/sentry/lang/native/symbolizer.py\n+++ b/src/sentry/lang/native/symbolizer.py\n@@ -75,26 +75,33 @@\n def __exit__(self, *args):\n return self.symsynd_symbolizer.driver.__exit__(*args)\n \n+ def _process_frame(self, frame, img):\n+ rv = trim_frame(frame)\n+ if img is not None:\n+ rv['object_name'] = img['name']\n+ rv['uuid'] = img['uuid']\n+ return rv\n+\n def symbolize_frame(self, frame, system_info=None):\n+ img = self.images.get(frame['object_addr'])\n+\n # Step one: try to symbolize with cached dsym files.\n new_frame = self.symsynd_symbolizer.symbolize_frame(frame)\n if new_frame is not None:\n- return trim_frame(new_frame)\n+ return self._process_frame(new_frame, img)\n \n # If that does not work, look up system symbols.\n- img = self.images.get(frame['object_addr'])\n if img is not None:\n symbol = find_system_symbol(img, frame['instruction_addr'],\n system_info)\n if symbol is not None:\n symbol = demangle_symbol(symbol) or symbol\n rv = dict(frame, symbol_name=symbol, filename=None,\n- line=0, column=0, uuid=img['uuid'])\n- return trim_frame(rv)\n+ line=0, column=0, uuid=img['uuid'],\n+ object_name=img['object_name'])\n+ return self._process_frame(rv, img)\n+\n+ return self._process_frame(frame, img)\n \n def symbolize_backtrace(self, backtrace, system_info=None):\n- rv = []\n- for frame in backtrace:\n- new_frame = self.symbolize_frame(frame, system_info)\n- rv.append(new_frame or frame)\n- return rv\n+ return [self.symbolize_frame(frm, system_info) for frm in backtrace]\n", "issue": "In-App for Cocoa needs to support Multiple Images\nCurrently the in_app flag only covers a single image: the application. 
However for people with multiple frameworks we need to have a second mode where we detect the application images based on the path.\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport logging\nimport posixpath\n\nfrom sentry.models import Project, EventError\nfrom sentry.plugins import Plugin2\nfrom sentry.lang.native.symbolizer import Symbolizer, have_symsynd\nfrom sentry.models.dsymfile import SDK_MAPPING\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef append_error(data, err):\n data.setdefault('errors', []).append(err)\n\n\ndef exception_from_apple_error_or_diagnosis(error, diagnosis=None):\n error = error or {}\n\n if error:\n nsexception = error.get('nsexception')\n if nsexception:\n return {\n 'type': nsexception['name'],\n 'value': error['reason'],\n }\n\n if diagnosis:\n return {\n 'type': 'Error',\n 'value': diagnosis\n }\n\n\ndef inject_apple_backtrace(data, frames, diagnosis=None, error=None,\n system=None):\n # TODO:\n # user report stacktraces from unity\n\n app_uuid = None\n if system:\n app_uuid = system.get('app_uuid')\n if app_uuid is not None:\n app_uuid = app_uuid.lower()\n\n converted_frames = []\n longest_addr = 0\n for frame in reversed(frames):\n fn = frame.get('filename')\n in_app = False\n\n if app_uuid is not None:\n frame_uuid = frame.get('uuid')\n if frame_uuid == app_uuid:\n in_app = True\n\n # We only record the offset if we found a symbol but we did not\n # find a line number. In that case it's the offset in bytes from\n # the beginning of the symbol.\n function = frame['symbol_name'] or '<unknown>'\n lineno = frame.get('line')\n offset = None\n if not lineno:\n offset = frame['instruction_addr'] - frame['symbol_addr']\n\n cframe = {\n 'in_app': in_app,\n 'abs_path': fn,\n 'filename': fn and posixpath.basename(fn) or None,\n # This can come back as `None` from the symbolizer, in which\n # case we need to fill something else in or we will fail\n # later fulfill the interface requirements which say that a\n # function needs to be provided.\n 'function': function,\n 'package': frame['object_name'],\n 'symbol_addr': '%x' % frame['symbol_addr'],\n 'instruction_addr': '%x' % frame['instruction_addr'],\n 'instruction_offset': offset,\n 'lineno': lineno,\n }\n converted_frames.append(cframe)\n longest_addr = max(longest_addr, len(cframe['symbol_addr']),\n len(cframe['instruction_addr']))\n\n # Pad out addresses to be of the same length and add prefix\n for frame in converted_frames:\n for key in 'symbol_addr', 'instruction_addr':\n frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0')\n\n stacktrace = {'frames': converted_frames}\n\n if error or diagnosis:\n error = error or {}\n exc = exception_from_apple_error_or_diagnosis(error, diagnosis)\n if exc is not None:\n exc['stacktrace'] = stacktrace\n data['sentry.interfaces.Exception'] = {'values': [exc]}\n # Since we inject the exception late we need to make sure that\n # we set the event type to error as it would be set to\n # 'default' otherwise.\n data['type'] = 'error'\n return\n\n data['sentry.interfaces.Stacktrace'] = stacktrace\n\n\ndef inject_apple_device_data(data, system):\n container = data.setdefault('device', {})\n try:\n container['name'] = SDK_MAPPING[system['system_name']]\n except LookupError:\n container['name'] = system.get('system_name') or 'Generic Apple'\n\n if 'system_version' in system:\n container['version'] = system['system_version']\n if 'os_version' in system:\n container['build'] = system['os_version']\n\n extra = 
container.setdefault('data', {})\n if 'cpu_arch' in system:\n extra['cpu_arch'] = system['cpu_arch']\n if 'model' in system:\n extra['device_model_id'] = system['model']\n if 'machine' in system:\n extra['device_model'] = system['machine']\n if 'kernel_version' in system:\n extra['kernel_version'] = system['kernel_version']\n\n\ndef preprocess_apple_crash_event(data):\n crash_report = data.get('sentry.interfaces.AppleCrashReport')\n if crash_report is None:\n return\n\n project = Project.objects.get_from_cache(\n id=data['project'],\n )\n\n crash = crash_report['crash']\n crashed_thread = None\n for thread in crash['threads']:\n if thread['crashed']:\n crashed_thread = thread\n if crashed_thread is None:\n append_error(data, {\n 'type': EventError.NATIVE_NO_CRASHED_THREAD,\n })\n\n else:\n system = crash_report.get('system')\n try:\n sym = Symbolizer(project, crash_report['binary_images'],\n threads=[crashed_thread])\n with sym:\n bt = sym.symbolize_backtrace(\n crashed_thread['backtrace']['contents'], system)\n inject_apple_backtrace(data, bt, crash.get('diagnosis'),\n crash.get('error'), system)\n except Exception as e:\n logger.exception('Failed to symbolicate')\n append_error(data, {\n 'type': EventError.NATIVE_INTERNAL_FAILURE,\n 'error': '%s: %s' % (e.__class__.__name__, str(e)),\n })\n return\n\n if system:\n inject_apple_device_data(data, system)\n\n return data\n\n\nclass NativePlugin(Plugin2):\n can_disable = False\n\n def get_event_preprocessors(self, **kwargs):\n if not have_symsynd:\n return []\n return [preprocess_apple_crash_event]\n", "path": "src/sentry/lang/native/plugin.py"}, {"content": "try:\n from symsynd.driver import Driver\n from symsynd.report import ReportSymbolizer\n from symsynd.macho.arch import get_cpu_name\n from symsynd.demangle import demangle_symbol\n have_symsynd = True\nexcept ImportError:\n have_symsynd = False\n\nfrom sentry import options\nfrom sentry.lang.native.dsymcache import dsymcache\nfrom sentry.utils.safe import trim\nfrom sentry.models import DSymSymbol\nfrom sentry.models.dsymfile import MAX_SYM\n\n\ndef trim_frame(frame):\n # This matches what's in stacktrace.py\n frame['symbol_name'] = trim(frame.get('symbol_name'), MAX_SYM)\n frame['filename'] = trim(frame.get('filename'), 256)\n return frame\n\n\ndef find_system_symbol(img, instruction_addr, system_info=None):\n \"\"\"Finds a system symbol.\"\"\"\n return DSymSymbol.objects.lookup_symbol(\n instruction_addr=instruction_addr,\n image_addr=img['image_addr'],\n uuid=img['uuid'],\n cpu_name=get_cpu_name(img['cpu_type'],\n img['cpu_subtype']),\n object_path=img['name'],\n system_info=system_info\n )\n\n\ndef make_symbolizer(project, binary_images, threads=None):\n \"\"\"Creates a symbolizer for the given project and binary images. If a\n list of threads is referenced (from an apple crash report) then only\n images needed by those frames are loaded.\n \"\"\"\n if not have_symsynd:\n raise RuntimeError('symsynd is unavailable. 
Install sentry with '\n 'the dsym feature flag.')\n driver = Driver(options.get('dsym.llvm-symbolizer-path') or None)\n\n if threads is None:\n to_load = [x['uuid'] for x in binary_images]\n else:\n image_map = {}\n for image in binary_images:\n image_map[image['image_addr']] = image['uuid']\n to_load = set()\n for thread in threads:\n for frame in thread['backtrace']['contents']:\n img_uuid = image_map.get(frame['object_addr'])\n if img_uuid is not None:\n to_load.add(img_uuid)\n to_load = list(to_load)\n\n dsym_paths, loaded = dsymcache.fetch_dsyms(project, to_load)\n return ReportSymbolizer(driver, dsym_paths, binary_images)\n\n\nclass Symbolizer(object):\n\n def __init__(self, project, binary_images, threads=None):\n self.symsynd_symbolizer = make_symbolizer(project, binary_images,\n threads=threads)\n self.images = dict((img['image_addr'], img) for img in binary_images)\n\n def __enter__(self):\n return self.symsynd_symbolizer.driver.__enter__()\n\n def __exit__(self, *args):\n return self.symsynd_symbolizer.driver.__exit__(*args)\n\n def symbolize_frame(self, frame, system_info=None):\n # Step one: try to symbolize with cached dsym files.\n new_frame = self.symsynd_symbolizer.symbolize_frame(frame)\n if new_frame is not None:\n return trim_frame(new_frame)\n\n # If that does not work, look up system symbols.\n img = self.images.get(frame['object_addr'])\n if img is not None:\n symbol = find_system_symbol(img, frame['instruction_addr'],\n system_info)\n if symbol is not None:\n symbol = demangle_symbol(symbol) or symbol\n rv = dict(frame, symbol_name=symbol, filename=None,\n line=0, column=0, uuid=img['uuid'])\n return trim_frame(rv)\n\n def symbolize_backtrace(self, backtrace, system_info=None):\n rv = []\n for frame in backtrace:\n new_frame = self.symbolize_frame(frame, system_info)\n rv.append(new_frame or frame)\n return rv\n", "path": "src/sentry/lang/native/symbolizer.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport logging\nimport posixpath\n\nfrom sentry.models import Project, EventError\nfrom sentry.plugins import Plugin2\nfrom sentry.lang.native.symbolizer import Symbolizer, have_symsynd\nfrom sentry.models.dsymfile import SDK_MAPPING\n\n\nlogger = logging.getLogger(__name__)\n\nAPP_BUNDLE_PATHS = (\n '/var/containers/Bundle/Application/',\n '/private/var/containers/Bundle/Application/',\n)\n\nNON_APP_FRAMEWORKS = (\n '/Frameworks/libswiftCore.dylib',\n)\n\n\ndef append_error(data, err):\n data.setdefault('errors', []).append(err)\n\n\ndef exception_from_apple_error_or_diagnosis(error, diagnosis=None):\n error = error or {}\n\n if error:\n nsexception = error.get('nsexception')\n if nsexception:\n return {\n 'type': nsexception['name'],\n 'value': error['reason'],\n }\n\n if diagnosis:\n return {\n 'type': 'Error',\n 'value': diagnosis\n }\n\n\ndef is_in_app(frame, app_uuid=None):\n if app_uuid is not None:\n frame_uuid = frame.get('uuid')\n if frame_uuid == app_uuid:\n return True\n object_name = frame.get('object_name', '')\n if not object_name.startswith(APP_BUNDLE_PATHS):\n return False\n if object_name.endswith(NON_APP_FRAMEWORKS):\n return False\n return True\n\n\ndef inject_apple_backtrace(data, frames, diagnosis=None, error=None,\n system=None):\n # TODO:\n # user report stacktraces from unity\n\n app_uuid = None\n if system:\n app_uuid = system.get('app_uuid')\n if app_uuid is not None:\n app_uuid = app_uuid.lower()\n\n converted_frames = []\n longest_addr = 0\n for frame in reversed(frames):\n fn = 
frame.get('filename')\n\n # We only record the offset if we found a symbol but we did not\n # find a line number. In that case it's the offset in bytes from\n # the beginning of the symbol.\n function = frame['symbol_name'] or '<unknown>'\n lineno = frame.get('line')\n offset = None\n if not lineno:\n offset = frame['instruction_addr'] - frame['symbol_addr']\n\n cframe = {\n 'in_app': is_in_app(frame, app_uuid),\n 'abs_path': fn,\n 'filename': fn and posixpath.basename(fn) or None,\n # This can come back as `None` from the symbolizer, in which\n # case we need to fill something else in or we will fail\n # later fulfill the interface requirements which say that a\n # function needs to be provided.\n 'function': function,\n 'package': frame['object_name'],\n 'symbol_addr': '%x' % frame['symbol_addr'],\n 'instruction_addr': '%x' % frame['instruction_addr'],\n 'instruction_offset': offset,\n 'lineno': lineno,\n }\n converted_frames.append(cframe)\n longest_addr = max(longest_addr, len(cframe['symbol_addr']),\n len(cframe['instruction_addr']))\n\n # Pad out addresses to be of the same length and add prefix\n for frame in converted_frames:\n for key in 'symbol_addr', 'instruction_addr':\n frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0')\n\n stacktrace = {'frames': converted_frames}\n\n if error or diagnosis:\n error = error or {}\n exc = exception_from_apple_error_or_diagnosis(error, diagnosis)\n if exc is not None:\n exc['stacktrace'] = stacktrace\n data['sentry.interfaces.Exception'] = {'values': [exc]}\n # Since we inject the exception late we need to make sure that\n # we set the event type to error as it would be set to\n # 'default' otherwise.\n data['type'] = 'error'\n return\n\n data['sentry.interfaces.Stacktrace'] = stacktrace\n\n\ndef inject_apple_device_data(data, system):\n container = data.setdefault('device', {})\n try:\n container['name'] = SDK_MAPPING[system['system_name']]\n except LookupError:\n container['name'] = system.get('system_name') or 'Generic Apple'\n\n if 'system_version' in system:\n container['version'] = system['system_version']\n if 'os_version' in system:\n container['build'] = system['os_version']\n\n extra = container.setdefault('data', {})\n if 'cpu_arch' in system:\n extra['cpu_arch'] = system['cpu_arch']\n if 'model' in system:\n extra['device_model_id'] = system['model']\n if 'machine' in system:\n extra['device_model'] = system['machine']\n if 'kernel_version' in system:\n extra['kernel_version'] = system['kernel_version']\n\n\ndef preprocess_apple_crash_event(data):\n crash_report = data.get('sentry.interfaces.AppleCrashReport')\n if crash_report is None:\n return\n\n project = Project.objects.get_from_cache(\n id=data['project'],\n )\n\n crash = crash_report['crash']\n crashed_thread = None\n for thread in crash['threads']:\n if thread['crashed']:\n crashed_thread = thread\n if crashed_thread is None:\n append_error(data, {\n 'type': EventError.NATIVE_NO_CRASHED_THREAD,\n })\n\n else:\n system = crash_report.get('system')\n try:\n sym = Symbolizer(project, crash_report['binary_images'],\n threads=[crashed_thread])\n with sym:\n bt = sym.symbolize_backtrace(\n crashed_thread['backtrace']['contents'], system)\n inject_apple_backtrace(data, bt, crash.get('diagnosis'),\n crash.get('error'), system)\n except Exception as e:\n logger.exception('Failed to symbolicate')\n append_error(data, {\n 'type': EventError.NATIVE_INTERNAL_FAILURE,\n 'error': '%s: %s' % (e.__class__.__name__, str(e)),\n })\n return\n\n if system:\n 
inject_apple_device_data(data, system)\n\n return data\n\n\nclass NativePlugin(Plugin2):\n can_disable = False\n\n def get_event_preprocessors(self, **kwargs):\n if not have_symsynd:\n return []\n return [preprocess_apple_crash_event]\n", "path": "src/sentry/lang/native/plugin.py"}, {"content": "try:\n from symsynd.driver import Driver\n from symsynd.report import ReportSymbolizer\n from symsynd.macho.arch import get_cpu_name\n from symsynd.demangle import demangle_symbol\n have_symsynd = True\nexcept ImportError:\n have_symsynd = False\n\nfrom sentry import options\nfrom sentry.lang.native.dsymcache import dsymcache\nfrom sentry.utils.safe import trim\nfrom sentry.models import DSymSymbol\nfrom sentry.models.dsymfile import MAX_SYM\n\n\ndef trim_frame(frame):\n # This matches what's in stacktrace.py\n frame['symbol_name'] = trim(frame.get('symbol_name'), MAX_SYM)\n frame['filename'] = trim(frame.get('filename'), 256)\n return frame\n\n\ndef find_system_symbol(img, instruction_addr, system_info=None):\n \"\"\"Finds a system symbol.\"\"\"\n return DSymSymbol.objects.lookup_symbol(\n instruction_addr=instruction_addr,\n image_addr=img['image_addr'],\n uuid=img['uuid'],\n cpu_name=get_cpu_name(img['cpu_type'],\n img['cpu_subtype']),\n object_path=img['name'],\n system_info=system_info\n )\n\n\ndef make_symbolizer(project, binary_images, threads=None):\n \"\"\"Creates a symbolizer for the given project and binary images. If a\n list of threads is referenced (from an apple crash report) then only\n images needed by those frames are loaded.\n \"\"\"\n if not have_symsynd:\n raise RuntimeError('symsynd is unavailable. Install sentry with '\n 'the dsym feature flag.')\n driver = Driver(options.get('dsym.llvm-symbolizer-path') or None)\n\n if threads is None:\n to_load = [x['uuid'] for x in binary_images]\n else:\n image_map = {}\n for image in binary_images:\n image_map[image['image_addr']] = image['uuid']\n to_load = set()\n for thread in threads:\n for frame in thread['backtrace']['contents']:\n img_uuid = image_map.get(frame['object_addr'])\n if img_uuid is not None:\n to_load.add(img_uuid)\n to_load = list(to_load)\n\n dsym_paths, loaded = dsymcache.fetch_dsyms(project, to_load)\n return ReportSymbolizer(driver, dsym_paths, binary_images)\n\n\nclass Symbolizer(object):\n\n def __init__(self, project, binary_images, threads=None):\n self.symsynd_symbolizer = make_symbolizer(project, binary_images,\n threads=threads)\n self.images = dict((img['image_addr'], img) for img in binary_images)\n\n def __enter__(self):\n return self.symsynd_symbolizer.driver.__enter__()\n\n def __exit__(self, *args):\n return self.symsynd_symbolizer.driver.__exit__(*args)\n\n def _process_frame(self, frame, img):\n rv = trim_frame(frame)\n if img is not None:\n rv['object_name'] = img['name']\n rv['uuid'] = img['uuid']\n return rv\n\n def symbolize_frame(self, frame, system_info=None):\n img = self.images.get(frame['object_addr'])\n\n # Step one: try to symbolize with cached dsym files.\n new_frame = self.symsynd_symbolizer.symbolize_frame(frame)\n if new_frame is not None:\n return self._process_frame(new_frame, img)\n\n # If that does not work, look up system symbols.\n if img is not None:\n symbol = find_system_symbol(img, frame['instruction_addr'],\n system_info)\n if symbol is not None:\n symbol = demangle_symbol(symbol) or symbol\n rv = dict(frame, symbol_name=symbol, filename=None,\n line=0, column=0, uuid=img['uuid'],\n object_name=img['object_name'])\n return self._process_frame(rv, img)\n\n return 
self._process_frame(frame, img)\n\n def symbolize_backtrace(self, backtrace, system_info=None):\n return [self.symbolize_frame(frm, system_info) for frm in backtrace]\n", "path": "src/sentry/lang/native/symbolizer.py"}]}
3,113
937
gh_patches_debug_9034
rasdani/github-patches
git_diff
e-valuation__EvaP-1029
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Staff users should see the public view of results by default Staff users should by default see the public view of results pages. It shows less data (primarily no comments), which is good, and random people looking over one's shoulder won't ask "omg you see more stuff here why is that" --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `evap/results/views.py` Content: ``` 1 from collections import OrderedDict, namedtuple 2 3 from django.core.exceptions import PermissionDenied 4 from django.shortcuts import get_object_or_404, render 5 from django.contrib.auth.decorators import login_required 6 7 from evap.evaluation.models import Semester, Degree, Contribution 8 from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY 9 10 11 @login_required 12 def index(request): 13 semesters = Semester.get_all_with_published_courses() 14 15 return render(request, "results_index.html", dict(semesters=semesters)) 16 17 18 @login_required 19 def semester_detail(request, semester_id): 20 semester = get_object_or_404(Semester, id=semester_id) 21 if request.user.is_reviewer: 22 courses = list(semester.course_set.filter(state__in=["in_evaluation", "evaluated", "reviewed", "published"]).prefetch_related("degrees")) 23 else: 24 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees")) 25 26 courses = [course for course in courses if course.can_user_see_course(request.user)] 27 28 # Annotate each course object with its grades. 29 for course in courses: 30 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) 31 32 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results')) 33 34 courses_by_degree = OrderedDict() 35 for degree in Degree.objects.all(): 36 courses_by_degree[degree] = CourseTuple([], []) 37 for course in courses: 38 if course.is_single_result: 39 for degree in course.degrees.all(): 40 section = calculate_results(course)[0] 41 result = section.results[0] 42 courses_by_degree[degree].single_results.append((course, result)) 43 else: 44 for degree in course.degrees.all(): 45 courses_by_degree[degree].courses.append(course) 46 47 template_data = dict(semester=semester, courses_by_degree=courses_by_degree) 48 return render(request, "results_semester_detail.html", template_data) 49 50 51 @login_required 52 def course_detail(request, semester_id, course_id): 53 semester = get_object_or_404(Semester, id=semester_id) 54 course = get_object_or_404(semester.course_set, id=course_id, semester=semester) 55 56 if not course.can_user_see_results(request.user): 57 raise PermissionDenied 58 59 sections = calculate_results(course) 60 61 public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view. 
62 63 represented_users = list(request.user.represented_users.all()) 64 represented_users.append(request.user) 65 66 # filter text answers 67 for section in sections: 68 results = [] 69 for result in section.results: 70 if isinstance(result, TextResult): 71 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)] 72 if answers: 73 results.append(TextResult(question=result.question, answers=answers)) 74 else: 75 results.append(result) 76 section.results[:] = results 77 78 # remove empty sections 79 sections = [section for section in sections if section.results] 80 81 # group by contributor 82 course_sections = [] 83 contributor_sections = OrderedDict() 84 for section in sections: 85 if not section.results: 86 continue 87 if section.contributor is None: 88 course_sections.append(section) 89 else: 90 contributor_sections.setdefault(section.contributor, 91 {'total_votes': 0, 'sections': []})['sections'].append(section) 92 93 # Sum up all Sections for this contributor. 94 # If section is not a RatingResult: 95 # Add 1 as we assume it is a TextResult or something similar that should be displayed. 96 contributor_sections[section.contributor]['total_votes'] +=\ 97 sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results]) 98 99 # Show a warning if course is still in evaluation (for reviewer preview). 100 evaluation_warning = course.state != 'published' 101 102 # Results for a course might not be visible because there are not enough answers 103 # but it can still be "published" e.g. to show the comment results to contributors. 104 # Users who can open the results page see a warning message in this case. 105 sufficient_votes_warning = not course.can_publish_grades 106 107 show_grades = request.user.is_reviewer or course.can_publish_grades 108 109 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) 110 111 template_data = dict( 112 course=course, 113 course_sections=course_sections, 114 contributor_sections=contributor_sections, 115 evaluation_warning=evaluation_warning, 116 sufficient_votes_warning=sufficient_votes_warning, 117 show_grades=show_grades, 118 reviewer=request.user.is_reviewer, 119 contributor=course.is_user_contributor_or_delegate(request.user), 120 can_download_grades=request.user.can_download_grades, 121 public_view=public_view) 122 return render(request, "results_course_detail.html", template_data) 123 124 125 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False): 126 if public_view: 127 return False 128 if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY: 129 return False 130 if user.is_reviewer: 131 return True 132 133 contributor = text_answer.contribution.contributor 134 135 if text_answer.is_private: 136 return contributor == user 137 138 if text_answer.is_published: 139 if text_answer.contribution.responsible: 140 return contributor == user or user in contributor.delegates.all() 141 142 if contributor in represented_users: 143 return True 144 if text_answer.contribution.course.contributions.filter( 145 contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists(): 146 return True 147 if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter( 148 contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists(): 149 return True 150 151 return False 152 ``` --- END FILES --- Please first localize the bug based 
on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -58,7 +58,10 @@ sections = calculate_results(course) - public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view. + if request.user.is_staff or request.user.is_reviewer: + public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view. + else: + public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view. represented_users = list(request.user.represented_users.all()) represented_users.append(request.user)
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -58,7 +58,10 @@\n \n sections = calculate_results(course)\n \n- public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n+ if request.user.is_staff or request.user.is_reviewer:\n+ public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n+ else:\n+ public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n \n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n", "issue": "Staff users should see the public view of results by default\nStaff users should by default see the public view of results pages. It shows less data (primarily no comments), which is good, and random people looking over one's shoulder won't ask \"omg you see more stuff here why is that\"\n", "before_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n # filter text 
answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n # Sum up all Sections for this contributor.\n # If section is not a RatingResult:\n # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n contributor_sections[section.contributor]['total_votes'] +=\\\n sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}], "after_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import 
Semester, Degree, Contribution\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n if request.user.is_staff or request.user.is_reviewer:\n public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n # filter text answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n # Sum up all Sections for this contributor.\n # If section is not a RatingResult:\n # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n contributor_sections[section.contributor]['total_votes'] +=\\\n sum([s.total_count if 
isinstance(s, RatingResult) else 1 for s in section.results])\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
1,947
181
gh_patches_debug_20105
rasdani/github-patches
git_diff
DDMAL__CantusDB-768
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- assigning-sources-to-users process could be smoother I recently tried to add myself as an editor for a test source on staging. I went into the admin area, selected my user account, and tried to add CDN-Test2020 to my sources: So far so good...: ![Screenshot 2023-06-19 at 4 17 36 PM](https://github.com/DDMAL/CantusDB/assets/58090591/badfa6ac-7256-4c9c-b384-24c1d41fee42) Oh dear...: ![Screenshot 2023-06-19 at 4 17 26 PM](https://github.com/DDMAL/CantusDB/assets/58090591/69407ac3-6aa2-418b-b4d3-588480c14b98) Let me try finding the source in the admin area, and then add myself to the source that way! oh... oh dear...: ![Screenshot 2023-06-19 at 4 18 09 PM](https://github.com/DDMAL/CantusDB/assets/58090591/c25aca10-b04d-4848-8558-0d1d0d62de53) Admittedly, there exists a faster way to get to the source page—source-detail -> click "manage source editors", which takes you to the admin area for the source in question, where you can find a filterable list of users—but these two pages of the admin area could certainly be improved by the ability to filter. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django/cantusdb_project/main_app/admin.py` Content: ``` 1 from django.contrib import admin 2 from main_app.models import * 3 4 # these fields should not be editable by all classes 5 EXCLUDE = ("created_by", "last_updated_by", "json_info") 6 7 8 class BaseModelAdmin(admin.ModelAdmin): 9 exclude = EXCLUDE 10 11 # if an object is created in the admin interface, assign the user to the created_by field 12 # else if an object is updated in the admin interface, assign the user to the last_updated_by field 13 def save_model(self, request, obj, form, change): 14 if change: 15 obj.last_updated_by = request.user 16 else: 17 obj.created_by = request.user 18 super().save_model(request, obj, form, change) 19 20 21 class CenturyAdmin(BaseModelAdmin): 22 pass 23 24 25 class ChantAdmin(BaseModelAdmin): 26 list_display = ("incipit", "siglum", "genre") 27 search_fields = ("title", "incipit", "cantus_id") 28 list_filter = ("genre",) 29 exclude = EXCLUDE + ( 30 "col1", 31 "col2", 32 "col3", 33 "next_chant", 34 "s_sequence", 35 "is_last_chant_in_feast", 36 ) 37 38 39 class FeastAdmin(BaseModelAdmin): 40 pass 41 42 43 class GenreAdmin(BaseModelAdmin): 44 pass 45 46 47 class NotationAdmin(BaseModelAdmin): 48 pass 49 50 51 class OfficeAdmin(BaseModelAdmin): 52 pass 53 54 55 class ProvenanceAdmin(BaseModelAdmin): 56 pass 57 58 59 class RismSiglumAdmin(BaseModelAdmin): 60 pass 61 62 63 class SegmentAdmin(BaseModelAdmin): 64 pass 65 66 67 class SequenceAdmin(BaseModelAdmin): 68 exclude = EXCLUDE + ("c_sequence", "next_chant", "is_last_chant_in_feast") 69 70 71 class SourceAdmin(BaseModelAdmin): 72 # from the Django docs: 73 # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript “filter” interface 74 # that allows searching within the options. The unselected and selected options appear in two boxes side by side. 
75 filter_horizontal = ( 76 "century", 77 "notation", 78 "current_editors", 79 "inventoried_by", 80 "full_text_entered_by", 81 "melodies_entered_by", 82 "proofreaders", 83 "other_editors", 84 ) 85 86 87 admin.site.register(Century, CenturyAdmin) 88 admin.site.register(Chant, ChantAdmin) 89 admin.site.register(Feast, FeastAdmin) 90 admin.site.register(Genre, GenreAdmin) 91 admin.site.register(Notation, NotationAdmin) 92 admin.site.register(Office, OfficeAdmin) 93 admin.site.register(Provenance, ProvenanceAdmin) 94 admin.site.register(RismSiglum, RismSiglumAdmin) 95 admin.site.register(Segment, SegmentAdmin) 96 admin.site.register(Sequence, SequenceAdmin) 97 admin.site.register(Source, SourceAdmin) 98 ``` Path: `django/cantusdb_project/users/admin.py` Content: ``` 1 from django.contrib import admin 2 from .models import * 3 from django.contrib.auth.admin import UserAdmin as BaseUserAdmin 4 from main_app.models import Source 5 6 # Register your models here. 7 8 9 # this will allow us to assign sources to users in the User admin page 10 class SourceInline(admin.TabularInline): 11 model = Source.current_editors.through 12 13 14 class UserAdmin(BaseUserAdmin): 15 readonly_fields = ( 16 "date_joined", 17 "last_login", 18 ) 19 # fields that are displayed on the user list page of the admin 20 list_display = ( 21 "email", 22 "first_name", 23 "last_name", 24 "institution", 25 ) 26 # creates a filter on the right side of the page to filter users by group 27 list_filter = ("groups",) 28 fieldsets = ( 29 ( 30 "Account info", 31 { 32 "fields": ( 33 ("email", "password"), 34 "is_active", 35 ("date_joined", "last_login"), 36 ) 37 }, 38 ), 39 ( 40 "Personal info", 41 { 42 "fields": ( 43 "full_name", 44 ("first_name", "last_name"), 45 "institution", 46 ("city", "country"), 47 "website", 48 ) 49 }, 50 ), 51 ( 52 "Permissions", 53 { 54 "fields": ( 55 "is_staff", 56 "is_superuser", 57 "groups", 58 ) 59 }, 60 ), 61 ) 62 add_fieldsets = ( 63 ( 64 "Account info", 65 { 66 "fields": ( 67 "email", 68 ("password1", "password2"), 69 ) 70 }, 71 ), 72 ( 73 "Personal info", 74 { 75 "fields": ( 76 "full_name", 77 ("first_name", "last_name"), 78 "institution", 79 ("city", "country"), 80 "website", 81 ) 82 }, 83 ), 84 ( 85 "Permissions", 86 { 87 "fields": ( 88 "is_staff", 89 "is_superuser", 90 "groups", 91 ) 92 }, 93 ), 94 ) 95 search_fields = ( 96 "email", 97 "first_name", 98 "last_name", 99 "institution", 100 ) 101 # order the list of users by email 102 ordering = ("email",) 103 filter_horizontal = ("groups",) 104 inlines = [SourceInline] 105 106 107 admin.site.register(User, UserAdmin) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py --- a/django/cantusdb_project/main_app/admin.py +++ b/django/cantusdb_project/main_app/admin.py @@ -69,6 +69,11 @@ class SourceAdmin(BaseModelAdmin): + # These search fields are also available on the user-source inline relationship in the user admin page + search_fields = ( + "siglum", + "title", + ) # from the Django docs: # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript “filter” interface # that allows searching within the options. The unselected and selected options appear in two boxes side by side. diff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py --- a/django/cantusdb_project/users/admin.py +++ b/django/cantusdb_project/users/admin.py @@ -9,6 +9,7 @@ # this will allow us to assign sources to users in the User admin page class SourceInline(admin.TabularInline): model = Source.current_editors.through + raw_id_fields = ["source"] class UserAdmin(BaseUserAdmin):
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py\n--- a/django/cantusdb_project/main_app/admin.py\n+++ b/django/cantusdb_project/main_app/admin.py\n@@ -69,6 +69,11 @@\n \n \n class SourceAdmin(BaseModelAdmin):\n+ # These search fields are also available on the user-source inline relationship in the user admin page\n+ search_fields = (\n+ \"siglum\",\n+ \"title\",\n+ )\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d interface\n # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\ndiff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py\n--- a/django/cantusdb_project/users/admin.py\n+++ b/django/cantusdb_project/users/admin.py\n@@ -9,6 +9,7 @@\n # this will allow us to assign sources to users in the User admin page\n class SourceInline(admin.TabularInline):\n model = Source.current_editors.through\n+ raw_id_fields = [\"source\"]\n \n \n class UserAdmin(BaseUserAdmin):\n", "issue": "assigning-sources-to-users process could be smoother\nI recently tried to add myself as an editor for a test source on staging. I went into the admin area, selected my user account, and tried to add CDN-Test2020 to my sources:\r\n\r\nSo far so good...:\r\n![Screenshot 2023-06-19 at 4 17 36 PM](https://github.com/DDMAL/CantusDB/assets/58090591/badfa6ac-7256-4c9c-b384-24c1d41fee42)\r\n\r\nOh dear...:\r\n![Screenshot 2023-06-19 at 4 17 26 PM](https://github.com/DDMAL/CantusDB/assets/58090591/69407ac3-6aa2-418b-b4d3-588480c14b98)\r\n\r\nLet me try finding the source in the admin area, and then add myself to the source that way! oh... 
oh dear...:\r\n![Screenshot 2023-06-19 at 4 18 09 PM](https://github.com/DDMAL/CantusDB/assets/58090591/c25aca10-b04d-4848-8558-0d1d0d62de53)\r\n\r\nAdmittedly, there exists a faster way to get to the source page\u2014source-detail -> click \"manage source editors\", which takes you to the admin area for the source in question, where you can find a filterable list of users\u2014but these two pages of the admin area could certainly be improved by the ability to filter.\n", "before_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\n\n# these fields should not be editable by all classes\nEXCLUDE = (\"created_by\", \"last_updated_by\", \"json_info\")\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n exclude = EXCLUDE\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n pass\n\n\nclass ChantAdmin(BaseModelAdmin):\n list_display = (\"incipit\", \"siglum\", \"genre\")\n search_fields = (\"title\", \"incipit\", \"cantus_id\")\n list_filter = (\"genre\",)\n exclude = EXCLUDE + (\n \"col1\",\n \"col2\",\n \"col3\",\n \"next_chant\",\n \"s_sequence\",\n \"is_last_chant_in_feast\",\n )\n\n\nclass FeastAdmin(BaseModelAdmin):\n pass\n\n\nclass GenreAdmin(BaseModelAdmin):\n pass\n\n\nclass NotationAdmin(BaseModelAdmin):\n pass\n\n\nclass OfficeAdmin(BaseModelAdmin):\n pass\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n pass\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n pass\n\n\nclass SegmentAdmin(BaseModelAdmin):\n pass\n\n\nclass SequenceAdmin(BaseModelAdmin):\n exclude = EXCLUDE + (\"c_sequence\", \"next_chant\", \"is_last_chant_in_feast\")\n\n\nclass SourceAdmin(BaseModelAdmin):\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d interface\n # that allows searching within the options. 
The unselected and selected options appear in two boxes side by side.\n filter_horizontal = (\n \"century\",\n \"notation\",\n \"current_editors\",\n \"inventoried_by\",\n \"full_text_entered_by\",\n \"melodies_entered_by\",\n \"proofreaders\",\n \"other_editors\",\n )\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py"}, {"content": "from django.contrib import admin\nfrom .models import *\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom main_app.models import Source\n\n# Register your models here.\n\n\n# this will allow us to assign sources to users in the User admin page\nclass SourceInline(admin.TabularInline):\n model = Source.current_editors.through\n\n\nclass UserAdmin(BaseUserAdmin):\n readonly_fields = (\n \"date_joined\",\n \"last_login\",\n )\n # fields that are displayed on the user list page of the admin\n list_display = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"institution\",\n )\n # creates a filter on the right side of the page to filter users by group\n list_filter = (\"groups\",)\n fieldsets = (\n (\n \"Account info\",\n {\n \"fields\": (\n (\"email\", \"password\"),\n \"is_active\",\n (\"date_joined\", \"last_login\"),\n )\n },\n ),\n (\n \"Personal info\",\n {\n \"fields\": (\n \"full_name\",\n (\"first_name\", \"last_name\"),\n \"institution\",\n (\"city\", \"country\"),\n \"website\",\n )\n },\n ),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n )\n },\n ),\n )\n add_fieldsets = (\n (\n \"Account info\",\n {\n \"fields\": (\n \"email\",\n (\"password1\", \"password2\"),\n )\n },\n ),\n (\n \"Personal info\",\n {\n \"fields\": (\n \"full_name\",\n (\"first_name\", \"last_name\"),\n \"institution\",\n (\"city\", \"country\"),\n \"website\",\n )\n },\n ),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n )\n },\n ),\n )\n search_fields = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"institution\",\n )\n # order the list of users by email\n ordering = (\"email\",)\n filter_horizontal = (\"groups\",)\n inlines = [SourceInline]\n\n\nadmin.site.register(User, UserAdmin)\n", "path": "django/cantusdb_project/users/admin.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\n\n# these fields should not be editable by all classes\nEXCLUDE = (\"created_by\", \"last_updated_by\", \"json_info\")\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n exclude = EXCLUDE\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n pass\n\n\nclass ChantAdmin(BaseModelAdmin):\n list_display = (\"incipit\", \"siglum\", \"genre\")\n search_fields = (\"title\", \"incipit\", 
\"cantus_id\")\n list_filter = (\"genre\",)\n exclude = EXCLUDE + (\n \"col1\",\n \"col2\",\n \"col3\",\n \"next_chant\",\n \"s_sequence\",\n \"is_last_chant_in_feast\",\n )\n\n\nclass FeastAdmin(BaseModelAdmin):\n pass\n\n\nclass GenreAdmin(BaseModelAdmin):\n pass\n\n\nclass NotationAdmin(BaseModelAdmin):\n pass\n\n\nclass OfficeAdmin(BaseModelAdmin):\n pass\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n pass\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n pass\n\n\nclass SegmentAdmin(BaseModelAdmin):\n pass\n\n\nclass SequenceAdmin(BaseModelAdmin):\n exclude = EXCLUDE + (\"c_sequence\", \"next_chant\", \"is_last_chant_in_feast\")\n\n\nclass SourceAdmin(BaseModelAdmin):\n # These search fields are also available on the user-source inline relationship in the user admin page\n search_fields = (\n \"siglum\",\n \"title\",\n )\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d interface\n # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\n filter_horizontal = (\n \"century\",\n \"notation\",\n \"current_editors\",\n \"inventoried_by\",\n \"full_text_entered_by\",\n \"melodies_entered_by\",\n \"proofreaders\",\n \"other_editors\",\n )\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py"}, {"content": "from django.contrib import admin\nfrom .models import *\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom main_app.models import Source\n\n# Register your models here.\n\n\n# this will allow us to assign sources to users in the User admin page\nclass SourceInline(admin.TabularInline):\n model = Source.current_editors.through\n raw_id_fields = [\"source\"]\n\n\nclass UserAdmin(BaseUserAdmin):\n readonly_fields = (\n \"date_joined\",\n \"last_login\",\n )\n # fields that are displayed on the user list page of the admin\n list_display = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"institution\",\n )\n # creates a filter on the right side of the page to filter users by group\n list_filter = (\"groups\",)\n fieldsets = (\n (\n \"Account info\",\n {\n \"fields\": (\n (\"email\", \"password\"),\n \"is_active\",\n (\"date_joined\", \"last_login\"),\n )\n },\n ),\n (\n \"Personal info\",\n {\n \"fields\": (\n \"full_name\",\n (\"first_name\", \"last_name\"),\n \"institution\",\n (\"city\", \"country\"),\n \"website\",\n )\n },\n ),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n )\n },\n ),\n )\n add_fieldsets = (\n (\n \"Account info\",\n {\n \"fields\": (\n \"email\",\n (\"password1\", \"password2\"),\n )\n },\n ),\n (\n \"Personal info\",\n {\n \"fields\": (\n \"full_name\",\n (\"first_name\", \"last_name\"),\n \"institution\",\n (\"city\", \"country\"),\n \"website\",\n )\n },\n ),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n )\n },\n ),\n )\n search_fields = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"institution\",\n )\n # order the list of 
users by email\n ordering = (\"email\",)\n filter_horizontal = (\"groups\",)\n inlines = [SourceInline]\n\n\nadmin.site.register(User, UserAdmin)\n", "path": "django/cantusdb_project/users/admin.py"}]}
2,186
288
gh_patches_debug_28895
rasdani/github-patches
git_diff
translate__pootle-3780
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Legal pages don't remember your acceptance of the agreemenet This is likely only a problem on the [LibreOffice dev server](https://translations-test.documentfoundation.org/projects/) but tracking it here so we don't forget. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/apps/staticpages/managers.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) Pootle contributors. 5 # 6 # This file is a part of the Pootle project. It is distributed under the GPL3 7 # or later license. See the LICENSE file for a copy of the license and the 8 # AUTHORS file for copyright and authorship information. 9 10 from django.db.models import F, Manager 11 12 13 class PageManager(Manager): 14 15 def live(self, user=None, **kwargs): 16 """Filters active (live) pages. 17 18 :param user: Current active user. If omitted or the user doesn't 19 have administration privileges, only active pages will be 20 returned. 21 """ 22 if user is not None and user.is_superuser: 23 return self.get_queryset() 24 25 return self.get_queryset().filter(active=True) 26 27 def pending_user_agreement(self, user, **kwargs): 28 """Filters active pages where the given `user` has pending 29 agreements. 30 """ 31 # FIXME: This should be a method exclusive to a LegalPage manager 32 return self.live().exclude( 33 agreement__user=user, 34 modified_on__lt=F('agreement__agreed_on'), 35 ).distinct() 36 ``` Path: `pootle/apps/pootle_misc/context_processors.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) Pootle contributors. 5 # 6 # This file is a part of the Pootle project. It is distributed under the GPL3 7 # or later license. See the LICENSE file for a copy of the license and the 8 # AUTHORS file for copyright and authorship information. 9 10 from django.conf import settings 11 from django.utils import translation 12 13 from pootle import __version__ 14 from pootle_language.models import Language 15 from pootle_project.models import Project 16 from staticpages.models import LegalPage 17 18 19 def _agreement_context(request): 20 """Returns whether the agreement box should be displayed or not.""" 21 request_path = request.META['PATH_INFO'] 22 nocheck = filter(lambda x: request_path.startswith(x), 23 settings.LEGALPAGE_NOCHECK_PREFIXES) 24 25 if (request.user.is_authenticated() and not nocheck and 26 LegalPage.objects.pending_user_agreement(request.user).exists()): 27 return True 28 29 return False 30 31 32 def pootle_context(request): 33 """Exposes settings to templates.""" 34 #FIXME: maybe we should expose relevant settings only? 
35 return { 36 'settings': { 37 'TITLE': settings.TITLE, 38 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID, 39 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL, 40 'SCRIPT_NAME': settings.SCRIPT_NAME, 41 'POOTLE_VERSION': __version__, 42 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS, 43 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT, 44 'DEBUG': settings.DEBUG, 45 }, 46 'custom': settings.CUSTOM_TEMPLATE_CONTEXT, 47 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()), 48 'ALL_PROJECTS': Project.objects.cached_dict(request.user), 49 'display_agreement': _agreement_context(request), 50 } 51 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py --- a/pootle/apps/pootle_misc/context_processors.py +++ b/pootle/apps/pootle_misc/context_processors.py @@ -23,7 +23,7 @@ settings.LEGALPAGE_NOCHECK_PREFIXES) if (request.user.is_authenticated() and not nocheck and - LegalPage.objects.pending_user_agreement(request.user).exists()): + LegalPage.objects.has_pending_agreement(request.user)): return True return False diff --git a/pootle/apps/staticpages/managers.py b/pootle/apps/staticpages/managers.py --- a/pootle/apps/staticpages/managers.py +++ b/pootle/apps/staticpages/managers.py @@ -24,12 +24,24 @@ return self.get_queryset().filter(active=True) + def has_pending_agreement(self, user): + agreements = self.pending_user_agreement(user) + return len(list(agreements)) > 0 + def pending_user_agreement(self, user, **kwargs): """Filters active pages where the given `user` has pending agreements. """ # FIXME: This should be a method exclusive to a LegalPage manager - return self.live().exclude( - agreement__user=user, - modified_on__lt=F('agreement__agreed_on'), - ).distinct() + return self.raw(''' + SELECT DISTINCT staticpages_legalpage.id + FROM staticpages_legalpage + WHERE (staticpages_legalpage.active = TRUE + AND NOT (staticpages_legalpage.id IN + (SELECT A.document_id + FROM staticpages_legalpage AS LP + INNER JOIN staticpages_agreement AS A + ON LP.id = A.document_id + WHERE A.user_id = %s AND + A.agreed_on > LP.modified_on))) + ''', [user.id])
{"golden_diff": "diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py\n--- a/pootle/apps/pootle_misc/context_processors.py\n+++ b/pootle/apps/pootle_misc/context_processors.py\n@@ -23,7 +23,7 @@\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n \n if (request.user.is_authenticated() and not nocheck and\n- LegalPage.objects.pending_user_agreement(request.user).exists()):\n+ LegalPage.objects.has_pending_agreement(request.user)):\n return True\n \n return False\ndiff --git a/pootle/apps/staticpages/managers.py b/pootle/apps/staticpages/managers.py\n--- a/pootle/apps/staticpages/managers.py\n+++ b/pootle/apps/staticpages/managers.py\n@@ -24,12 +24,24 @@\n \n return self.get_queryset().filter(active=True)\n \n+ def has_pending_agreement(self, user):\n+ agreements = self.pending_user_agreement(user)\n+ return len(list(agreements)) > 0\n+\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n- return self.live().exclude(\n- agreement__user=user,\n- modified_on__lt=F('agreement__agreed_on'),\n- ).distinct()\n+ return self.raw('''\n+ SELECT DISTINCT staticpages_legalpage.id\n+ FROM staticpages_legalpage\n+ WHERE (staticpages_legalpage.active = TRUE\n+ AND NOT (staticpages_legalpage.id IN\n+ (SELECT A.document_id\n+ FROM staticpages_legalpage AS LP\n+ INNER JOIN staticpages_agreement AS A\n+ ON LP.id = A.document_id\n+ WHERE A.user_id = %s AND\n+ A.agreed_on > LP.modified_on)))\n+ ''', [user.id])\n", "issue": "Legal pages don't remember your acceptance of the agreemenet\nThis is likely only a problem on the [LibreOffice dev server](https://translations-test.documentfoundation.org/projects/) but tracking it here so we don't forget.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import F, Manager\n\n\nclass PageManager(Manager):\n\n def live(self, user=None, **kwargs):\n \"\"\"Filters active (live) pages.\n\n :param user: Current active user. If omitted or the user doesn't\n have administration privileges, only active pages will be\n returned.\n \"\"\"\n if user is not None and user.is_superuser:\n return self.get_queryset()\n\n return self.get_queryset().filter(active=True)\n\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n return self.live().exclude(\n agreement__user=user,\n modified_on__lt=F('agreement__agreed_on'),\n ).distinct()\n", "path": "pootle/apps/staticpages/managers.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom pootle import __version__\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated() and not nocheck and\n LegalPage.objects.pending_user_agreement(request.user).exists()):\n return True\n\n return False\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n #FIXME: maybe we should expose relevant settings only?\n return {\n 'settings': {\n 'TITLE': settings.TITLE,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_VERSION': __version__,\n 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()),\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import F, Manager\n\n\nclass PageManager(Manager):\n\n def live(self, user=None, **kwargs):\n \"\"\"Filters active (live) pages.\n\n :param user: Current active user. If omitted or the user doesn't\n have administration privileges, only active pages will be\n returned.\n \"\"\"\n if user is not None and user.is_superuser:\n return self.get_queryset()\n\n return self.get_queryset().filter(active=True)\n\n def has_pending_agreement(self, user):\n agreements = self.pending_user_agreement(user)\n return len(list(agreements)) > 0\n\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n return self.raw('''\n SELECT DISTINCT staticpages_legalpage.id\n FROM staticpages_legalpage\n WHERE (staticpages_legalpage.active = TRUE\n AND NOT (staticpages_legalpage.id IN\n (SELECT A.document_id\n FROM staticpages_legalpage AS LP\n INNER JOIN staticpages_agreement AS A\n ON LP.id = A.document_id\n WHERE A.user_id = %s AND\n A.agreed_on > LP.modified_on)))\n ''', [user.id])\n", "path": "pootle/apps/staticpages/managers.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom pootle import __version__\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated() and not nocheck and\n LegalPage.objects.has_pending_agreement(request.user)):\n return True\n\n return False\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n #FIXME: maybe we should expose relevant settings only?\n return {\n 'settings': {\n 'TITLE': settings.TITLE,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_VERSION': __version__,\n 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()),\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}]}
1,150
439
gh_patches_debug_33621
rasdani/github-patches
git_diff
dotkom__onlineweb4-1548
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make sure people with `webshop.view_model` can view detail views in dashboard Users with access to webshop dashboard cannot get a list of products (dashboard -> webshop -> kategorier -> click on a category), nor view the details of an order (dashboard -> webshop -> bestillinger -> click on an order) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/webshop/dashboard/views.py` Content: ``` 1 import logging 2 3 from django.contrib.contenttypes.models import ContentType 4 from django.core.urlresolvers import reverse 5 from django.shortcuts import get_object_or_404, redirect 6 from django.views.generic import (CreateView, DeleteView, DetailView, ListView, TemplateView, 7 UpdateView) 8 from taggit.models import TaggedItem 9 10 from apps.dashboard.tools import DashboardPermissionMixin 11 from apps.gallery.models import ResponsiveImage 12 from apps.webshop.models import Category, OrderLine, Product 13 14 logger = logging.getLogger(__name__) 15 16 17 class Overview(DashboardPermissionMixin, TemplateView): 18 template_name = 'webshop/dashboard/index.html' 19 permission_required = 'webshop.view_category' 20 21 22 class Categories(DashboardPermissionMixin, TemplateView): 23 model = Category 24 template_name = 'webshop/dashboard/categories.html' 25 permission_required = 'webshop.change_category' 26 27 def get_context_data(self, *args, **kwargs): 28 context = super(Categories, self).get_context_data(*args, **kwargs) 29 context['categories'] = Category.objects.all().prefetch_related('products') 30 return context 31 32 33 class CategoryView(DashboardPermissionMixin, DetailView): 34 model = Category 35 template_name = 'webshop/dashboard/category.html' 36 permission_required = 'webshop.view_category' 37 38 39 class CategoryCreate(DashboardPermissionMixin, CreateView): 40 model = Category 41 fields = ['name', 'slug'] 42 template_name = 'webshop/dashboard/category_update.html' 43 permission_required = 'webshop.add_category' 44 45 def get_object(self, *args, **kwargs): 46 # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195 47 return None 48 49 def get_success_url(self): 50 return reverse('dashboard-webshop:categories') 51 52 53 class CategoryUpdate(DashboardPermissionMixin, UpdateView): 54 model = Category 55 fields = ['name', 'slug'] 56 template_name = 'webshop/dashboard/category_update.html' 57 context_object_name = 'category' 58 permission_required = 'webshop.change_product' 59 60 def get_success_url(self): 61 return reverse('dashboard-webshop:category', kwargs={'slug': self.object.slug}) 62 63 64 class CategoryDelete(DashboardPermissionMixin, DeleteView): 65 model = Category 66 template_name = 'webshop/dashboard/delete.html' 67 permission_required = 'webshop.delete_category' 68 69 def get_success_url(self): 70 return reverse('dashboard-webshop:categories') 71 72 73 class ProductView(DashboardPermissionMixin, DetailView): 74 model = Product 75 template_name = 'webshop/dashboard/product.html' 76 permission_required = 'webshop.view_product' 77 78 79 class ProductCreate(DashboardPermissionMixin, CreateView): 80 model = Product 81 fields = ['name', 'slug', 'short', 'description', 'price', 'stock'] 82 template_name = 'webshop/dashboard/product_update.html' 83 permission_required = 'webshop.add_product' 84 85 def get_context_data(self, *args, **kwargs): 86 context = 
super(ProductCreate, self).get_context_data(*args, **kwargs) 87 context['category'] = get_object_or_404(Category, slug=self.kwargs.get('category_slug')) 88 return context 89 90 def form_valid(self, form): 91 product = form.save(commit=False) 92 # Setting foreign key 93 category = get_object_or_404(Category, slug=self.kwargs.get('category_slug')) 94 product.category = category 95 return super(ProductCreate, self).form_valid(form) 96 97 def get_object(self, *args, **kwargs): 98 # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195 99 return None 100 101 def get_success_url(self): 102 return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')}) 103 104 105 class ProductUpdate(DashboardPermissionMixin, UpdateView): 106 model = Product 107 fields = ['name', 'slug', 'short', 'description', 'price', 'stock'] 108 template_name = 'webshop/dashboard/product_update.html' 109 context_object_name = 'product' 110 permission_required = 'webshop.change_product' 111 112 def get_context_data(self, *args, **kwargs): 113 context = super(ProductUpdate, self).get_context_data(*args, **kwargs) 114 context['category'] = self.object.category 115 return context 116 117 def get_success_url(self): 118 return reverse('dashboard-webshop:product', kwargs={'slug': self.object.slug}) 119 120 121 class ProductDelete(DashboardPermissionMixin, DeleteView): 122 model = Product 123 template_name = 'webshop/dashboard/delete.html' 124 permission_required = 'webshop.delete_product' 125 126 def get_success_url(self): 127 return reverse('dashboard-webshop:category', kwargs={'slug': self.object.category.slug}) 128 129 130 class ProductImage(DashboardPermissionMixin, DetailView): 131 model = Product 132 template_name = 'webshop/dashboard/image.html' 133 permission_required = 'webshop.change_product' 134 135 def get_context_data(self, *args, **kwargs): 136 context = super(ProductImage, self).get_context_data(*args, **kwargs) 137 # Filter out potential ResponsiveImage objects that have orphan file references 138 images = ResponsiveImage.objects.all().order_by('-timestamp')[:15] 139 context['images'] = [i for i in images if i.file_status_ok()] 140 141 context['tags'] = sorted(set(tag.tag.name for tag in TaggedItem.objects.filter( 142 content_type=ContentType.objects.get_for_model(ResponsiveImage) 143 ).order_by('tag__name'))) 144 145 return context 146 147 def post(self, request, *args, **kwargs): 148 pass 149 150 151 class Orders(DashboardPermissionMixin, ListView): 152 template_name = 'webshop/dashboard/orders.html' 153 permission_required = 'webshop.view_order' 154 queryset = OrderLine.objects.filter(paid=True) 155 context_object_name = 'orders' 156 157 158 class Order(DashboardPermissionMixin, DetailView): 159 model = OrderLine 160 template_name = 'webshop/dashboard/order.html' 161 permission_required = 'webshop.change_order_line' 162 context_object_name = 'order' 163 164 165 class OrderDeliver(DashboardPermissionMixin, DetailView): 166 model = OrderLine 167 permission_required = 'webshop.change_order_line' 168 169 def post(self, *args, **kwargs): 170 super(OrderDeliver, self).get(*args, **kwargs) 171 if not self.object.delivered: 172 self.object.delivered = True 173 self.object.save() 174 return self.get(*args, **kwargs) 175 176 def get(self, *args, **kwargs): 177 super(OrderDeliver, self).get(*args, **kwargs) 178 return redirect('dashboard-webshop:order', pk=self.object.pk) 179 ``` Path: `apps/dashboard/chunks/dashboard/views.py` Content: ``` 1 from chunks.models 
import Chunk 2 from django.core.urlresolvers import reverse, reverse_lazy 3 from django.views.generic import CreateView, DeleteView, ListView, UpdateView 4 5 from apps.dashboard.tools import DashboardPermissionMixin 6 7 8 class ChunkList(DashboardPermissionMixin, ListView): 9 model = Chunk 10 queryset = Chunk.objects.all() 11 context_object_name = 'chunks' 12 permission_required = 'chunks.add_chunk' 13 14 15 class ChunkUpdate(DashboardPermissionMixin, UpdateView): 16 model = Chunk 17 context_object_name = 'chunk' 18 fields = ('key', 'description', 'content') 19 permission_required = 'chunks.change_chunk' 20 success_url = reverse_lazy('chunk-dashboard:list') 21 22 23 class ChunkCreate(DashboardPermissionMixin, CreateView): 24 model = Chunk 25 fields = ('key', 'description', 'content') 26 permission_required = 'chunks.add_chunk' 27 28 def get_object(self, queryset=None): 29 # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146, 30 # an issue with regards to guardian's PermissionMixin working with CreateView - 31 # where it requires an object even in a CreateView. 32 return None 33 34 def get_success_url(self): 35 return reverse('chunk-dashboard:update', args=(self.object.id,)) 36 37 38 class ChunkDelete(DashboardPermissionMixin, DeleteView): 39 model = Chunk 40 permission_required = ('chunks.delete_chunk',) 41 success_url = reverse_lazy('chunk-dashboard:list') 42 ``` Path: `apps/dashboard/tools.py` Content: ``` 1 # -*- encoding: utf-8 -*- 2 3 from datetime import date 4 5 from django.contrib.auth.models import Group 6 from django.core.exceptions import ObjectDoesNotExist, PermissionDenied 7 from guardian.mixins import PermissionRequiredMixin 8 9 from apps.approval.models import MembershipApproval 10 from apps.gallery.models import UnhandledImage 11 from apps.inventory.models import Batch 12 from apps.posters.models import Poster 13 14 15 def has_access(request): 16 """ 17 This helper method does a basic check to see if the logged in user 18 has access to the dashboard. 19 20 We might add additional checks here later. 21 """ 22 23 if request.user.is_superuser: 24 return True 25 26 try: 27 committees = Group.objects.get(name='Komiteer') 28 except ObjectDoesNotExist: 29 committees = None 30 31 if committees and committees in request.user.groups.all(): 32 return True 33 34 return False 35 36 37 def check_access_or_403(request): 38 """ 39 Checks if a user bundled in a request object has access using has_access 40 tool function, if not raise a 403 exception 41 """ 42 if not has_access(request): 43 raise PermissionDenied 44 45 46 def get_base_context(request): 47 """ 48 This function returns a dictionary with the proper context variables 49 needed for given permission settings. Should be used as the initial 50 context for every dashboard view. For example, it is used for rendering 51 badges in the dashboard menu. 52 53 Add your own if req.user.has_perm statements adding the context objects 54 of that you need. 
55 """ 56 57 context = {} 58 59 context['user_permissions'] = set(request.user.get_all_permissions()) 60 61 # Check if we need approval count to display in template sidebar badge 62 if request.user.has_perm('approval.view_membershipapproval'): 63 context['approval_pending'] = MembershipApproval.objects.filter( 64 processed=False).count() 65 66 # Check if there exists a batch in inventory that has expired 67 if request.user.has_perm('inventory.view_item'): 68 if Batch.objects.filter(expiration_date__lt=date.today()): 69 context['inventory_expired'] = True 70 71 if request.user.has_perm('posters.view_poster'): 72 if Poster.objects.filter(assigned_to=None) or Poster.objects.filter(assigned_to=request.user): 73 context['poster_orders'] = Poster.objects.filter(assigned_to=None).count() 74 context['poster_orders'] += Poster.objects.filter(assigned_to=request.user, finished=False).count() 75 76 # Check if we have any unhandled images pending crop and save 77 if request.user.has_perm('gallery.view_unhandledimage'): 78 context['unhandled_images'] = UnhandledImage.objects.all() 79 80 return context 81 82 83 # Mixin for Class Based Views 84 class DashboardMixin(object): 85 """ 86 The DashboardMixin sets up the needed context data, as well as performs 87 generic access checks. 88 """ 89 90 def dispatch(self, request, *args, **kwargs): 91 """ 92 Hooks into the dispatch cycle, checking whether or not the currently 93 logged in user has access to the dashboard in general. 94 :param request: Django Request object 95 :param args: Positional arguments 96 :param kwargs: Keyword arguments 97 :return: Invocation of superclass dispatch 98 """ 99 100 if not has_access(request): 101 raise PermissionDenied 102 103 return super(DashboardMixin, self).dispatch(request, *args, **kwargs) 104 105 def get_context_data(self, **kwargs): 106 """ 107 Sets context data on superclass, before populating it further with 108 the context data needed by dashboard. 109 :param kwargs: Keyword arguments 110 :return: A context dictionary 111 """ 112 113 context = super(DashboardMixin, self).get_context_data(**kwargs) 114 context.update(get_base_context(self.request)) 115 116 return context 117 118 119 class DashboardPermissionMixin(DashboardMixin, PermissionRequiredMixin): 120 """ 121 DashboardPermissionMixin combines the DashboardMixin with Django 122 Guardian's permission based mixin, rendering a 403 Unauthorized 123 template if the currently logged in user is lacking appropriate 124 permissions to access a certain view. 125 """ 126 127 return_403 = True 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/dashboard/chunks/dashboard/views.py b/apps/dashboard/chunks/dashboard/views.py --- a/apps/dashboard/chunks/dashboard/views.py +++ b/apps/dashboard/chunks/dashboard/views.py @@ -25,12 +25,6 @@ fields = ('key', 'description', 'content') permission_required = 'chunks.add_chunk' - def get_object(self, queryset=None): - # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146, - # an issue with regards to guardian's PermissionMixin working with CreateView - - # where it requires an object even in a CreateView. - return None - def get_success_url(self): return reverse('chunk-dashboard:update', args=(self.object.id,)) diff --git a/apps/dashboard/tools.py b/apps/dashboard/tools.py --- a/apps/dashboard/tools.py +++ b/apps/dashboard/tools.py @@ -124,4 +124,12 @@ permissions to access a certain view. """ + def get_permission_object(self, *args, **kwargs): + """ + By default PermissionRequiredMixin works with object permissions. + By returning None we force guardian to only check if the user + has the proper permission. + """ + return None + return_403 = True diff --git a/apps/webshop/dashboard/views.py b/apps/webshop/dashboard/views.py --- a/apps/webshop/dashboard/views.py +++ b/apps/webshop/dashboard/views.py @@ -42,10 +42,6 @@ template_name = 'webshop/dashboard/category_update.html' permission_required = 'webshop.add_category' - def get_object(self, *args, **kwargs): - # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195 - return None - def get_success_url(self): return reverse('dashboard-webshop:categories') @@ -94,10 +90,6 @@ product.category = category return super(ProductCreate, self).form_valid(form) - def get_object(self, *args, **kwargs): - # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195 - return None - def get_success_url(self): return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})
{"golden_diff": "diff --git a/apps/dashboard/chunks/dashboard/views.py b/apps/dashboard/chunks/dashboard/views.py\n--- a/apps/dashboard/chunks/dashboard/views.py\n+++ b/apps/dashboard/chunks/dashboard/views.py\n@@ -25,12 +25,6 @@\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.add_chunk'\n \n- def get_object(self, queryset=None):\n- # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146,\n- # an issue with regards to guardian's PermissionMixin working with CreateView -\n- # where it requires an object even in a CreateView.\n- return None\n-\n def get_success_url(self):\n return reverse('chunk-dashboard:update', args=(self.object.id,))\n \ndiff --git a/apps/dashboard/tools.py b/apps/dashboard/tools.py\n--- a/apps/dashboard/tools.py\n+++ b/apps/dashboard/tools.py\n@@ -124,4 +124,12 @@\n permissions to access a certain view.\n \"\"\"\n \n+ def get_permission_object(self, *args, **kwargs):\n+ \"\"\"\n+ By default PermissionRequiredMixin works with object permissions.\n+ By returning None we force guardian to only check if the user\n+ has the proper permission.\n+ \"\"\"\n+ return None\n+\n return_403 = True\ndiff --git a/apps/webshop/dashboard/views.py b/apps/webshop/dashboard/views.py\n--- a/apps/webshop/dashboard/views.py\n+++ b/apps/webshop/dashboard/views.py\n@@ -42,10 +42,6 @@\n template_name = 'webshop/dashboard/category_update.html'\n permission_required = 'webshop.add_category'\n \n- def get_object(self, *args, **kwargs):\n- # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n- return None\n-\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n \n@@ -94,10 +90,6 @@\n product.category = category\n return super(ProductCreate, self).form_valid(form)\n \n- def get_object(self, *args, **kwargs):\n- # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n- return None\n-\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})\n", "issue": "Make sure people with `webshop.view_model` can view detail views in dashboard\nUsers with access to webshop dashboard cannot get a list of products (dashboard -> webshop -> kategorier -> click on a category), nor view the details of an order (dashboard -> webshop -> bestillinger -> click on an order)\n\n", "before_files": [{"content": "import logging\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.views.generic import (CreateView, DeleteView, DetailView, ListView, TemplateView,\n UpdateView)\nfrom taggit.models import TaggedItem\n\nfrom apps.dashboard.tools import DashboardPermissionMixin\nfrom apps.gallery.models import ResponsiveImage\nfrom apps.webshop.models import Category, OrderLine, Product\n\nlogger = logging.getLogger(__name__)\n\n\nclass Overview(DashboardPermissionMixin, TemplateView):\n template_name = 'webshop/dashboard/index.html'\n permission_required = 'webshop.view_category'\n\n\nclass Categories(DashboardPermissionMixin, TemplateView):\n model = Category\n template_name = 'webshop/dashboard/categories.html'\n permission_required = 'webshop.change_category'\n\n def get_context_data(self, *args, **kwargs):\n context = super(Categories, self).get_context_data(*args, **kwargs)\n context['categories'] = Category.objects.all().prefetch_related('products')\n return context\n\n\nclass 
CategoryView(DashboardPermissionMixin, DetailView):\n model = Category\n template_name = 'webshop/dashboard/category.html'\n permission_required = 'webshop.view_category'\n\n\nclass CategoryCreate(DashboardPermissionMixin, CreateView):\n model = Category\n fields = ['name', 'slug']\n template_name = 'webshop/dashboard/category_update.html'\n permission_required = 'webshop.add_category'\n\n def get_object(self, *args, **kwargs):\n # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n return None\n\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n\n\nclass CategoryUpdate(DashboardPermissionMixin, UpdateView):\n model = Category\n fields = ['name', 'slug']\n template_name = 'webshop/dashboard/category_update.html'\n context_object_name = 'category'\n permission_required = 'webshop.change_product'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.object.slug})\n\n\nclass CategoryDelete(DashboardPermissionMixin, DeleteView):\n model = Category\n template_name = 'webshop/dashboard/delete.html'\n permission_required = 'webshop.delete_category'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n\n\nclass ProductView(DashboardPermissionMixin, DetailView):\n model = Product\n template_name = 'webshop/dashboard/product.html'\n permission_required = 'webshop.view_product'\n\n\nclass ProductCreate(DashboardPermissionMixin, CreateView):\n model = Product\n fields = ['name', 'slug', 'short', 'description', 'price', 'stock']\n template_name = 'webshop/dashboard/product_update.html'\n permission_required = 'webshop.add_product'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProductCreate, self).get_context_data(*args, **kwargs)\n context['category'] = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))\n return context\n\n def form_valid(self, form):\n product = form.save(commit=False)\n # Setting foreign key\n category = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))\n product.category = category\n return super(ProductCreate, self).form_valid(form)\n\n def get_object(self, *args, **kwargs):\n # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n return None\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})\n\n\nclass ProductUpdate(DashboardPermissionMixin, UpdateView):\n model = Product\n fields = ['name', 'slug', 'short', 'description', 'price', 'stock']\n template_name = 'webshop/dashboard/product_update.html'\n context_object_name = 'product'\n permission_required = 'webshop.change_product'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProductUpdate, self).get_context_data(*args, **kwargs)\n context['category'] = self.object.category\n return context\n\n def get_success_url(self):\n return reverse('dashboard-webshop:product', kwargs={'slug': self.object.slug})\n\n\nclass ProductDelete(DashboardPermissionMixin, DeleteView):\n model = Product\n template_name = 'webshop/dashboard/delete.html'\n permission_required = 'webshop.delete_product'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.object.category.slug})\n\n\nclass ProductImage(DashboardPermissionMixin, DetailView):\n model = Product\n template_name = 'webshop/dashboard/image.html'\n permission_required = 'webshop.change_product'\n\n def get_context_data(self, *args, 
**kwargs):\n context = super(ProductImage, self).get_context_data(*args, **kwargs)\n # Filter out potential ResponsiveImage objects that have orphan file references\n images = ResponsiveImage.objects.all().order_by('-timestamp')[:15]\n context['images'] = [i for i in images if i.file_status_ok()]\n\n context['tags'] = sorted(set(tag.tag.name for tag in TaggedItem.objects.filter(\n content_type=ContentType.objects.get_for_model(ResponsiveImage)\n ).order_by('tag__name')))\n\n return context\n\n def post(self, request, *args, **kwargs):\n pass\n\n\nclass Orders(DashboardPermissionMixin, ListView):\n template_name = 'webshop/dashboard/orders.html'\n permission_required = 'webshop.view_order'\n queryset = OrderLine.objects.filter(paid=True)\n context_object_name = 'orders'\n\n\nclass Order(DashboardPermissionMixin, DetailView):\n model = OrderLine\n template_name = 'webshop/dashboard/order.html'\n permission_required = 'webshop.change_order_line'\n context_object_name = 'order'\n\n\nclass OrderDeliver(DashboardPermissionMixin, DetailView):\n model = OrderLine\n permission_required = 'webshop.change_order_line'\n\n def post(self, *args, **kwargs):\n super(OrderDeliver, self).get(*args, **kwargs)\n if not self.object.delivered:\n self.object.delivered = True\n self.object.save()\n return self.get(*args, **kwargs)\n\n def get(self, *args, **kwargs):\n super(OrderDeliver, self).get(*args, **kwargs)\n return redirect('dashboard-webshop:order', pk=self.object.pk)\n", "path": "apps/webshop/dashboard/views.py"}, {"content": "from chunks.models import Chunk\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.views.generic import CreateView, DeleteView, ListView, UpdateView\n\nfrom apps.dashboard.tools import DashboardPermissionMixin\n\n\nclass ChunkList(DashboardPermissionMixin, ListView):\n model = Chunk\n queryset = Chunk.objects.all()\n context_object_name = 'chunks'\n permission_required = 'chunks.add_chunk'\n\n\nclass ChunkUpdate(DashboardPermissionMixin, UpdateView):\n model = Chunk\n context_object_name = 'chunk'\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.change_chunk'\n success_url = reverse_lazy('chunk-dashboard:list')\n\n\nclass ChunkCreate(DashboardPermissionMixin, CreateView):\n model = Chunk\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.add_chunk'\n\n def get_object(self, queryset=None):\n # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146,\n # an issue with regards to guardian's PermissionMixin working with CreateView -\n # where it requires an object even in a CreateView.\n return None\n\n def get_success_url(self):\n return reverse('chunk-dashboard:update', args=(self.object.id,))\n\n\nclass ChunkDelete(DashboardPermissionMixin, DeleteView):\n model = Chunk\n permission_required = ('chunks.delete_chunk',)\n success_url = reverse_lazy('chunk-dashboard:list')\n", "path": "apps/dashboard/chunks/dashboard/views.py"}, {"content": "# -*- encoding: utf-8 -*-\n\nfrom datetime import date\n\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom guardian.mixins import PermissionRequiredMixin\n\nfrom apps.approval.models import MembershipApproval\nfrom apps.gallery.models import UnhandledImage\nfrom apps.inventory.models import Batch\nfrom apps.posters.models import Poster\n\n\ndef has_access(request):\n \"\"\"\n This helper method does a basic check to see if the logged in user\n has access to the 
dashboard.\n\n We might add additional checks here later.\n \"\"\"\n\n if request.user.is_superuser:\n return True\n\n try:\n committees = Group.objects.get(name='Komiteer')\n except ObjectDoesNotExist:\n committees = None\n\n if committees and committees in request.user.groups.all():\n return True\n\n return False\n\n\ndef check_access_or_403(request):\n \"\"\"\n Checks if a user bundled in a request object has access using has_access\n tool function, if not raise a 403 exception\n \"\"\"\n if not has_access(request):\n raise PermissionDenied\n\n\ndef get_base_context(request):\n \"\"\"\n This function returns a dictionary with the proper context variables\n needed for given permission settings. Should be used as the initial\n context for every dashboard view. For example, it is used for rendering\n badges in the dashboard menu.\n\n Add your own if req.user.has_perm statements adding the context objects\n of that you need.\n \"\"\"\n\n context = {}\n\n context['user_permissions'] = set(request.user.get_all_permissions())\n\n # Check if we need approval count to display in template sidebar badge\n if request.user.has_perm('approval.view_membershipapproval'):\n context['approval_pending'] = MembershipApproval.objects.filter(\n processed=False).count()\n\n # Check if there exists a batch in inventory that has expired\n if request.user.has_perm('inventory.view_item'):\n if Batch.objects.filter(expiration_date__lt=date.today()):\n context['inventory_expired'] = True\n\n if request.user.has_perm('posters.view_poster'):\n if Poster.objects.filter(assigned_to=None) or Poster.objects.filter(assigned_to=request.user):\n context['poster_orders'] = Poster.objects.filter(assigned_to=None).count()\n context['poster_orders'] += Poster.objects.filter(assigned_to=request.user, finished=False).count()\n\n # Check if we have any unhandled images pending crop and save\n if request.user.has_perm('gallery.view_unhandledimage'):\n context['unhandled_images'] = UnhandledImage.objects.all()\n\n return context\n\n\n# Mixin for Class Based Views\nclass DashboardMixin(object):\n \"\"\"\n The DashboardMixin sets up the needed context data, as well as performs\n generic access checks.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n Hooks into the dispatch cycle, checking whether or not the currently\n logged in user has access to the dashboard in general.\n :param request: Django Request object\n :param args: Positional arguments\n :param kwargs: Keyword arguments\n :return: Invocation of superclass dispatch\n \"\"\"\n\n if not has_access(request):\n raise PermissionDenied\n\n return super(DashboardMixin, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Sets context data on superclass, before populating it further with\n the context data needed by dashboard.\n :param kwargs: Keyword arguments\n :return: A context dictionary\n \"\"\"\n\n context = super(DashboardMixin, self).get_context_data(**kwargs)\n context.update(get_base_context(self.request))\n\n return context\n\n\nclass DashboardPermissionMixin(DashboardMixin, PermissionRequiredMixin):\n \"\"\"\n DashboardPermissionMixin combines the DashboardMixin with Django\n Guardian's permission based mixin, rendering a 403 Unauthorized\n template if the currently logged in user is lacking appropriate\n permissions to access a certain view.\n \"\"\"\n\n return_403 = True\n", "path": "apps/dashboard/tools.py"}], "after_files": [{"content": "import logging\n\nfrom django.contrib.contenttypes.models import 
ContentType\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.views.generic import (CreateView, DeleteView, DetailView, ListView, TemplateView,\n UpdateView)\nfrom taggit.models import TaggedItem\n\nfrom apps.dashboard.tools import DashboardPermissionMixin\nfrom apps.gallery.models import ResponsiveImage\nfrom apps.webshop.models import Category, OrderLine, Product\n\nlogger = logging.getLogger(__name__)\n\n\nclass Overview(DashboardPermissionMixin, TemplateView):\n template_name = 'webshop/dashboard/index.html'\n permission_required = 'webshop.view_category'\n\n\nclass Categories(DashboardPermissionMixin, TemplateView):\n model = Category\n template_name = 'webshop/dashboard/categories.html'\n permission_required = 'webshop.change_category'\n\n def get_context_data(self, *args, **kwargs):\n context = super(Categories, self).get_context_data(*args, **kwargs)\n context['categories'] = Category.objects.all().prefetch_related('products')\n return context\n\n\nclass CategoryView(DashboardPermissionMixin, DetailView):\n model = Category\n template_name = 'webshop/dashboard/category.html'\n permission_required = 'webshop.view_category'\n\n\nclass CategoryCreate(DashboardPermissionMixin, CreateView):\n model = Category\n fields = ['name', 'slug']\n template_name = 'webshop/dashboard/category_update.html'\n permission_required = 'webshop.add_category'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n\n\nclass CategoryUpdate(DashboardPermissionMixin, UpdateView):\n model = Category\n fields = ['name', 'slug']\n template_name = 'webshop/dashboard/category_update.html'\n context_object_name = 'category'\n permission_required = 'webshop.change_product'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.object.slug})\n\n\nclass CategoryDelete(DashboardPermissionMixin, DeleteView):\n model = Category\n template_name = 'webshop/dashboard/delete.html'\n permission_required = 'webshop.delete_category'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n\n\nclass ProductView(DashboardPermissionMixin, DetailView):\n model = Product\n template_name = 'webshop/dashboard/product.html'\n permission_required = 'webshop.view_product'\n\n\nclass ProductCreate(DashboardPermissionMixin, CreateView):\n model = Product\n fields = ['name', 'slug', 'short', 'description', 'price', 'stock']\n template_name = 'webshop/dashboard/product_update.html'\n permission_required = 'webshop.add_product'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProductCreate, self).get_context_data(*args, **kwargs)\n context['category'] = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))\n return context\n\n def form_valid(self, form):\n product = form.save(commit=False)\n # Setting foreign key\n category = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))\n product.category = category\n return super(ProductCreate, self).form_valid(form)\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})\n\n\nclass ProductUpdate(DashboardPermissionMixin, UpdateView):\n model = Product\n fields = ['name', 'slug', 'short', 'description', 'price', 'stock']\n template_name = 'webshop/dashboard/product_update.html'\n context_object_name = 'product'\n permission_required = 'webshop.change_product'\n\n def get_context_data(self, *args, **kwargs):\n context = 
super(ProductUpdate, self).get_context_data(*args, **kwargs)\n context['category'] = self.object.category\n return context\n\n def get_success_url(self):\n return reverse('dashboard-webshop:product', kwargs={'slug': self.object.slug})\n\n\nclass ProductDelete(DashboardPermissionMixin, DeleteView):\n model = Product\n template_name = 'webshop/dashboard/delete.html'\n permission_required = 'webshop.delete_product'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.object.category.slug})\n\n\nclass ProductImage(DashboardPermissionMixin, DetailView):\n model = Product\n template_name = 'webshop/dashboard/image.html'\n permission_required = 'webshop.change_product'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProductImage, self).get_context_data(*args, **kwargs)\n # Filter out potential ResponsiveImage objects that have orphan file references\n images = ResponsiveImage.objects.all().order_by('-timestamp')[:15]\n context['images'] = [i for i in images if i.file_status_ok()]\n\n context['tags'] = sorted(set(tag.tag.name for tag in TaggedItem.objects.filter(\n content_type=ContentType.objects.get_for_model(ResponsiveImage)\n ).order_by('tag__name')))\n\n return context\n\n def post(self, request, *args, **kwargs):\n pass\n\n\nclass Orders(DashboardPermissionMixin, ListView):\n template_name = 'webshop/dashboard/orders.html'\n permission_required = 'webshop.view_order'\n queryset = OrderLine.objects.filter(paid=True)\n context_object_name = 'orders'\n\n\nclass Order(DashboardPermissionMixin, DetailView):\n model = OrderLine\n template_name = 'webshop/dashboard/order.html'\n permission_required = 'webshop.change_order_line'\n context_object_name = 'order'\n\n\nclass OrderDeliver(DashboardPermissionMixin, DetailView):\n model = OrderLine\n permission_required = 'webshop.change_order_line'\n\n def post(self, *args, **kwargs):\n super(OrderDeliver, self).get(*args, **kwargs)\n if not self.object.delivered:\n self.object.delivered = True\n self.object.save()\n return self.get(*args, **kwargs)\n\n def get(self, *args, **kwargs):\n super(OrderDeliver, self).get(*args, **kwargs)\n return redirect('dashboard-webshop:order', pk=self.object.pk)\n", "path": "apps/webshop/dashboard/views.py"}, {"content": "from chunks.models import Chunk\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.views.generic import CreateView, DeleteView, ListView, UpdateView\n\nfrom apps.dashboard.tools import DashboardPermissionMixin\n\n\nclass ChunkList(DashboardPermissionMixin, ListView):\n model = Chunk\n queryset = Chunk.objects.all()\n context_object_name = 'chunks'\n permission_required = 'chunks.add_chunk'\n\n\nclass ChunkUpdate(DashboardPermissionMixin, UpdateView):\n model = Chunk\n context_object_name = 'chunk'\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.change_chunk'\n success_url = reverse_lazy('chunk-dashboard:list')\n\n\nclass ChunkCreate(DashboardPermissionMixin, CreateView):\n model = Chunk\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.add_chunk'\n\n def get_success_url(self):\n return reverse('chunk-dashboard:update', args=(self.object.id,))\n\n\nclass ChunkDelete(DashboardPermissionMixin, DeleteView):\n model = Chunk\n permission_required = ('chunks.delete_chunk',)\n success_url = reverse_lazy('chunk-dashboard:list')\n", "path": "apps/dashboard/chunks/dashboard/views.py"}, {"content": "# -*- encoding: utf-8 -*-\n\nfrom datetime import date\n\nfrom 
django.contrib.auth.models import Group\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom guardian.mixins import PermissionRequiredMixin\n\nfrom apps.approval.models import MembershipApproval\nfrom apps.gallery.models import UnhandledImage\nfrom apps.inventory.models import Batch\nfrom apps.posters.models import Poster\n\n\ndef has_access(request):\n \"\"\"\n This helper method does a basic check to see if the logged in user\n has access to the dashboard.\n\n We might add additional checks here later.\n \"\"\"\n\n if request.user.is_superuser:\n return True\n\n try:\n committees = Group.objects.get(name='Komiteer')\n except ObjectDoesNotExist:\n committees = None\n\n if committees and committees in request.user.groups.all():\n return True\n\n return False\n\n\ndef check_access_or_403(request):\n \"\"\"\n Checks if a user bundled in a request object has access using has_access\n tool function, if not raise a 403 exception\n \"\"\"\n if not has_access(request):\n raise PermissionDenied\n\n\ndef get_base_context(request):\n \"\"\"\n This function returns a dictionary with the proper context variables\n needed for given permission settings. Should be used as the initial\n context for every dashboard view. For example, it is used for rendering\n badges in the dashboard menu.\n\n Add your own if req.user.has_perm statements adding the context objects\n of that you need.\n \"\"\"\n\n context = {}\n\n context['user_permissions'] = set(request.user.get_all_permissions())\n\n # Check if we need approval count to display in template sidebar badge\n if request.user.has_perm('approval.view_membershipapproval'):\n context['approval_pending'] = MembershipApproval.objects.filter(\n processed=False).count()\n\n # Check if there exists a batch in inventory that has expired\n if request.user.has_perm('inventory.view_item'):\n if Batch.objects.filter(expiration_date__lt=date.today()):\n context['inventory_expired'] = True\n\n if request.user.has_perm('posters.view_poster'):\n if Poster.objects.filter(assigned_to=None) or Poster.objects.filter(assigned_to=request.user):\n context['poster_orders'] = Poster.objects.filter(assigned_to=None).count()\n context['poster_orders'] += Poster.objects.filter(assigned_to=request.user, finished=False).count()\n\n # Check if we have any unhandled images pending crop and save\n if request.user.has_perm('gallery.view_unhandledimage'):\n context['unhandled_images'] = UnhandledImage.objects.all()\n\n return context\n\n\n# Mixin for Class Based Views\nclass DashboardMixin(object):\n \"\"\"\n The DashboardMixin sets up the needed context data, as well as performs\n generic access checks.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n Hooks into the dispatch cycle, checking whether or not the currently\n logged in user has access to the dashboard in general.\n :param request: Django Request object\n :param args: Positional arguments\n :param kwargs: Keyword arguments\n :return: Invocation of superclass dispatch\n \"\"\"\n\n if not has_access(request):\n raise PermissionDenied\n\n return super(DashboardMixin, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Sets context data on superclass, before populating it further with\n the context data needed by dashboard.\n :param kwargs: Keyword arguments\n :return: A context dictionary\n \"\"\"\n\n context = super(DashboardMixin, self).get_context_data(**kwargs)\n context.update(get_base_context(self.request))\n\n return context\n\n\nclass 
DashboardPermissionMixin(DashboardMixin, PermissionRequiredMixin):\n \"\"\"\n DashboardPermissionMixin combines the DashboardMixin with Django\n Guardian's permission based mixin, rendering a 403 Unauthorized\n template if the currently logged in user is lacking appropriate\n permissions to access a certain view.\n \"\"\"\n\n def get_permission_object(self, *args, **kwargs):\n \"\"\"\n By default PermissionRequiredMixin works with object permissions.\n By returning None we force guardian to only check if the user\n has the proper permission.\n \"\"\"\n return None\n\n return_403 = True\n", "path": "apps/dashboard/tools.py"}]}
3,746
531
gh_patches_debug_20412
rasdani/github-patches
git_diff
docker__docker-py-1412
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Swarm tests fail if host has multiple network interfaces ``` __________________ ERROR at setup of ServiceTest.test_create ___________________ tests/integration/models_services_test.py:14: in setUpClass client.swarm.init(listen_addr=helpers.swarm_listen_addr()) docker/models/swarm.py:96: in init self.client.api.init_swarm(**init_kwargs) docker/utils/decorators.py:35: in wrapper return f(self, *args, **kwargs) docker/api/swarm.py:102: in init_swarm self._raise_for_status(response) docker/api/client.py:208: in _raise_for_status raise create_api_error_from_http_exception(e) docker/errors.py:30: in create_api_error_from_http_exception raise cls(e, response=response, explanation=explanation) E APIError: 500 Server Error: Internal Server Error for url: http://172.17.0.1:12375/v1.24/swarm/init ("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (10.0.2.15 on enp0s3 and 192.168.48.11 on enp0s8)") ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/models/swarm.py` Content: ``` 1 from docker.api import APIClient 2 from docker.errors import APIError 3 from docker.types import SwarmSpec 4 from .resource import Model 5 6 7 class Swarm(Model): 8 """ 9 The server's Swarm state. This a singleton that must be reloaded to get 10 the current state of the Swarm. 11 """ 12 def __init__(self, *args, **kwargs): 13 super(Swarm, self).__init__(*args, **kwargs) 14 if self.client: 15 try: 16 self.reload() 17 except APIError as e: 18 # FIXME: https://github.com/docker/docker/issues/29192 19 if e.response.status_code not in (406, 503): 20 raise 21 22 @property 23 def version(self): 24 """ 25 The version number of the swarm. If this is not the same as the 26 server, the :py:meth:`update` function will not work and you will 27 need to call :py:meth:`reload` before calling it again. 28 """ 29 return self.attrs.get('Version').get('Index') 30 31 def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377', 32 force_new_cluster=False, swarm_spec=None, **kwargs): 33 """ 34 Initialize a new swarm on this Engine. 35 36 Args: 37 advertise_addr (str): Externally reachable address advertised to 38 other nodes. This can either be an address/port combination in 39 the form ``192.168.1.1:4567``, or an interface followed by a 40 port number, like ``eth0:4567``. If the port number is omitted, 41 the port number from the listen address is used. 42 43 If not specified, it will be automatically detected when 44 possible. 45 listen_addr (str): Listen address used for inter-manager 46 communication, as well as determining the networking interface 47 used for the VXLAN Tunnel Endpoint (VTEP). This can either be 48 an address/port combination in the form ``192.168.1.1:4567``, 49 or an interface followed by a port number, like ``eth0:4567``. 50 If the port number is omitted, the default swarm listening port 51 is used. Default: ``0.0.0.0:2377`` 52 force_new_cluster (bool): Force creating a new Swarm, even if 53 already part of one. Default: False 54 task_history_retention_limit (int): Maximum number of tasks 55 history stored. 56 snapshot_interval (int): Number of logs entries between snapshot. 57 keep_old_snapshots (int): Number of snapshots to keep beyond the 58 current snapshot. 
59 log_entries_for_slow_followers (int): Number of log entries to 60 keep around to sync up slow followers after a snapshot is 61 created. 62 heartbeat_tick (int): Amount of ticks (in seconds) between each 63 heartbeat. 64 election_tick (int): Amount of ticks (in seconds) needed without a 65 leader to trigger a new election. 66 dispatcher_heartbeat_period (int): The delay for an agent to send 67 a heartbeat to the dispatcher. 68 node_cert_expiry (int): Automatic expiry for nodes certificates. 69 external_ca (dict): Configuration for forwarding signing requests 70 to an external certificate authority. Use 71 ``docker.types.SwarmExternalCA``. 72 name (string): Swarm's name 73 74 Returns: 75 ``True`` if the request went through. 76 77 Raises: 78 :py:class:`docker.errors.APIError` 79 If the server returns an error. 80 81 Example: 82 83 >>> client.swarm.init( 84 advertise_addr='eth0', listen_addr='0.0.0.0:5000', 85 force_new_cluster=False, snapshot_interval=5000, 86 log_entries_for_slow_followers=1200 87 ) 88 89 """ 90 init_kwargs = {} 91 for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']: 92 if arg in kwargs: 93 init_kwargs[arg] = kwargs[arg] 94 del kwargs[arg] 95 init_kwargs['swarm_spec'] = SwarmSpec(**kwargs) 96 self.client.api.init_swarm(**init_kwargs) 97 self.reload() 98 99 def join(self, *args, **kwargs): 100 return self.client.api.join_swarm(*args, **kwargs) 101 join.__doc__ = APIClient.join_swarm.__doc__ 102 103 def leave(self, *args, **kwargs): 104 return self.client.api.leave_swarm(*args, **kwargs) 105 leave.__doc__ = APIClient.leave_swarm.__doc__ 106 107 def reload(self): 108 """ 109 Inspect the swarm on the server and store the response in 110 :py:attr:`attrs`. 111 112 Raises: 113 :py:class:`docker.errors.APIError` 114 If the server returns an error. 115 """ 116 self.attrs = self.client.api.inspect_swarm() 117 118 def update(self, rotate_worker_token=False, rotate_manager_token=False, 119 **kwargs): 120 """ 121 Update the swarm's configuration. 122 123 It takes the same arguments as :py:meth:`init`, except 124 ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In 125 addition, it takes these arguments: 126 127 Args: 128 rotate_worker_token (bool): Rotate the worker join token. Default: 129 ``False``. 130 rotate_manager_token (bool): Rotate the manager join token. 131 Default: ``False``. 132 133 Raises: 134 :py:class:`docker.errors.APIError` 135 If the server returns an error. 136 137 """ 138 # this seems to have to be set 139 if kwargs.get('node_cert_expiry') is None: 140 kwargs['node_cert_expiry'] = 7776000000000000 141 142 return self.client.api.update_swarm( 143 version=self.version, 144 swarm_spec=SwarmSpec(**kwargs), 145 rotate_worker_token=rotate_worker_token, 146 rotate_manager_token=rotate_manager_token 147 ) 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/models/swarm.py b/docker/models/swarm.py --- a/docker/models/swarm.py +++ b/docker/models/swarm.py @@ -29,7 +29,7 @@ return self.attrs.get('Version').get('Index') def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377', - force_new_cluster=False, swarm_spec=None, **kwargs): + force_new_cluster=False, **kwargs): """ Initialize a new swarm on this Engine. @@ -87,11 +87,11 @@ ) """ - init_kwargs = {} - for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']: - if arg in kwargs: - init_kwargs[arg] = kwargs[arg] - del kwargs[arg] + init_kwargs = { + 'advertise_addr': advertise_addr, + 'listen_addr': listen_addr, + 'force_new_cluster': force_new_cluster + } init_kwargs['swarm_spec'] = SwarmSpec(**kwargs) self.client.api.init_swarm(**init_kwargs) self.reload()
{"golden_diff": "diff --git a/docker/models/swarm.py b/docker/models/swarm.py\n--- a/docker/models/swarm.py\n+++ b/docker/models/swarm.py\n@@ -29,7 +29,7 @@\n return self.attrs.get('Version').get('Index')\n \n def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',\n- force_new_cluster=False, swarm_spec=None, **kwargs):\n+ force_new_cluster=False, **kwargs):\n \"\"\"\n Initialize a new swarm on this Engine.\n \n@@ -87,11 +87,11 @@\n )\n \n \"\"\"\n- init_kwargs = {}\n- for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:\n- if arg in kwargs:\n- init_kwargs[arg] = kwargs[arg]\n- del kwargs[arg]\n+ init_kwargs = {\n+ 'advertise_addr': advertise_addr,\n+ 'listen_addr': listen_addr,\n+ 'force_new_cluster': force_new_cluster\n+ }\n init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n", "issue": "Swarm tests fail if host has multiple network interfaces\n```\r\n__________________ ERROR at setup of ServiceTest.test_create ___________________\r\ntests/integration/models_services_test.py:14: in setUpClass\r\n client.swarm.init(listen_addr=helpers.swarm_listen_addr())\r\ndocker/models/swarm.py:96: in init\r\n self.client.api.init_swarm(**init_kwargs)\r\ndocker/utils/decorators.py:35: in wrapper\r\n return f(self, *args, **kwargs)\r\ndocker/api/swarm.py:102: in init_swarm\r\n self._raise_for_status(response)\r\ndocker/api/client.py:208: in _raise_for_status\r\n raise create_api_error_from_http_exception(e)\r\ndocker/errors.py:30: in create_api_error_from_http_exception\r\n raise cls(e, response=response, explanation=explanation)\r\nE APIError: 500 Server Error: Internal Server Error for url: http://172.17.0.1:12375/v1.24/swarm/init (\"could not choose an IP address to advertise since this system has multiple addresses on different interfaces (10.0.2.15 on enp0s3 and 192.168.48.11 on enp0s8)\")\r\n```\r\n\n", "before_files": [{"content": "from docker.api import APIClient\nfrom docker.errors import APIError\nfrom docker.types import SwarmSpec\nfrom .resource import Model\n\n\nclass Swarm(Model):\n \"\"\"\n The server's Swarm state. This a singleton that must be reloaded to get\n the current state of the Swarm.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(Swarm, self).__init__(*args, **kwargs)\n if self.client:\n try:\n self.reload()\n except APIError as e:\n # FIXME: https://github.com/docker/docker/issues/29192\n if e.response.status_code not in (406, 503):\n raise\n\n @property\n def version(self):\n \"\"\"\n The version number of the swarm. If this is not the same as the\n server, the :py:meth:`update` function will not work and you will\n need to call :py:meth:`reload` before calling it again.\n \"\"\"\n return self.attrs.get('Version').get('Index')\n\n def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',\n force_new_cluster=False, swarm_spec=None, **kwargs):\n \"\"\"\n Initialize a new swarm on this Engine.\n\n Args:\n advertise_addr (str): Externally reachable address advertised to\n other nodes. This can either be an address/port combination in\n the form ``192.168.1.1:4567``, or an interface followed by a\n port number, like ``eth0:4567``. If the port number is omitted,\n the port number from the listen address is used.\n\n If not specified, it will be automatically detected when\n possible.\n listen_addr (str): Listen address used for inter-manager\n communication, as well as determining the networking interface\n used for the VXLAN Tunnel Endpoint (VTEP). 
This can either be\n an address/port combination in the form ``192.168.1.1:4567``,\n or an interface followed by a port number, like ``eth0:4567``.\n If the port number is omitted, the default swarm listening port\n is used. Default: ``0.0.0.0:2377``\n force_new_cluster (bool): Force creating a new Swarm, even if\n already part of one. Default: False\n task_history_retention_limit (int): Maximum number of tasks\n history stored.\n snapshot_interval (int): Number of logs entries between snapshot.\n keep_old_snapshots (int): Number of snapshots to keep beyond the\n current snapshot.\n log_entries_for_slow_followers (int): Number of log entries to\n keep around to sync up slow followers after a snapshot is\n created.\n heartbeat_tick (int): Amount of ticks (in seconds) between each\n heartbeat.\n election_tick (int): Amount of ticks (in seconds) needed without a\n leader to trigger a new election.\n dispatcher_heartbeat_period (int): The delay for an agent to send\n a heartbeat to the dispatcher.\n node_cert_expiry (int): Automatic expiry for nodes certificates.\n external_ca (dict): Configuration for forwarding signing requests\n to an external certificate authority. Use\n ``docker.types.SwarmExternalCA``.\n name (string): Swarm's name\n\n Returns:\n ``True`` if the request went through.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> client.swarm.init(\n advertise_addr='eth0', listen_addr='0.0.0.0:5000',\n force_new_cluster=False, snapshot_interval=5000,\n log_entries_for_slow_followers=1200\n )\n\n \"\"\"\n init_kwargs = {}\n for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:\n if arg in kwargs:\n init_kwargs[arg] = kwargs[arg]\n del kwargs[arg]\n init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n\n def join(self, *args, **kwargs):\n return self.client.api.join_swarm(*args, **kwargs)\n join.__doc__ = APIClient.join_swarm.__doc__\n\n def leave(self, *args, **kwargs):\n return self.client.api.leave_swarm(*args, **kwargs)\n leave.__doc__ = APIClient.leave_swarm.__doc__\n\n def reload(self):\n \"\"\"\n Inspect the swarm on the server and store the response in\n :py:attr:`attrs`.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n self.attrs = self.client.api.inspect_swarm()\n\n def update(self, rotate_worker_token=False, rotate_manager_token=False,\n **kwargs):\n \"\"\"\n Update the swarm's configuration.\n\n It takes the same arguments as :py:meth:`init`, except\n ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In\n addition, it takes these arguments:\n\n Args:\n rotate_worker_token (bool): Rotate the worker join token. Default:\n ``False``.\n rotate_manager_token (bool): Rotate the manager join token.\n Default: ``False``.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n # this seems to have to be set\n if kwargs.get('node_cert_expiry') is None:\n kwargs['node_cert_expiry'] = 7776000000000000\n\n return self.client.api.update_swarm(\n version=self.version,\n swarm_spec=SwarmSpec(**kwargs),\n rotate_worker_token=rotate_worker_token,\n rotate_manager_token=rotate_manager_token\n )\n", "path": "docker/models/swarm.py"}], "after_files": [{"content": "from docker.api import APIClient\nfrom docker.errors import APIError\nfrom docker.types import SwarmSpec\nfrom .resource import Model\n\n\nclass Swarm(Model):\n \"\"\"\n The server's Swarm state. 
This a singleton that must be reloaded to get\n the current state of the Swarm.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(Swarm, self).__init__(*args, **kwargs)\n if self.client:\n try:\n self.reload()\n except APIError as e:\n # FIXME: https://github.com/docker/docker/issues/29192\n if e.response.status_code not in (406, 503):\n raise\n\n @property\n def version(self):\n \"\"\"\n The version number of the swarm. If this is not the same as the\n server, the :py:meth:`update` function will not work and you will\n need to call :py:meth:`reload` before calling it again.\n \"\"\"\n return self.attrs.get('Version').get('Index')\n\n def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',\n force_new_cluster=False, **kwargs):\n \"\"\"\n Initialize a new swarm on this Engine.\n\n Args:\n advertise_addr (str): Externally reachable address advertised to\n other nodes. This can either be an address/port combination in\n the form ``192.168.1.1:4567``, or an interface followed by a\n port number, like ``eth0:4567``. If the port number is omitted,\n the port number from the listen address is used.\n\n If not specified, it will be automatically detected when\n possible.\n listen_addr (str): Listen address used for inter-manager\n communication, as well as determining the networking interface\n used for the VXLAN Tunnel Endpoint (VTEP). This can either be\n an address/port combination in the form ``192.168.1.1:4567``,\n or an interface followed by a port number, like ``eth0:4567``.\n If the port number is omitted, the default swarm listening port\n is used. Default: ``0.0.0.0:2377``\n force_new_cluster (bool): Force creating a new Swarm, even if\n already part of one. Default: False\n task_history_retention_limit (int): Maximum number of tasks\n history stored.\n snapshot_interval (int): Number of logs entries between snapshot.\n keep_old_snapshots (int): Number of snapshots to keep beyond the\n current snapshot.\n log_entries_for_slow_followers (int): Number of log entries to\n keep around to sync up slow followers after a snapshot is\n created.\n heartbeat_tick (int): Amount of ticks (in seconds) between each\n heartbeat.\n election_tick (int): Amount of ticks (in seconds) needed without a\n leader to trigger a new election.\n dispatcher_heartbeat_period (int): The delay for an agent to send\n a heartbeat to the dispatcher.\n node_cert_expiry (int): Automatic expiry for nodes certificates.\n external_ca (dict): Configuration for forwarding signing requests\n to an external certificate authority. 
Use\n ``docker.types.SwarmExternalCA``.\n name (string): Swarm's name\n\n Returns:\n ``True`` if the request went through.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> client.swarm.init(\n advertise_addr='eth0', listen_addr='0.0.0.0:5000',\n force_new_cluster=False, snapshot_interval=5000,\n log_entries_for_slow_followers=1200\n )\n\n \"\"\"\n init_kwargs = {\n 'advertise_addr': advertise_addr,\n 'listen_addr': listen_addr,\n 'force_new_cluster': force_new_cluster\n }\n init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n\n def join(self, *args, **kwargs):\n return self.client.api.join_swarm(*args, **kwargs)\n join.__doc__ = APIClient.join_swarm.__doc__\n\n def leave(self, *args, **kwargs):\n return self.client.api.leave_swarm(*args, **kwargs)\n leave.__doc__ = APIClient.leave_swarm.__doc__\n\n def reload(self):\n \"\"\"\n Inspect the swarm on the server and store the response in\n :py:attr:`attrs`.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n self.attrs = self.client.api.inspect_swarm()\n\n def update(self, rotate_worker_token=False, rotate_manager_token=False,\n **kwargs):\n \"\"\"\n Update the swarm's configuration.\n\n It takes the same arguments as :py:meth:`init`, except\n ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In\n addition, it takes these arguments:\n\n Args:\n rotate_worker_token (bool): Rotate the worker join token. Default:\n ``False``.\n rotate_manager_token (bool): Rotate the manager join token.\n Default: ``False``.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n # this seems to have to be set\n if kwargs.get('node_cert_expiry') is None:\n kwargs['node_cert_expiry'] = 7776000000000000\n\n return self.client.api.update_swarm(\n version=self.version,\n swarm_spec=SwarmSpec(**kwargs),\n rotate_worker_token=rotate_worker_token,\n rotate_manager_token=rotate_manager_token\n )\n", "path": "docker/models/swarm.py"}]}
2,171
253
gh_patches_debug_1241
rasdani/github-patches
git_diff
crytic__slither-1108
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug]: Infinite loop in RTLO detector ### What happened? Slither hangs on this code indefinitely ### Can you share code with us to reproduce this bug? https://github.com/ethereum/solidity/blob/develop/test/libsolidity/syntaxTests/comments/multiline_unicode_direction_override_5.sol ### Version 0.8.2 ### Relevant log output _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slither/detectors/source/rtlo.py` Content: ``` 1 import re 2 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification 3 4 5 class RightToLeftOverride(AbstractDetector): 6 """ 7 Detect the usage of a Right-To-Left-Override (U+202E) character 8 """ 9 10 ARGUMENT = "rtlo" 11 HELP = "Right-To-Left-Override control character is used" 12 IMPACT = DetectorClassification.HIGH 13 CONFIDENCE = DetectorClassification.HIGH 14 15 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character" 16 WIKI_TITLE = "Right-to-Left-Override character" 17 WIKI_DESCRIPTION = "An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`." 18 19 # region wiki_exploit_scenario 20 WIKI_EXPLOIT_SCENARIO = """ 21 ```solidity 22 contract Token 23 { 24 25 address payable o; // owner 26 mapping(address => uint) tokens; 27 28 function withdraw() external returns(uint) 29 { 30 uint amount = tokens[msg.sender]; 31 address payable d = msg.sender; 32 tokens[msg.sender] = 0; 33 _withdraw(/*owner‮/*noitanitsed*/ d, o/*‭ 34 /*value */, amount); 35 } 36 37 function _withdraw(address payable fee_receiver, address payable destination, uint value) internal 38 { 39 fee_receiver.transfer(1); 40 destination.transfer(value); 41 } 42 } 43 ``` 44 45 `Token` uses the right-to-left-override character when calling `_withdraw`. As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner. 46 47 """ 48 # endregion wiki_exploit_scenario 49 50 WIKI_RECOMMENDATION = "Special control characters must not be allowed." 51 52 RTLO_CHARACTER_ENCODED = "\u202e".encode("utf-8") 53 STANDARD_JSON = False 54 55 def _detect(self): 56 results = [] 57 pattern = re.compile(".*\u202e.*".encode("utf-8")) 58 59 for filename, source in self.slither.source_code.items(): 60 # Attempt to find all RTLO characters in this source file. 61 original_source_encoded = source.encode("utf-8") 62 start_index = 0 63 64 # Keep searching all file contents for the character. 65 while True: 66 source_encoded = original_source_encoded[start_index:] 67 result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED) 68 69 # If we couldn't find the character in the remainder of source, stop. 
70 if result_index == -1: 71 break 72 73 # We found another instance of the character, define our output 74 idx = start_index + result_index 75 76 relative = self.slither.crytic_compile.filename_lookup(filename).relative 77 info = f"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\n" 78 79 # We have a patch, so pattern.find will return at least one result 80 81 info += f"\t- {pattern.findall(source_encoded)[0]}\n" 82 res = self.generate_result(info) 83 res.add_other( 84 "rtlo-character", 85 (filename, idx, len(self.RTLO_CHARACTER_ENCODED)), 86 self.compilation_unit, 87 ) 88 results.append(res) 89 90 # Advance the start index for the next iteration 91 start_index = result_index + 1 92 93 return results 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/detectors/source/rtlo.py b/slither/detectors/source/rtlo.py --- a/slither/detectors/source/rtlo.py +++ b/slither/detectors/source/rtlo.py @@ -88,6 +88,6 @@ results.append(res) # Advance the start index for the next iteration - start_index = result_index + 1 + start_index = idx + 1 return results
{"golden_diff": "diff --git a/slither/detectors/source/rtlo.py b/slither/detectors/source/rtlo.py\n--- a/slither/detectors/source/rtlo.py\n+++ b/slither/detectors/source/rtlo.py\n@@ -88,6 +88,6 @@\n results.append(res)\n \n # Advance the start index for the next iteration\n- start_index = result_index + 1\n+ start_index = idx + 1\n \n return results\n", "issue": "[Bug]: Infinite loop in RTLO detector\n### What happened?\n\nSlither hangs on this code indefinitely\n\n### Can you share code with us to reproduce this bug?\n\nhttps://github.com/ethereum/solidity/blob/develop/test/libsolidity/syntaxTests/comments/multiline_unicode_direction_override_5.sol\n\n### Version\n\n0.8.2\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass RightToLeftOverride(AbstractDetector):\n \"\"\"\n Detect the usage of a Right-To-Left-Override (U+202E) character\n \"\"\"\n\n ARGUMENT = \"rtlo\"\n HELP = \"Right-To-Left-Override control character is used\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character\"\n WIKI_TITLE = \"Right-to-Left-Override character\"\n WIKI_DESCRIPTION = \"An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract Token\n{\n\n address payable o; // owner\n mapping(address => uint) tokens;\n\n function withdraw() external returns(uint)\n {\n uint amount = tokens[msg.sender];\n address payable d = msg.sender;\n tokens[msg.sender] = 0;\n _withdraw(/*owner\u202e/*noitanitsed*/ d, o/*\u202d\n\t\t /*value */, amount);\n }\n\n function _withdraw(address payable fee_receiver, address payable destination, uint value) internal\n {\n\t\tfee_receiver.transfer(1);\n\t\tdestination.transfer(value);\n }\n}\n```\n\n`Token` uses the right-to-left-override character when calling `_withdraw`. 
As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.\n\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Special control characters must not be allowed.\"\n\n RTLO_CHARACTER_ENCODED = \"\\u202e\".encode(\"utf-8\")\n STANDARD_JSON = False\n\n def _detect(self):\n results = []\n pattern = re.compile(\".*\\u202e.*\".encode(\"utf-8\"))\n\n for filename, source in self.slither.source_code.items():\n # Attempt to find all RTLO characters in this source file.\n original_source_encoded = source.encode(\"utf-8\")\n start_index = 0\n\n # Keep searching all file contents for the character.\n while True:\n source_encoded = original_source_encoded[start_index:]\n result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)\n\n # If we couldn't find the character in the remainder of source, stop.\n if result_index == -1:\n break\n\n # We found another instance of the character, define our output\n idx = start_index + result_index\n\n relative = self.slither.crytic_compile.filename_lookup(filename).relative\n info = f\"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\\n\"\n\n # We have a patch, so pattern.find will return at least one result\n\n info += f\"\\t- {pattern.findall(source_encoded)[0]}\\n\"\n res = self.generate_result(info)\n res.add_other(\n \"rtlo-character\",\n (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),\n self.compilation_unit,\n )\n results.append(res)\n\n # Advance the start index for the next iteration\n start_index = result_index + 1\n\n return results\n", "path": "slither/detectors/source/rtlo.py"}], "after_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass RightToLeftOverride(AbstractDetector):\n \"\"\"\n Detect the usage of a Right-To-Left-Override (U+202E) character\n \"\"\"\n\n ARGUMENT = \"rtlo\"\n HELP = \"Right-To-Left-Override control character is used\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character\"\n WIKI_TITLE = \"Right-to-Left-Override character\"\n WIKI_DESCRIPTION = \"An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract Token\n{\n\n address payable o; // owner\n mapping(address => uint) tokens;\n\n function withdraw() external returns(uint)\n {\n uint amount = tokens[msg.sender];\n address payable d = msg.sender;\n tokens[msg.sender] = 0;\n _withdraw(/*owner\u202e/*noitanitsed*/ d, o/*\u202d\n\t\t /*value */, amount);\n }\n\n function _withdraw(address payable fee_receiver, address payable destination, uint value) internal\n {\n\t\tfee_receiver.transfer(1);\n\t\tdestination.transfer(value);\n }\n}\n```\n\n`Token` uses the right-to-left-override character when calling `_withdraw`. 
As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.\n\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Special control characters must not be allowed.\"\n\n RTLO_CHARACTER_ENCODED = \"\\u202e\".encode(\"utf-8\")\n STANDARD_JSON = False\n\n def _detect(self):\n results = []\n pattern = re.compile(\".*\\u202e.*\".encode(\"utf-8\"))\n\n for filename, source in self.slither.source_code.items():\n # Attempt to find all RTLO characters in this source file.\n original_source_encoded = source.encode(\"utf-8\")\n start_index = 0\n\n # Keep searching all file contents for the character.\n while True:\n source_encoded = original_source_encoded[start_index:]\n result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)\n\n # If we couldn't find the character in the remainder of source, stop.\n if result_index == -1:\n break\n\n # We found another instance of the character, define our output\n idx = start_index + result_index\n\n relative = self.slither.crytic_compile.filename_lookup(filename).relative\n info = f\"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\\n\"\n\n # We have a patch, so pattern.find will return at least one result\n\n info += f\"\\t- {pattern.findall(source_encoded)[0]}\\n\"\n res = self.generate_result(info)\n res.add_other(\n \"rtlo-character\",\n (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),\n self.compilation_unit,\n )\n results.append(res)\n\n # Advance the start index for the next iteration\n start_index = idx + 1\n\n return results\n", "path": "slither/detectors/source/rtlo.py"}]}
1,270
106
gh_patches_debug_5715
rasdani/github-patches
git_diff
huggingface__diffusers-1686
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- It is not possible to disable telemetry ### Describe the bug Although the DISABLE_TELEMETRY and HF_HUB_OFFLINE env variables exist, setting them doesn't actually seem to disable telemetry. https://github.com/huggingface/diffusers/blob/main/src/diffusers/hub_utils.py#L87 Pass is used, and /api/telemetry is prepended to the huggingface endpoint. ### Reproduction - set DISABLE_TELEMETRY to True - run stablediffusion from pretrained - see `/api/telemetry/diffusers_from_pretrained` in the requests made to huggingface ### Logs _No response_ ### System Info main branch --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/diffusers/hub_utils.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2022 The HuggingFace Inc. team. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 17 import os 18 import sys 19 from pathlib import Path 20 from typing import Dict, Optional, Union 21 from uuid import uuid4 22 23 import requests 24 from huggingface_hub import HfFolder, whoami 25 26 from . import __version__ 27 from .utils import ENV_VARS_TRUE_VALUES, HUGGINGFACE_CO_RESOLVE_ENDPOINT, logging 28 from .utils.import_utils import ( 29 _flax_version, 30 _jax_version, 31 _onnxruntime_version, 32 _torch_version, 33 is_flax_available, 34 is_modelcards_available, 35 is_onnx_available, 36 is_torch_available, 37 ) 38 39 40 if is_modelcards_available(): 41 from modelcards import CardData, ModelCard 42 43 44 logger = logging.get_logger(__name__) 45 46 47 MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "utils" / "model_card_template.md" 48 SESSION_ID = uuid4().hex 49 HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES 50 DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES 51 HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/" 52 53 54 def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: 55 """ 56 Formats a user-agent string with basic info about a request. 57 """ 58 ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" 59 if DISABLE_TELEMETRY: 60 return ua + "; telemetry/off" 61 if is_torch_available(): 62 ua += f"; torch/{_torch_version}" 63 if is_flax_available(): 64 ua += f"; jax/{_jax_version}" 65 ua += f"; flax/{_flax_version}" 66 if is_onnx_available(): 67 ua += f"; onnxruntime/{_onnxruntime_version}" 68 # CI will set this value to True 69 if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: 70 ua += "; is_ci/true" 71 if isinstance(user_agent, dict): 72 ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) 73 elif isinstance(user_agent, str): 74 ua += "; " + user_agent 75 return ua 76 77 78 def send_telemetry(data: Dict, name: str): 79 """ 80 Sends logs to the Hub telemetry endpoint. 
81 82 Args: 83 data: the fields to track, e.g. {"example_name": "dreambooth"} 84 name: a unique name to differentiate the telemetry logs, e.g. "diffusers_examples" or "diffusers_notebooks" 85 """ 86 if DISABLE_TELEMETRY or HF_HUB_OFFLINE: 87 pass 88 89 headers = {"user-agent": http_user_agent(data)} 90 endpoint = HUGGINGFACE_CO_TELEMETRY + name 91 try: 92 r = requests.head(endpoint, headers=headers) 93 r.raise_for_status() 94 except Exception: 95 # We don't want to error in case of connection errors of any kind. 96 pass 97 98 99 def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): 100 if token is None: 101 token = HfFolder.get_token() 102 if organization is None: 103 username = whoami(token)["name"] 104 return f"{username}/{model_id}" 105 else: 106 return f"{organization}/{model_id}" 107 108 109 def create_model_card(args, model_name): 110 if not is_modelcards_available: 111 raise ValueError( 112 "Please make sure to have `modelcards` installed when using the `create_model_card` function. You can" 113 " install the package with `pip install modelcards`." 114 ) 115 116 if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]: 117 return 118 119 hub_token = args.hub_token if hasattr(args, "hub_token") else None 120 repo_name = get_full_repo_name(model_name, token=hub_token) 121 122 model_card = ModelCard.from_template( 123 card_data=CardData( # Card metadata object that will be converted to YAML block 124 language="en", 125 license="apache-2.0", 126 library_name="diffusers", 127 tags=[], 128 datasets=args.dataset_name, 129 metrics=[], 130 ), 131 template_path=MODEL_CARD_TEMPLATE_PATH, 132 model_name=model_name, 133 repo_name=repo_name, 134 dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, 135 learning_rate=args.learning_rate, 136 train_batch_size=args.train_batch_size, 137 eval_batch_size=args.eval_batch_size, 138 gradient_accumulation_steps=args.gradient_accumulation_steps 139 if hasattr(args, "gradient_accumulation_steps") 140 else None, 141 adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, 142 adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, 143 adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, 144 adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, 145 lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, 146 lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, 147 ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, 148 ema_power=args.ema_power if hasattr(args, "ema_power") else None, 149 ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, 150 mixed_precision=args.mixed_precision, 151 ) 152 153 card_path = os.path.join(args.output_dir, "README.md") 154 model_card.save(card_path) 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/diffusers/hub_utils.py b/src/diffusers/hub_utils.py --- a/src/diffusers/hub_utils.py +++ b/src/diffusers/hub_utils.py @@ -84,7 +84,7 @@ name: a unique name to differentiate the telemetry logs, e.g. "diffusers_examples" or "diffusers_notebooks" """ if DISABLE_TELEMETRY or HF_HUB_OFFLINE: - pass + return headers = {"user-agent": http_user_agent(data)} endpoint = HUGGINGFACE_CO_TELEMETRY + name
{"golden_diff": "diff --git a/src/diffusers/hub_utils.py b/src/diffusers/hub_utils.py\n--- a/src/diffusers/hub_utils.py\n+++ b/src/diffusers/hub_utils.py\n@@ -84,7 +84,7 @@\n name: a unique name to differentiate the telemetry logs, e.g. \"diffusers_examples\" or \"diffusers_notebooks\"\n \"\"\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n- pass\n+ return\n \n headers = {\"user-agent\": http_user_agent(data)}\n endpoint = HUGGINGFACE_CO_TELEMETRY + name\n", "issue": "It is not possible to disable telemetry\n### Describe the bug\n\nAlthough the DISABLE_TELEMETRY and HF_HUB_OFFLINE env variables exist, setting them doesn't actually seem to disable telemetry.\r\n\r\nhttps://github.com/huggingface/diffusers/blob/main/src/diffusers/hub_utils.py#L87\r\n\r\nPass is used, and /api/telemetry is prepended to the huggingface endpoint.\n\n### Reproduction\n\n- set DISABLE_TELEMETRY to True\r\n- run stablediffusion from pretrained\r\n- see `/api/telemetry/diffusers_from_pretrained` in the requests made to huggingface\n\n### Logs\n\n_No response_\n\n### System Info\n\nmain branch\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\nfrom uuid import uuid4\n\nimport requests\nfrom huggingface_hub import HfFolder, whoami\n\nfrom . 
import __version__\nfrom .utils import ENV_VARS_TRUE_VALUES, HUGGINGFACE_CO_RESOLVE_ENDPOINT, logging\nfrom .utils.import_utils import (\n _flax_version,\n _jax_version,\n _onnxruntime_version,\n _torch_version,\n is_flax_available,\n is_modelcards_available,\n is_onnx_available,\n is_torch_available,\n)\n\n\nif is_modelcards_available():\n from modelcards import CardData, ModelCard\n\n\nlogger = logging.get_logger(__name__)\n\n\nMODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / \"utils\" / \"model_card_template.md\"\nSESSION_ID = uuid4().hex\nHF_HUB_OFFLINE = os.getenv(\"HF_HUB_OFFLINE\", \"\").upper() in ENV_VARS_TRUE_VALUES\nDISABLE_TELEMETRY = os.getenv(\"DISABLE_TELEMETRY\", \"\").upper() in ENV_VARS_TRUE_VALUES\nHUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + \"/api/telemetry/\"\n\n\ndef http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n \"\"\"\n Formats a user-agent string with basic info about a request.\n \"\"\"\n ua = f\"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if DISABLE_TELEMETRY:\n return ua + \"; telemetry/off\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_flax_available():\n ua += f\"; jax/{_jax_version}\"\n ua += f\"; flax/{_flax_version}\"\n if is_onnx_available():\n ua += f\"; onnxruntime/{_onnxruntime_version}\"\n # CI will set this value to True\n if os.environ.get(\"DIFFUSERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua\n\n\ndef send_telemetry(data: Dict, name: str):\n \"\"\"\n Sends logs to the Hub telemetry endpoint.\n\n Args:\n data: the fields to track, e.g. {\"example_name\": \"dreambooth\"}\n name: a unique name to differentiate the telemetry logs, e.g. \"diffusers_examples\" or \"diffusers_notebooks\"\n \"\"\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n pass\n\n headers = {\"user-agent\": http_user_agent(data)}\n endpoint = HUGGINGFACE_CO_TELEMETRY + name\n try:\n r = requests.head(endpoint, headers=headers)\n r.raise_for_status()\n except Exception:\n # We don't want to error in case of connection errors of any kind.\n pass\n\n\ndef get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):\n if token is None:\n token = HfFolder.get_token()\n if organization is None:\n username = whoami(token)[\"name\"]\n return f\"{username}/{model_id}\"\n else:\n return f\"{organization}/{model_id}\"\n\n\ndef create_model_card(args, model_name):\n if not is_modelcards_available:\n raise ValueError(\n \"Please make sure to have `modelcards` installed when using the `create_model_card` function. 
You can\"\n \" install the package with `pip install modelcards`.\"\n )\n\n if hasattr(args, \"local_rank\") and args.local_rank not in [-1, 0]:\n return\n\n hub_token = args.hub_token if hasattr(args, \"hub_token\") else None\n repo_name = get_full_repo_name(model_name, token=hub_token)\n\n model_card = ModelCard.from_template(\n card_data=CardData( # Card metadata object that will be converted to YAML block\n language=\"en\",\n license=\"apache-2.0\",\n library_name=\"diffusers\",\n tags=[],\n datasets=args.dataset_name,\n metrics=[],\n ),\n template_path=MODEL_CARD_TEMPLATE_PATH,\n model_name=model_name,\n repo_name=repo_name,\n dataset_name=args.dataset_name if hasattr(args, \"dataset_name\") else None,\n learning_rate=args.learning_rate,\n train_batch_size=args.train_batch_size,\n eval_batch_size=args.eval_batch_size,\n gradient_accumulation_steps=args.gradient_accumulation_steps\n if hasattr(args, \"gradient_accumulation_steps\")\n else None,\n adam_beta1=args.adam_beta1 if hasattr(args, \"adam_beta1\") else None,\n adam_beta2=args.adam_beta2 if hasattr(args, \"adam_beta2\") else None,\n adam_weight_decay=args.adam_weight_decay if hasattr(args, \"adam_weight_decay\") else None,\n adam_epsilon=args.adam_epsilon if hasattr(args, \"adam_epsilon\") else None,\n lr_scheduler=args.lr_scheduler if hasattr(args, \"lr_scheduler\") else None,\n lr_warmup_steps=args.lr_warmup_steps if hasattr(args, \"lr_warmup_steps\") else None,\n ema_inv_gamma=args.ema_inv_gamma if hasattr(args, \"ema_inv_gamma\") else None,\n ema_power=args.ema_power if hasattr(args, \"ema_power\") else None,\n ema_max_decay=args.ema_max_decay if hasattr(args, \"ema_max_decay\") else None,\n mixed_precision=args.mixed_precision,\n )\n\n card_path = os.path.join(args.output_dir, \"README.md\")\n model_card.save(card_path)\n", "path": "src/diffusers/hub_utils.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\nfrom uuid import uuid4\n\nimport requests\nfrom huggingface_hub import HfFolder, whoami\n\nfrom . 
import __version__\nfrom .utils import ENV_VARS_TRUE_VALUES, HUGGINGFACE_CO_RESOLVE_ENDPOINT, logging\nfrom .utils.import_utils import (\n _flax_version,\n _jax_version,\n _onnxruntime_version,\n _torch_version,\n is_flax_available,\n is_modelcards_available,\n is_onnx_available,\n is_torch_available,\n)\n\n\nif is_modelcards_available():\n from modelcards import CardData, ModelCard\n\n\nlogger = logging.get_logger(__name__)\n\n\nMODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / \"utils\" / \"model_card_template.md\"\nSESSION_ID = uuid4().hex\nHF_HUB_OFFLINE = os.getenv(\"HF_HUB_OFFLINE\", \"\").upper() in ENV_VARS_TRUE_VALUES\nDISABLE_TELEMETRY = os.getenv(\"DISABLE_TELEMETRY\", \"\").upper() in ENV_VARS_TRUE_VALUES\nHUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + \"/api/telemetry/\"\n\n\ndef http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n \"\"\"\n Formats a user-agent string with basic info about a request.\n \"\"\"\n ua = f\"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if DISABLE_TELEMETRY:\n return ua + \"; telemetry/off\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_flax_available():\n ua += f\"; jax/{_jax_version}\"\n ua += f\"; flax/{_flax_version}\"\n if is_onnx_available():\n ua += f\"; onnxruntime/{_onnxruntime_version}\"\n # CI will set this value to True\n if os.environ.get(\"DIFFUSERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua\n\n\ndef send_telemetry(data: Dict, name: str):\n \"\"\"\n Sends logs to the Hub telemetry endpoint.\n\n Args:\n data: the fields to track, e.g. {\"example_name\": \"dreambooth\"}\n name: a unique name to differentiate the telemetry logs, e.g. \"diffusers_examples\" or \"diffusers_notebooks\"\n \"\"\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n return\n\n headers = {\"user-agent\": http_user_agent(data)}\n endpoint = HUGGINGFACE_CO_TELEMETRY + name\n try:\n r = requests.head(endpoint, headers=headers)\n r.raise_for_status()\n except Exception:\n # We don't want to error in case of connection errors of any kind.\n pass\n\n\ndef get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):\n if token is None:\n token = HfFolder.get_token()\n if organization is None:\n username = whoami(token)[\"name\"]\n return f\"{username}/{model_id}\"\n else:\n return f\"{organization}/{model_id}\"\n\n\ndef create_model_card(args, model_name):\n if not is_modelcards_available:\n raise ValueError(\n \"Please make sure to have `modelcards` installed when using the `create_model_card` function. 
You can\"\n \" install the package with `pip install modelcards`.\"\n )\n\n if hasattr(args, \"local_rank\") and args.local_rank not in [-1, 0]:\n return\n\n hub_token = args.hub_token if hasattr(args, \"hub_token\") else None\n repo_name = get_full_repo_name(model_name, token=hub_token)\n\n model_card = ModelCard.from_template(\n card_data=CardData( # Card metadata object that will be converted to YAML block\n language=\"en\",\n license=\"apache-2.0\",\n library_name=\"diffusers\",\n tags=[],\n datasets=args.dataset_name,\n metrics=[],\n ),\n template_path=MODEL_CARD_TEMPLATE_PATH,\n model_name=model_name,\n repo_name=repo_name,\n dataset_name=args.dataset_name if hasattr(args, \"dataset_name\") else None,\n learning_rate=args.learning_rate,\n train_batch_size=args.train_batch_size,\n eval_batch_size=args.eval_batch_size,\n gradient_accumulation_steps=args.gradient_accumulation_steps\n if hasattr(args, \"gradient_accumulation_steps\")\n else None,\n adam_beta1=args.adam_beta1 if hasattr(args, \"adam_beta1\") else None,\n adam_beta2=args.adam_beta2 if hasattr(args, \"adam_beta2\") else None,\n adam_weight_decay=args.adam_weight_decay if hasattr(args, \"adam_weight_decay\") else None,\n adam_epsilon=args.adam_epsilon if hasattr(args, \"adam_epsilon\") else None,\n lr_scheduler=args.lr_scheduler if hasattr(args, \"lr_scheduler\") else None,\n lr_warmup_steps=args.lr_warmup_steps if hasattr(args, \"lr_warmup_steps\") else None,\n ema_inv_gamma=args.ema_inv_gamma if hasattr(args, \"ema_inv_gamma\") else None,\n ema_power=args.ema_power if hasattr(args, \"ema_power\") else None,\n ema_max_decay=args.ema_max_decay if hasattr(args, \"ema_max_decay\") else None,\n mixed_precision=args.mixed_precision,\n )\n\n card_path = os.path.join(args.output_dir, \"README.md\")\n model_card.save(card_path)\n", "path": "src/diffusers/hub_utils.py"}]}
2,159
134
gh_patches_debug_28367
rasdani/github-patches
git_diff
dbt-labs__dbt-core-8909
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [CT-3279] Remove usage of dbt.config in dbt/adapters --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/dbt/adapters/base/plugin.py` Content: ``` 1 from typing import List, Optional, Type 2 3 from dbt.adapters.base import Credentials 4 from dbt.exceptions import CompilationError 5 from dbt.adapters.protocol import AdapterProtocol 6 7 8 def project_name_from_path(include_path: str) -> str: 9 # avoid an import cycle 10 from dbt.config.project import PartialProject 11 12 partial = PartialProject.from_project_root(include_path) 13 if partial.project_name is None: 14 raise CompilationError(f"Invalid project at {include_path}: name not set!") 15 return partial.project_name 16 17 18 class AdapterPlugin: 19 """Defines the basic requirements for a dbt adapter plugin. 20 21 :param include_path: The path to this adapter plugin's root 22 :param dependencies: A list of adapter names that this adapter depends 23 upon. 24 """ 25 26 def __init__( 27 self, 28 adapter: Type[AdapterProtocol], 29 credentials: Type[Credentials], 30 include_path: str, 31 dependencies: Optional[List[str]] = None, 32 ) -> None: 33 34 self.adapter: Type[AdapterProtocol] = adapter 35 self.credentials: Type[Credentials] = credentials 36 self.include_path: str = include_path 37 self.project_name: str = project_name_from_path(include_path) 38 self.dependencies: List[str] 39 if dependencies is None: 40 self.dependencies = [] 41 else: 42 self.dependencies = dependencies 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py --- a/core/dbt/adapters/base/plugin.py +++ b/core/dbt/adapters/base/plugin.py @@ -1,20 +1,10 @@ from typing import List, Optional, Type +from pathlib import Path from dbt.adapters.base import Credentials -from dbt.exceptions import CompilationError from dbt.adapters.protocol import AdapterProtocol -def project_name_from_path(include_path: str) -> str: - # avoid an import cycle - from dbt.config.project import PartialProject - - partial = PartialProject.from_project_root(include_path) - if partial.project_name is None: - raise CompilationError(f"Invalid project at {include_path}: name not set!") - return partial.project_name - - class AdapterPlugin: """Defines the basic requirements for a dbt adapter plugin. @@ -29,12 +19,13 @@ credentials: Type[Credentials], include_path: str, dependencies: Optional[List[str]] = None, + project_name: Optional[str] = None, ) -> None: self.adapter: Type[AdapterProtocol] = adapter self.credentials: Type[Credentials] = credentials self.include_path: str = include_path - self.project_name: str = project_name_from_path(include_path) + self.project_name: str = project_name or f"dbt_{Path(include_path).name}" self.dependencies: List[str] if dependencies is None: self.dependencies = []
{"golden_diff": "diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py\n--- a/core/dbt/adapters/base/plugin.py\n+++ b/core/dbt/adapters/base/plugin.py\n@@ -1,20 +1,10 @@\n from typing import List, Optional, Type\n+from pathlib import Path\n \n from dbt.adapters.base import Credentials\n-from dbt.exceptions import CompilationError\n from dbt.adapters.protocol import AdapterProtocol\n \n \n-def project_name_from_path(include_path: str) -> str:\n- # avoid an import cycle\n- from dbt.config.project import PartialProject\n-\n- partial = PartialProject.from_project_root(include_path)\n- if partial.project_name is None:\n- raise CompilationError(f\"Invalid project at {include_path}: name not set!\")\n- return partial.project_name\n-\n-\n class AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n \n@@ -29,12 +19,13 @@\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n+ project_name: Optional[str] = None,\n ) -> None:\n \n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] = credentials\n self.include_path: str = include_path\n- self.project_name: str = project_name_from_path(include_path)\n+ self.project_name: str = project_name or f\"dbt_{Path(include_path).name}\"\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n", "issue": "[CT-3279] Remove usage of dbt.config in dbt/adapters\n\n", "before_files": [{"content": "from typing import List, Optional, Type\n\nfrom dbt.adapters.base import Credentials\nfrom dbt.exceptions import CompilationError\nfrom dbt.adapters.protocol import AdapterProtocol\n\n\ndef project_name_from_path(include_path: str) -> str:\n # avoid an import cycle\n from dbt.config.project import PartialProject\n\n partial = PartialProject.from_project_root(include_path)\n if partial.project_name is None:\n raise CompilationError(f\"Invalid project at {include_path}: name not set!\")\n return partial.project_name\n\n\nclass AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n\n :param include_path: The path to this adapter plugin's root\n :param dependencies: A list of adapter names that this adapter depends\n upon.\n \"\"\"\n\n def __init__(\n self,\n adapter: Type[AdapterProtocol],\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n ) -> None:\n\n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] = credentials\n self.include_path: str = include_path\n self.project_name: str = project_name_from_path(include_path)\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n else:\n self.dependencies = dependencies\n", "path": "core/dbt/adapters/base/plugin.py"}], "after_files": [{"content": "from typing import List, Optional, Type\nfrom pathlib import Path\n\nfrom dbt.adapters.base import Credentials\nfrom dbt.adapters.protocol import AdapterProtocol\n\n\nclass AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n\n :param include_path: The path to this adapter plugin's root\n :param dependencies: A list of adapter names that this adapter depends\n upon.\n \"\"\"\n\n def __init__(\n self,\n adapter: Type[AdapterProtocol],\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n project_name: Optional[str] = None,\n ) -> None:\n\n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] 
= credentials\n self.include_path: str = include_path\n self.project_name: str = project_name or f\"dbt_{Path(include_path).name}\"\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n else:\n self.dependencies = dependencies\n", "path": "core/dbt/adapters/base/plugin.py"}]}
644
338
gh_patches_debug_27213
rasdani/github-patches
git_diff
buildbot__buildbot-4561
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can't use secret interpolation in change_hook_dialects? Trying to do something like ``` c['www'] = dict( port=8010, change_hook_dialects=dict( gitlab={ 'secret': util.Secret('my-webhook-token') }, ), plugins=dict(waterfall_view={}, console_view={}, grid_view={})) ``` fails; the webhook complains 'HTTP 400 Invalid secret'. Adding the print ``` --- a/master/buildbot/www/hooks/gitlab.py +++ b/master/buildbot/www/hooks/gitlab.py @@ -164,6 +164,7 @@ class GitLabHandler(BaseHookHandler): received_secret = request.getHeader(_HEADER_GITLAB_TOKEN) received_secret = bytes2unicode(received_secret) if received_secret != expected_secret: + log.msg("Wanted secret '%s' but got '%s'" % (expected_secret, received_secret)) raise ValueError("Invalid secret") try: content = request.content.read() ``` shows ``` ...[_GenericHTTPChannelProtocol,0,10.10.169.29] Wanted secret 'Secret(my-webhook-token)' but got 'thesecret' ``` (Evidently master/buildbot/www/hooks/gitlab.py needs to render expected_secret? https://github.com/buildbot/buildbot/issues/3627 has some tips on how to add that, but not clear enough for me to follow at first try.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `master/buildbot/www/hooks/gitlab.py` Content: ``` 1 # This file is part of Buildbot. Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 13 # 14 # Copyright Buildbot Team Members 15 16 17 import json 18 import re 19 20 from dateutil.parser import parse as dateparse 21 22 from twisted.python import log 23 24 from buildbot.util import bytes2unicode 25 from buildbot.www.hooks.base import BaseHookHandler 26 27 _HEADER_EVENT = b'X-Gitlab-Event' 28 _HEADER_GITLAB_TOKEN = b'X-Gitlab-Token' 29 30 31 class GitLabHandler(BaseHookHandler): 32 33 def _process_change(self, payload, user, repo, repo_url, event, 34 codebase=None): 35 """ 36 Consumes the JSON as a python object and actually starts the build. 37 38 :arguments: 39 payload 40 Python Object that represents the JSON sent by GitLab Service 41 Hook. 42 """ 43 changes = [] 44 refname = payload['ref'] 45 # project name from http headers is empty for me, so get it from repository/name 46 project = payload['repository']['name'] 47 48 # We only care about regular heads or tags 49 match = re.match(r"^refs/(heads|tags)/(.+)$", refname) 50 if not match: 51 log.msg("Ignoring refname `%s': Not a branch" % refname) 52 return changes 53 54 branch = match.group(2) 55 if payload.get('deleted'): 56 log.msg("Branch `%s' deleted, ignoring" % branch) 57 return changes 58 59 for commit in payload['commits']: 60 if not commit.get('distinct', True): 61 log.msg('Commit `%s` is a non-distinct commit, ignoring...' 
% 62 (commit['id'],)) 63 continue 64 65 files = [] 66 for kind in ('added', 'modified', 'removed'): 67 files.extend(commit.get(kind, [])) 68 69 when_timestamp = dateparse(commit['timestamp']) 70 71 log.msg("New revision: %s" % commit['id'][:8]) 72 73 change = { 74 'author': '%s <%s>' % (commit['author']['name'], 75 commit['author']['email']), 76 'files': files, 77 'comments': commit['message'], 78 'revision': commit['id'], 79 'when_timestamp': when_timestamp, 80 'branch': branch, 81 'revlink': commit['url'], 82 'repository': repo_url, 83 'project': project, 84 'category': event, 85 'properties': { 86 'event': event, 87 }, 88 } 89 90 if codebase is not None: 91 change['codebase'] = codebase 92 93 changes.append(change) 94 95 return changes 96 97 def _process_merge_request_change(self, payload, event, codebase=None): 98 """ 99 Consumes the merge_request JSON as a python object and turn it into a buildbot change. 100 101 :arguments: 102 payload 103 Python Object that represents the JSON sent by GitLab Service 104 Hook. 105 """ 106 attrs = payload['object_attributes'] 107 commit = attrs['last_commit'] 108 when_timestamp = dateparse(commit['timestamp']) 109 # @todo provide and document a way to choose between http and ssh url 110 repo_url = attrs['target']['git_http_url'] 111 # project name from http headers is empty for me, so get it from object_attributes/target/name 112 project = attrs['target']['name'] 113 114 # Filter out uninteresting events 115 state = attrs['state'] 116 if re.match('^(closed|merged|approved)$', state): 117 log.msg("GitLab MR#{}: Ignoring because state is {}".format(attrs['iid'], state)) 118 return [] 119 action = attrs['action'] 120 if not re.match('^(open|reopen)$', action) and not (action == "update" and "oldrev" in attrs): 121 log.msg("GitLab MR#{}: Ignoring because action {} was not open or " 122 "reopen or an update that added code".format(attrs['iid'], 123 action)) 124 return [] 125 126 changes = [{ 127 'author': '%s <%s>' % (commit['author']['name'], 128 commit['author']['email']), 129 'files': [], # @todo use rest API 130 'comments': "MR#{}: {}\n\n{}".format(attrs['iid'], attrs['title'], attrs['description']), 131 'revision': commit['id'], 132 'when_timestamp': when_timestamp, 133 'branch': attrs['target_branch'], 134 'repository': repo_url, 135 'project': project, 136 'category': event, 137 'revlink': attrs['url'], 138 'properties': { 139 'source_branch': attrs['source_branch'], 140 'source_project_id': attrs['source_project_id'], 141 'source_repository': attrs['source']['git_http_url'], 142 'source_git_ssh_url': attrs['source']['git_ssh_url'], 143 'target_branch': attrs['target_branch'], 144 'target_project_id': attrs['target_project_id'], 145 'target_repository': attrs['target']['git_http_url'], 146 'target_git_ssh_url': attrs['target']['git_ssh_url'], 147 'event': event, 148 }, 149 }] 150 if codebase is not None: 151 changes[0]['codebase'] = codebase 152 return changes 153 154 def getChanges(self, request): 155 """ 156 Reponds only to POST events and starts the build process 157 158 :arguments: 159 request 160 the http request object 161 """ 162 expected_secret = isinstance(self.options, dict) and self.options.get('secret') 163 if expected_secret: 164 received_secret = request.getHeader(_HEADER_GITLAB_TOKEN) 165 received_secret = bytes2unicode(received_secret) 166 if received_secret != expected_secret: 167 raise ValueError("Invalid secret") 168 try: 169 content = request.content.read() 170 payload = json.loads(bytes2unicode(content)) 171 except Exception as 
e: 172 raise ValueError("Error loading JSON: " + str(e)) 173 event_type = request.getHeader(_HEADER_EVENT) 174 event_type = bytes2unicode(event_type) 175 # newer version of gitlab have a object_kind parameter, 176 # which allows not to use the http header 177 event_type = payload.get('object_kind', event_type) 178 codebase = request.args.get(b'codebase', [None])[0] 179 codebase = bytes2unicode(codebase) 180 if event_type in ("push", "tag_push", "Push Hook"): 181 user = payload['user_name'] 182 repo = payload['repository']['name'] 183 repo_url = payload['repository']['url'] 184 changes = self._process_change( 185 payload, user, repo, repo_url, event_type, codebase=codebase) 186 elif event_type == 'merge_request': 187 changes = self._process_merge_request_change( 188 payload, event_type, codebase=codebase) 189 else: 190 changes = [] 191 if changes: 192 log.msg("Received {} changes from {} gitlab event".format( 193 len(changes), event_type)) 194 return (changes, 'git') 195 196 197 gitlab = GitLabHandler 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/master/buildbot/www/hooks/gitlab.py b/master/buildbot/www/hooks/gitlab.py --- a/master/buildbot/www/hooks/gitlab.py +++ b/master/buildbot/www/hooks/gitlab.py @@ -19,8 +19,10 @@ from dateutil.parser import parse as dateparse +from twisted.internet.defer import inlineCallbacks from twisted.python import log +from buildbot.process.properties import Properties from buildbot.util import bytes2unicode from buildbot.www.hooks.base import BaseHookHandler @@ -151,6 +153,7 @@ changes[0]['codebase'] = codebase return changes + @inlineCallbacks def getChanges(self, request): """ Reponds only to POST events and starts the build process @@ -163,7 +166,12 @@ if expected_secret: received_secret = request.getHeader(_HEADER_GITLAB_TOKEN) received_secret = bytes2unicode(received_secret) - if received_secret != expected_secret: + + p = Properties() + p.master = self.master + expected_secret_value = yield p.render(expected_secret) + + if received_secret != expected_secret_value: raise ValueError("Invalid secret") try: content = request.content.read()
{"golden_diff": "diff --git a/master/buildbot/www/hooks/gitlab.py b/master/buildbot/www/hooks/gitlab.py\n--- a/master/buildbot/www/hooks/gitlab.py\n+++ b/master/buildbot/www/hooks/gitlab.py\n@@ -19,8 +19,10 @@\n \n from dateutil.parser import parse as dateparse\n \n+from twisted.internet.defer import inlineCallbacks\n from twisted.python import log\n \n+from buildbot.process.properties import Properties\n from buildbot.util import bytes2unicode\n from buildbot.www.hooks.base import BaseHookHandler\n \n@@ -151,6 +153,7 @@\n changes[0]['codebase'] = codebase\n return changes\n \n+ @inlineCallbacks\n def getChanges(self, request):\n \"\"\"\n Reponds only to POST events and starts the build process\n@@ -163,7 +166,12 @@\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n received_secret = bytes2unicode(received_secret)\n- if received_secret != expected_secret:\n+\n+ p = Properties()\n+ p.master = self.master\n+ expected_secret_value = yield p.render(expected_secret)\n+\n+ if received_secret != expected_secret_value:\n raise ValueError(\"Invalid secret\")\n try:\n content = request.content.read()\n", "issue": "Can't use secret interpolation in change_hook_dialects?\nTrying to do something like\r\n```\r\nc['www'] = dict(\r\n port=8010,\r\n change_hook_dialects=dict(\r\n gitlab={\r\n 'secret': util.Secret('my-webhook-token')\r\n },\r\n ),\r\n plugins=dict(waterfall_view={}, console_view={}, grid_view={}))\r\n```\r\nfails; the webhook complains 'HTTP 400 Invalid secret'.\r\nAdding the print\r\n\r\n```\r\n--- a/master/buildbot/www/hooks/gitlab.py\r\n+++ b/master/buildbot/www/hooks/gitlab.py\r\n@@ -164,6 +164,7 @@ class GitLabHandler(BaseHookHandler):\r\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\r\n received_secret = bytes2unicode(received_secret)\r\n if received_secret != expected_secret:\r\n+ log.msg(\"Wanted secret '%s' but got '%s'\" % (expected_secret, received_secret))\r\n raise ValueError(\"Invalid secret\")\r\n try:\r\n content = request.content.read()\r\n```\r\nshows\r\n```\r\n...[_GenericHTTPChannelProtocol,0,10.10.169.29] Wanted secret 'Secret(my-webhook-token)' but got 'thesecret'\r\n```\r\n\r\n(Evidently master/buildbot/www/hooks/gitlab.py needs to render expected_secret?\r\nhttps://github.com/buildbot/buildbot/issues/3627 has some tips on how to add that, but\r\nnot clear enough for me to follow at first try.)\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\nimport json\nimport re\n\nfrom dateutil.parser import parse as dateparse\n\nfrom twisted.python import log\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.www.hooks.base import BaseHookHandler\n\n_HEADER_EVENT = b'X-Gitlab-Event'\n_HEADER_GITLAB_TOKEN = b'X-Gitlab-Token'\n\n\nclass GitLabHandler(BaseHookHandler):\n\n def _process_change(self, payload, user, repo, repo_url, event,\n codebase=None):\n \"\"\"\n Consumes the JSON as a python object and actually starts the build.\n\n :arguments:\n payload\n Python Object that represents the JSON sent by GitLab Service\n Hook.\n \"\"\"\n changes = []\n refname = payload['ref']\n # project name from http headers is empty for me, so get it from repository/name\n project = payload['repository']['name']\n\n # We only care about regular heads or tags\n match = re.match(r\"^refs/(heads|tags)/(.+)$\", refname)\n if not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\n\n branch = match.group(2)\n if payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\n\n for commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' %\n (commit['id'],))\n continue\n\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n\n when_timestamp = dateparse(commit['timestamp'])\n\n log.msg(\"New revision: %s\" % commit['id'][:8])\n\n change = {\n 'author': '%s <%s>' % (commit['author']['name'],\n commit['author']['email']),\n 'files': files,\n 'comments': commit['message'],\n 'revision': commit['id'],\n 'when_timestamp': when_timestamp,\n 'branch': branch,\n 'revlink': commit['url'],\n 'repository': repo_url,\n 'project': project,\n 'category': event,\n 'properties': {\n 'event': event,\n },\n }\n\n if codebase is not None:\n change['codebase'] = codebase\n\n changes.append(change)\n\n return changes\n\n def _process_merge_request_change(self, payload, event, codebase=None):\n \"\"\"\n Consumes the merge_request JSON as a python object and turn it into a buildbot change.\n\n :arguments:\n payload\n Python Object that represents the JSON sent by GitLab Service\n Hook.\n \"\"\"\n attrs = payload['object_attributes']\n commit = attrs['last_commit']\n when_timestamp = dateparse(commit['timestamp'])\n # @todo provide and document a way to choose between http and ssh url\n repo_url = attrs['target']['git_http_url']\n # project name from http headers is empty for me, so get it from object_attributes/target/name\n project = attrs['target']['name']\n\n # Filter out uninteresting events\n state = attrs['state']\n if re.match('^(closed|merged|approved)$', state):\n log.msg(\"GitLab MR#{}: Ignoring because state is {}\".format(attrs['iid'], state))\n return []\n action = attrs['action']\n if not re.match('^(open|reopen)$', action) and not (action == \"update\" and \"oldrev\" in attrs):\n log.msg(\"GitLab MR#{}: Ignoring because action {} was not open or \"\n \"reopen or an update that added code\".format(attrs['iid'],\n action))\n return []\n\n changes = [{\n 'author': '%s <%s>' % (commit['author']['name'],\n commit['author']['email']),\n 'files': [], # @todo use rest API\n 
'comments': \"MR#{}: {}\\n\\n{}\".format(attrs['iid'], attrs['title'], attrs['description']),\n 'revision': commit['id'],\n 'when_timestamp': when_timestamp,\n 'branch': attrs['target_branch'],\n 'repository': repo_url,\n 'project': project,\n 'category': event,\n 'revlink': attrs['url'],\n 'properties': {\n 'source_branch': attrs['source_branch'],\n 'source_project_id': attrs['source_project_id'],\n 'source_repository': attrs['source']['git_http_url'],\n 'source_git_ssh_url': attrs['source']['git_ssh_url'],\n 'target_branch': attrs['target_branch'],\n 'target_project_id': attrs['target_project_id'],\n 'target_repository': attrs['target']['git_http_url'],\n 'target_git_ssh_url': attrs['target']['git_ssh_url'],\n 'event': event,\n },\n }]\n if codebase is not None:\n changes[0]['codebase'] = codebase\n return changes\n\n def getChanges(self, request):\n \"\"\"\n Reponds only to POST events and starts the build process\n\n :arguments:\n request\n the http request object\n \"\"\"\n expected_secret = isinstance(self.options, dict) and self.options.get('secret')\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n received_secret = bytes2unicode(received_secret)\n if received_secret != expected_secret:\n raise ValueError(\"Invalid secret\")\n try:\n content = request.content.read()\n payload = json.loads(bytes2unicode(content))\n except Exception as e:\n raise ValueError(\"Error loading JSON: \" + str(e))\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2unicode(event_type)\n # newer version of gitlab have a object_kind parameter,\n # which allows not to use the http header\n event_type = payload.get('object_kind', event_type)\n codebase = request.args.get(b'codebase', [None])[0]\n codebase = bytes2unicode(codebase)\n if event_type in (\"push\", \"tag_push\", \"Push Hook\"):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(\n payload, user, repo, repo_url, event_type, codebase=codebase)\n elif event_type == 'merge_request':\n changes = self._process_merge_request_change(\n payload, event_type, codebase=codebase)\n else:\n changes = []\n if changes:\n log.msg(\"Received {} changes from {} gitlab event\".format(\n len(changes), event_type))\n return (changes, 'git')\n\n\ngitlab = GitLabHandler\n", "path": "master/buildbot/www/hooks/gitlab.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\nimport json\nimport re\n\nfrom dateutil.parser import parse as dateparse\n\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.python import log\n\nfrom buildbot.process.properties import Properties\nfrom buildbot.util import bytes2unicode\nfrom buildbot.www.hooks.base import BaseHookHandler\n\n_HEADER_EVENT = b'X-Gitlab-Event'\n_HEADER_GITLAB_TOKEN = b'X-Gitlab-Token'\n\n\nclass GitLabHandler(BaseHookHandler):\n\n def _process_change(self, payload, user, repo, repo_url, event,\n codebase=None):\n \"\"\"\n Consumes the JSON as a python object and actually starts the build.\n\n :arguments:\n payload\n Python Object that represents the JSON sent by GitLab Service\n Hook.\n \"\"\"\n changes = []\n refname = payload['ref']\n # project name from http headers is empty for me, so get it from repository/name\n project = payload['repository']['name']\n\n # We only care about regular heads or tags\n match = re.match(r\"^refs/(heads|tags)/(.+)$\", refname)\n if not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\n\n branch = match.group(2)\n if payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\n\n for commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' %\n (commit['id'],))\n continue\n\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n\n when_timestamp = dateparse(commit['timestamp'])\n\n log.msg(\"New revision: %s\" % commit['id'][:8])\n\n change = {\n 'author': '%s <%s>' % (commit['author']['name'],\n commit['author']['email']),\n 'files': files,\n 'comments': commit['message'],\n 'revision': commit['id'],\n 'when_timestamp': when_timestamp,\n 'branch': branch,\n 'revlink': commit['url'],\n 'repository': repo_url,\n 'project': project,\n 'category': event,\n 'properties': {\n 'event': event,\n },\n }\n\n if codebase is not None:\n change['codebase'] = codebase\n\n changes.append(change)\n\n return changes\n\n def _process_merge_request_change(self, payload, event, codebase=None):\n \"\"\"\n Consumes the merge_request JSON as a python object and turn it into a buildbot change.\n\n :arguments:\n payload\n Python Object that represents the JSON sent by GitLab Service\n Hook.\n \"\"\"\n attrs = payload['object_attributes']\n commit = attrs['last_commit']\n when_timestamp = dateparse(commit['timestamp'])\n # @todo provide and document a way to choose between http and ssh url\n repo_url = attrs['target']['git_http_url']\n # project name from http headers is empty for me, so get it from object_attributes/target/name\n project = attrs['target']['name']\n\n # Filter out uninteresting events\n state = attrs['state']\n if re.match('^(closed|merged|approved)$', state):\n log.msg(\"GitLab MR#{}: Ignoring because state is {}\".format(attrs['iid'], state))\n return []\n action = attrs['action']\n if not re.match('^(open|reopen)$', action) and not (action == \"update\" and \"oldrev\" in attrs):\n log.msg(\"GitLab MR#{}: Ignoring because action {} was not open or \"\n \"reopen or an update that added code\".format(attrs['iid'],\n action))\n return []\n\n changes = [{\n 'author': '%s 
<%s>' % (commit['author']['name'],\n commit['author']['email']),\n 'files': [], # @todo use rest API\n 'comments': \"MR#{}: {}\\n\\n{}\".format(attrs['iid'], attrs['title'], attrs['description']),\n 'revision': commit['id'],\n 'when_timestamp': when_timestamp,\n 'branch': attrs['target_branch'],\n 'repository': repo_url,\n 'project': project,\n 'category': event,\n 'revlink': attrs['url'],\n 'properties': {\n 'source_branch': attrs['source_branch'],\n 'source_project_id': attrs['source_project_id'],\n 'source_repository': attrs['source']['git_http_url'],\n 'source_git_ssh_url': attrs['source']['git_ssh_url'],\n 'target_branch': attrs['target_branch'],\n 'target_project_id': attrs['target_project_id'],\n 'target_repository': attrs['target']['git_http_url'],\n 'target_git_ssh_url': attrs['target']['git_ssh_url'],\n 'event': event,\n },\n }]\n if codebase is not None:\n changes[0]['codebase'] = codebase\n return changes\n\n @inlineCallbacks\n def getChanges(self, request):\n \"\"\"\n Reponds only to POST events and starts the build process\n\n :arguments:\n request\n the http request object\n \"\"\"\n expected_secret = isinstance(self.options, dict) and self.options.get('secret')\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n received_secret = bytes2unicode(received_secret)\n\n p = Properties()\n p.master = self.master\n expected_secret_value = yield p.render(expected_secret)\n\n if received_secret != expected_secret_value:\n raise ValueError(\"Invalid secret\")\n try:\n content = request.content.read()\n payload = json.loads(bytes2unicode(content))\n except Exception as e:\n raise ValueError(\"Error loading JSON: \" + str(e))\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2unicode(event_type)\n # newer version of gitlab have a object_kind parameter,\n # which allows not to use the http header\n event_type = payload.get('object_kind', event_type)\n codebase = request.args.get(b'codebase', [None])[0]\n codebase = bytes2unicode(codebase)\n if event_type in (\"push\", \"tag_push\", \"Push Hook\"):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(\n payload, user, repo, repo_url, event_type, codebase=codebase)\n elif event_type == 'merge_request':\n changes = self._process_merge_request_change(\n payload, event_type, codebase=codebase)\n else:\n changes = []\n if changes:\n log.msg(\"Received {} changes from {} gitlab event\".format(\n len(changes), event_type))\n return (changes, 'git')\n\n\ngitlab = GitLabHandler\n", "path": "master/buildbot/www/hooks/gitlab.py"}]}
2,686
281
gh_patches_debug_10793
rasdani/github-patches
git_diff
pyro-ppl__pyro-724
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate dependency on scipy for pyro-ppl package Once distributions are migrated upstream and we do not have a core dependency on scipy, we should remove this from our requirements in `setup.py` (add it to test dependencies), so that users do not need to install a separate 16MB file when they do a pip install of `pyro-ppl`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from __future__ import absolute_import, division, print_function 2 3 import sys 4 5 from setuptools import find_packages, setup 6 7 # Find pyro version. 8 for line in open('pyro/__init__.py'): 9 if line.startswith('__version__ = '): 10 version = line.strip().split()[2][1:-1] 11 12 # Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl 13 # When releasing on pypi, make sure pandoc is on your system: 14 # $ brew install pandoc # OS X 15 # $ sudo apt-get install pandoc # Ubuntu Linux 16 try: 17 import pypandoc 18 long_description = pypandoc.convert('README.md', 'rst') 19 except (IOError, ImportError, OSError) as e: 20 sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e)) 21 sys.stderr.flush() 22 long_description = open('README.md').read() 23 24 # Remove badges since they will always be obsolete. 25 blacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org'] 26 long_description = '\n'.join( 27 [line for line in long_description.split('\n') if not any(patt in line for patt in blacklist)]) 28 29 setup( 30 name='pyro-ppl', 31 version=version, 32 description='A Python library for probabilistic modeling and inference', 33 long_description=long_description, 34 packages=find_packages(exclude=('tests*',)), 35 url='http://pyro.ai', 36 author='Uber AI Labs', 37 author_email='[email protected]', 38 install_requires=[ 39 'numpy>=1.7', 40 'scipy>=0.19.0', 41 'cloudpickle>=0.3.1', 42 'graphviz>=0.8', 43 'networkx>=2.0.0', 44 'observations>=0.1.4', 45 'torch', 46 'six>=1.10.0', 47 ], 48 extras_require={ 49 'notebooks': ['jupyter>=1.0.0'], 50 'visualization': [ 51 'matplotlib>=1.3', 52 'visdom>=0.1.4', 53 'pillow', 54 ], 55 'test': [ 56 'pytest', 57 'pytest-cov', 58 'nbval', 59 # examples/tutorials 60 'matplotlib', 61 'visdom', 62 'torchvision', 63 ], 64 'profile': ['prettytable'], 65 'dev': [ 66 'torchvision', 67 'flake8', 68 'yapf', 69 'isort', 70 'pytest', 71 'pytest-xdist', 72 'nbval', 73 'nbstripout', 74 'pypandoc', 75 'sphinx', 76 'sphinx_rtd_theme', 77 ], 78 }, 79 tests_require=['flake8', 'pytest'], 80 keywords='machine learning statistics probabilistic programming bayesian modeling pytorch', 81 license='MIT License', 82 classifiers=[ 83 'Intended Audience :: Developers', 84 'Intended Audience :: Education', 85 'Intended Audience :: Science/Research', 86 'Operating System :: POSIX :: Linux', 87 'Operating System :: MacOS :: MacOS X', 88 'Programming Language :: Python :: 2.7', 89 'Programming Language :: Python :: 3.4', 90 ], 91 # yapf 92 ) 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -37,7 +37,6 @@ author_email='[email protected]', install_requires=[ 'numpy>=1.7', - 'scipy>=0.19.0', 'cloudpickle>=0.3.1', 'graphviz>=0.8', 'networkx>=2.0.0', @@ -56,6 +55,7 @@ 'pytest', 'pytest-cov', 'nbval', + 'scipy>=0.19.0', # examples/tutorials 'matplotlib', 'visdom',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,6 @@\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n- 'scipy>=0.19.0',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n@@ -56,6 +55,7 @@\n 'pytest',\n 'pytest-cov',\n 'nbval',\n+ 'scipy>=0.19.0',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n", "issue": "Deprecate dependency on scipy for pyro-ppl package\nOnce distributions are migrated upstream and we do not have a core dependency on scipy, we should remove this from our requirements in `setup.py` (add it to test dependencies), so that users do not need to install a separate 16MB file when they do a pip install of `pyro-ppl`.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Find pyro version.\nfor line in open('pyro/__init__.py'):\n if line.startswith('__version__ = '):\n version = line.strip().split()[2][1:-1]\n\n# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError, OSError) as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\nblacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org']\nlong_description = '\\n'.join(\n [line for line in long_description.split('\\n') if not any(patt in line for patt in blacklist)])\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(exclude=('tests*',)),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n 'scipy>=0.19.0',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n 'observations>=0.1.4',\n 'torch',\n 'six>=1.10.0',\n ],\n extras_require={\n 'notebooks': ['jupyter>=1.0.0'],\n 'visualization': [\n 'matplotlib>=1.3',\n 'visdom>=0.1.4',\n 'pillow',\n ],\n 'test': [\n 'pytest',\n 'pytest-cov',\n 'nbval',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n 'torchvision',\n ],\n 'profile': ['prettytable'],\n 'dev': [\n 'torchvision',\n 'flake8',\n 'yapf',\n 'isort',\n 'pytest',\n 'pytest-xdist',\n 'nbval',\n 'nbstripout',\n 'pypandoc',\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n },\n tests_require=['flake8', 'pytest'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n ],\n # yapf\n)\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Find pyro version.\nfor line in open('pyro/__init__.py'):\n if line.startswith('__version__ = '):\n version = line.strip().split()[2][1:-1]\n\n# Convert 
README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError, OSError) as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\nblacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org']\nlong_description = '\\n'.join(\n [line for line in long_description.split('\\n') if not any(patt in line for patt in blacklist)])\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(exclude=('tests*',)),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n 'observations>=0.1.4',\n 'torch',\n 'six>=1.10.0',\n ],\n extras_require={\n 'notebooks': ['jupyter>=1.0.0'],\n 'visualization': [\n 'matplotlib>=1.3',\n 'visdom>=0.1.4',\n 'pillow',\n ],\n 'test': [\n 'pytest',\n 'pytest-cov',\n 'nbval',\n 'scipy>=0.19.0',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n 'torchvision',\n ],\n 'profile': ['prettytable'],\n 'dev': [\n 'torchvision',\n 'flake8',\n 'yapf',\n 'isort',\n 'pytest',\n 'pytest-xdist',\n 'nbval',\n 'nbstripout',\n 'pypandoc',\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n },\n tests_require=['flake8', 'pytest'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n ],\n # yapf\n)\n", "path": "setup.py"}]}
1,216
153
gh_patches_debug_12336
rasdani/github-patches
git_diff
conda__conda-build-1456
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Regression: --version not respected in conda-build-2.0.4 skeleton The --version flag does not seem to be respected by conda skeleton. version is conda-build-2.0.4. ``` (bjorn3) bjorn@bjorn-ThinkPad-T450s:~/Desktop$ conda skeleton pypi PrettyTable --version 0.7.2 Warning, the following versions were found for PrettyTable 0.1 0.2 0.2.1 0.3 0.4 0.5 0.6 0.6.1 0.7.1 0.7.2 7 Using 7 Use --version to specify a different version. More than one source version is available for PrettyTable: Using the one with the least source size use --manual-url to override this behavior Using url https://pypi.python.org/packages/c7/21/8911c1b4aab57b891a54bcb6b1fab2ef59e80a78f7f699dd3d162ddda87f/prettytable-0.7.tar.bz2 (21 KB) for PrettyTable. Downloading PrettyTable Using cached download Unpacking PrettyTable... done working in /tmp/tmpxippxdfoconda_skeleton_prettytable-0.7.tar.bz2 ``` will try to manually pin the url as suggested meanwhile --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda_build/api.py` Content: ``` 1 # (c) Continuum Analytics, Inc. / http://continuum.io 2 # All Rights Reserved 3 # 4 # conda is distributed under the terms of the BSD 3-clause license. 5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. 6 7 """ 8 This file defines the public API for conda-build. Adding or removing functions, 9 or Changing arguments to anything in here should also mean changing the major 10 version number. 11 12 Design philosophy: put variability into config. Make each function here accept kwargs, 13 but only use those kwargs in config. Config must change to support new features elsewhere. 14 """ 15 16 # imports are done locally to keep the api clean and limited strictly 17 # to conda-build's functionality. 
18 19 import sys as _sys 20 21 # make the Config class available in the api namespace 22 from conda_build.config import Config, get_or_merge_config, DEFAULT_PREFIX_LENGTH as _prefix_length 23 24 25 def _ensure_list(recipe_arg): 26 from .conda_interface import string_types 27 if isinstance(recipe_arg, string_types) or not hasattr(recipe_arg, '__iter__'): 28 recipe_arg = [recipe_arg] 29 return recipe_arg 30 31 32 def render(recipe_path, config=None, **kwargs): 33 from conda_build.render import render_recipe 34 config = get_or_merge_config(config, **kwargs) 35 return render_recipe(recipe_path, no_download_source=config.no_download_source, config=config) 36 37 38 def output_yaml(metadata, file_path=None): 39 from conda_build.render import output_yaml 40 return output_yaml(metadata, file_path) 41 42 43 def get_output_file_path(recipe_path_or_metadata, no_download_source=False, config=None, **kwargs): 44 from conda_build.render import render_recipe, bldpkg_path 45 config = get_or_merge_config(config, **kwargs) 46 if hasattr(recipe_path_or_metadata, 'config'): 47 metadata = recipe_path_or_metadata 48 recipe_config = metadata.config 49 else: 50 metadata, _, _ = render_recipe(recipe_path_or_metadata, 51 no_download_source=no_download_source, 52 config=config) 53 recipe_config = config 54 return bldpkg_path(metadata, recipe_config) 55 56 57 def check(recipe_path, no_download_source=False, config=None, **kwargs): 58 from conda_build.render import render_recipe 59 config = get_or_merge_config(config, **kwargs) 60 metadata, _, _ = render_recipe(recipe_path, no_download_source=no_download_source, 61 config=config) 62 return metadata.check_fields() 63 64 65 def build(recipe_paths_or_metadata, post=None, need_source_download=True, 66 build_only=False, notest=False, config=None, **kwargs): 67 import os 68 from conda_build.build import build_tree 69 70 config = get_or_merge_config(config, **kwargs) 71 72 recipes = _ensure_list(recipe_paths_or_metadata) 73 absolute_recipes = [] 74 for recipe in recipes: 75 if hasattr(recipe, "config"): 76 absolute_recipes.append(recipe) 77 elif os.path.isabs(recipe): 78 absolute_recipes.append(recipe) 79 else: 80 absolute_recipes.append(os.path.normpath(os.path.join(os.getcwd(), recipe))) 81 82 return build_tree(absolute_recipes, build_only=build_only, post=post, notest=notest, 83 need_source_download=need_source_download, config=config) 84 85 86 def test(recipedir_or_package_or_metadata, move_broken=True, config=None, **kwargs): 87 import os 88 from conda_build.build import test 89 from conda_build.render import render_recipe 90 91 config = get_or_merge_config(config, **kwargs) 92 93 if hasattr(recipedir_or_package_or_metadata, 'config'): 94 metadata = recipedir_or_package_or_metadata 95 recipe_config = metadata.config 96 elif os.path.isdir(recipedir_or_package_or_metadata): 97 # This will create a new local build folder if and only if config doesn't already have one. 
98 # What this means is that if we're running a test immediately after build, we use the one 99 # that the build already provided 100 config.compute_build_id(recipedir_or_package_or_metadata) 101 metadata, _, _ = render_recipe(recipedir_or_package_or_metadata, config=config) 102 recipe_config = config 103 else: 104 # fall back to old way (use recipe, rather than package) 105 metadata, _, _ = render_recipe(recipedir_or_package_or_metadata, no_download_source=False, 106 config=config, **kwargs) 107 recipe_config = config 108 109 with recipe_config: 110 # This will create a new local build folder if and only if config doesn't already have one. 111 # What this means is that if we're running a test immediately after build, we use the one 112 # that the build already provided 113 114 config.compute_build_id(metadata.name()) 115 test_result = test(metadata, config=recipe_config, move_broken=move_broken) 116 return test_result 117 118 119 def keygen(name="conda_build_signing", size=2048): 120 """Create a private/public key pair for package verification purposes 121 122 name: string name of key to be generated. 123 size: length of the RSA key, in bits. Should be power of 2. 124 """ 125 from .sign import keygen 126 return keygen(name, size) 127 128 129 def import_sign_key(private_key_path, new_name=None): 130 """ 131 private_key_path: specify a private key to be imported. The public key is 132 generated automatically. Specify ```new_name``` also to rename the 133 private key in the copied location. 134 """ 135 from .sign import import_key 136 return import_key(private_key_path, new_name=new_name) 137 138 139 def sign(file_path, key_name_or_path=None): 140 from .sign import sign_and_write 141 return sign_and_write(file_path, key_name_or_path) 142 143 144 def verify(file_path): 145 """Verify a signed package""" 146 from .sign import verify 147 return verify(file_path) 148 149 150 def list_skeletons(): 151 """List available skeletons for generating conda recipes from external sources. 152 153 The returned list is generally the names of supported repositories (pypi, cran, etc.)""" 154 import pkgutil 155 modules = pkgutil.iter_modules(['conda_build/skeletons']) 156 files = [] 157 for _, name, _ in modules: 158 if not name.startswith("_"): 159 files.append(name) 160 return files 161 162 163 def skeletonize(packages, repo, output_dir=".", version=None, recursive=False, 164 config=None, **kwargs): 165 """Generate a conda recipe from an external repo. Translates metadata from external 166 sources into expected conda recipe format.""" 167 168 # here we're dumping all extra kwargs as attributes on the config object. We'll extract 169 # only relevant ones below 170 config = get_or_merge_config(config, **kwargs) 171 config.compute_build_id('skeleton') 172 packages = _ensure_list(packages) 173 174 # This is a little bit of black magic. The idea is that for any keyword argument that 175 # we inspect from the given module's skeletonize funtion, we should hoist the argument 176 # off of the config object, and pass it as a keyword argument. This is sort of the 177 # inverse of what we do in the CLI code - there we take CLI arguments and dangle them 178 # all on the config object as attributes. 
179 module = getattr(__import__("conda_build.skeletons", globals=globals(), locals=locals(), 180 fromlist=[repo]), 181 repo) 182 183 func_args = module.skeletonize.__code__.co_varnames 184 kwargs = {name: getattr(config, name) for name in dir(config) if name in func_args} 185 kwargs.update({name: value for name, value in kwargs.items() if name in func_args}) 186 # strip out local arguments that we pass directly 187 for arg in skeletonize.__code__.co_varnames: 188 if arg in kwargs: 189 del kwargs[arg] 190 with config: 191 skeleton_return = module.skeletonize(packages, output_dir=output_dir, version=version, 192 recursive=recursive, config=config, **kwargs) 193 return skeleton_return 194 195 196 def develop(recipe_dir, prefix=_sys.prefix, no_pth_file=False, 197 build_ext=False, clean=False, uninstall=False): 198 """Install a Python package in 'development mode'. 199 200 This works by creating a conda.pth file in site-packages.""" 201 from .develop import execute 202 recipe_dir = _ensure_list(recipe_dir) 203 return execute(recipe_dir, prefix, no_pth_file, build_ext, clean, uninstall) 204 205 206 def convert(package_file, output_dir=".", show_imports=False, platforms=None, force=False, 207 dependencies=None, verbose=False, quiet=True, dry_run=False): 208 """Convert changes a package from one platform to another. It applies only to things that are 209 portable, such as pure python, or header-only C/C++ libraries.""" 210 from .convert import conda_convert 211 if not platforms: 212 platforms = [] 213 if package_file.endswith('tar.bz2'): 214 return conda_convert(package_file, output_dir=output_dir, show_imports=show_imports, 215 platforms=platforms, force=force, verbose=verbose, quiet=quiet, 216 dry_run=dry_run, dependencies=dependencies) 217 elif package_file.endswith('.whl'): 218 raise RuntimeError('Conversion from wheel packages is not ' 219 'implemented yet, stay tuned.') 220 else: 221 raise RuntimeError("cannot convert: %s" % package_file) 222 223 224 def test_installable(channel='defaults'): 225 """Check to make sure that packages in channel are installable. 226 This is a consistency check for the channel.""" 227 from .inspect import test_installable 228 return test_installable(channel) 229 230 231 def inspect_linkages(packages, prefix=_sys.prefix, untracked=False, all_packages=False, 232 show_files=False, groupby='package'): 233 from .inspect import inspect_linkages 234 packages = _ensure_list(packages) 235 return inspect_linkages(packages, prefix=prefix, untracked=untracked, all_packages=all_packages, 236 show_files=show_files, groupby=groupby) 237 238 239 def inspect_objects(packages, prefix=_sys.prefix, groupby='filename'): 240 from .inspect import inspect_objects 241 packages = _ensure_list(packages) 242 return inspect_objects(packages, prefix=prefix, groupby=groupby) 243 244 245 def inspect_prefix_length(packages, min_prefix_length=_prefix_length): 246 from conda_build.tarcheck import check_prefix_lengths 247 packages = _ensure_list(packages) 248 prefix_lengths = check_prefix_lengths(packages, min_prefix_length) 249 if prefix_lengths: 250 print("Packages with binary prefixes shorter than %d characters:" 251 % min_prefix_length) 252 for fn, length in prefix_lengths.items(): 253 print("{0} ({1} chars)".format(fn, length)) 254 else: 255 print("No packages found with binary prefixes shorter than %d characters." 
256 % min_prefix_length) 257 return len(prefix_lengths) == 0 258 259 260 def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0, 261 dependencies=(), home=None, license_name=None, summary=None, 262 config=None): 263 from .metapackage import create_metapackage 264 if not config: 265 config = Config() 266 return create_metapackage(name=name, version=version, entry_points=entry_points, 267 build_string=build_string, build_number=build_number, 268 dependencies=dependencies, home=home, 269 license_name=license_name, summary=summary, config=config) 270 271 272 def update_index(dir_paths, config=None, force=False, check_md5=False, remove=False): 273 from locale import getpreferredencoding 274 import os 275 from .conda_interface import PY3 276 from conda_build.index import update_index 277 dir_paths = [os.path.abspath(path) for path in _ensure_list(dir_paths)] 278 # Don't use byte strings in Python 2 279 if not PY3: 280 dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths] 281 282 if not config: 283 config = Config() 284 285 for path in dir_paths: 286 update_index(path, config, force=force, check_md5=check_md5, remove=remove) 287 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda_build/api.py b/conda_build/api.py --- a/conda_build/api.py +++ b/conda_build/api.py @@ -165,6 +165,13 @@ """Generate a conda recipe from an external repo. Translates metadata from external sources into expected conda recipe format.""" + if version: + kwargs.update({'version': version}) + if recursive: + kwargs.update({'version': version}) + if output_dir != ".": + kwargs.update({'output_dir': output_dir}) + # here we're dumping all extra kwargs as attributes on the config object. We'll extract # only relevant ones below config = get_or_merge_config(config, **kwargs)
{"golden_diff": "diff --git a/conda_build/api.py b/conda_build/api.py\n--- a/conda_build/api.py\n+++ b/conda_build/api.py\n@@ -165,6 +165,13 @@\n \"\"\"Generate a conda recipe from an external repo. Translates metadata from external\n sources into expected conda recipe format.\"\"\"\n \n+ if version:\n+ kwargs.update({'version': version})\n+ if recursive:\n+ kwargs.update({'version': version})\n+ if output_dir != \".\":\n+ kwargs.update({'output_dir': output_dir})\n+\n # here we're dumping all extra kwargs as attributes on the config object. We'll extract\n # only relevant ones below\n config = get_or_merge_config(config, **kwargs)\n", "issue": "Regression: --version not respected in conda-build-2.0.4 skeleton\nThe --version flag does not seem to be respected by conda skeleton.\nversion is conda-build-2.0.4.\n\n```\n(bjorn3) bjorn@bjorn-ThinkPad-T450s:~/Desktop$ conda skeleton pypi PrettyTable --version 0.7.2\nWarning, the following versions were found for PrettyTable\n0.1\n0.2\n0.2.1\n0.3\n0.4\n0.5\n0.6\n0.6.1\n0.7.1\n0.7.2\n7\nUsing 7\nUse --version to specify a different version.\nMore than one source version is available for PrettyTable:\nUsing the one with the least source size\nuse --manual-url to override this behavior\nUsing url https://pypi.python.org/packages/c7/21/8911c1b4aab57b891a54bcb6b1fab2ef59e80a78f7f699dd3d162ddda87f/prettytable-0.7.tar.bz2 (21 KB) for PrettyTable.\nDownloading PrettyTable\nUsing cached download\nUnpacking PrettyTable...\ndone\nworking in /tmp/tmpxippxdfoconda_skeleton_prettytable-0.7.tar.bz2\n```\n\nwill try to manually pin the url as suggested meanwhile\n\n", "before_files": [{"content": "# (c) Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nThis file defines the public API for conda-build. Adding or removing functions,\nor Changing arguments to anything in here should also mean changing the major\nversion number.\n\nDesign philosophy: put variability into config. Make each function here accept kwargs,\nbut only use those kwargs in config. 
Config must change to support new features elsewhere.\n\"\"\"\n\n# imports are done locally to keep the api clean and limited strictly\n# to conda-build's functionality.\n\nimport sys as _sys\n\n# make the Config class available in the api namespace\nfrom conda_build.config import Config, get_or_merge_config, DEFAULT_PREFIX_LENGTH as _prefix_length\n\n\ndef _ensure_list(recipe_arg):\n from .conda_interface import string_types\n if isinstance(recipe_arg, string_types) or not hasattr(recipe_arg, '__iter__'):\n recipe_arg = [recipe_arg]\n return recipe_arg\n\n\ndef render(recipe_path, config=None, **kwargs):\n from conda_build.render import render_recipe\n config = get_or_merge_config(config, **kwargs)\n return render_recipe(recipe_path, no_download_source=config.no_download_source, config=config)\n\n\ndef output_yaml(metadata, file_path=None):\n from conda_build.render import output_yaml\n return output_yaml(metadata, file_path)\n\n\ndef get_output_file_path(recipe_path_or_metadata, no_download_source=False, config=None, **kwargs):\n from conda_build.render import render_recipe, bldpkg_path\n config = get_or_merge_config(config, **kwargs)\n if hasattr(recipe_path_or_metadata, 'config'):\n metadata = recipe_path_or_metadata\n recipe_config = metadata.config\n else:\n metadata, _, _ = render_recipe(recipe_path_or_metadata,\n no_download_source=no_download_source,\n config=config)\n recipe_config = config\n return bldpkg_path(metadata, recipe_config)\n\n\ndef check(recipe_path, no_download_source=False, config=None, **kwargs):\n from conda_build.render import render_recipe\n config = get_or_merge_config(config, **kwargs)\n metadata, _, _ = render_recipe(recipe_path, no_download_source=no_download_source,\n config=config)\n return metadata.check_fields()\n\n\ndef build(recipe_paths_or_metadata, post=None, need_source_download=True,\n build_only=False, notest=False, config=None, **kwargs):\n import os\n from conda_build.build import build_tree\n\n config = get_or_merge_config(config, **kwargs)\n\n recipes = _ensure_list(recipe_paths_or_metadata)\n absolute_recipes = []\n for recipe in recipes:\n if hasattr(recipe, \"config\"):\n absolute_recipes.append(recipe)\n elif os.path.isabs(recipe):\n absolute_recipes.append(recipe)\n else:\n absolute_recipes.append(os.path.normpath(os.path.join(os.getcwd(), recipe)))\n\n return build_tree(absolute_recipes, build_only=build_only, post=post, notest=notest,\n need_source_download=need_source_download, config=config)\n\n\ndef test(recipedir_or_package_or_metadata, move_broken=True, config=None, **kwargs):\n import os\n from conda_build.build import test\n from conda_build.render import render_recipe\n\n config = get_or_merge_config(config, **kwargs)\n\n if hasattr(recipedir_or_package_or_metadata, 'config'):\n metadata = recipedir_or_package_or_metadata\n recipe_config = metadata.config\n elif os.path.isdir(recipedir_or_package_or_metadata):\n # This will create a new local build folder if and only if config doesn't already have one.\n # What this means is that if we're running a test immediately after build, we use the one\n # that the build already provided\n config.compute_build_id(recipedir_or_package_or_metadata)\n metadata, _, _ = render_recipe(recipedir_or_package_or_metadata, config=config)\n recipe_config = config\n else:\n # fall back to old way (use recipe, rather than package)\n metadata, _, _ = render_recipe(recipedir_or_package_or_metadata, no_download_source=False,\n config=config, **kwargs)\n recipe_config = config\n\n with recipe_config:\n # 
This will create a new local build folder if and only if config doesn't already have one.\n # What this means is that if we're running a test immediately after build, we use the one\n # that the build already provided\n\n config.compute_build_id(metadata.name())\n test_result = test(metadata, config=recipe_config, move_broken=move_broken)\n return test_result\n\n\ndef keygen(name=\"conda_build_signing\", size=2048):\n \"\"\"Create a private/public key pair for package verification purposes\n\n name: string name of key to be generated.\n size: length of the RSA key, in bits. Should be power of 2.\n \"\"\"\n from .sign import keygen\n return keygen(name, size)\n\n\ndef import_sign_key(private_key_path, new_name=None):\n \"\"\"\n private_key_path: specify a private key to be imported. The public key is\n generated automatically. Specify ```new_name``` also to rename the\n private key in the copied location.\n \"\"\"\n from .sign import import_key\n return import_key(private_key_path, new_name=new_name)\n\n\ndef sign(file_path, key_name_or_path=None):\n from .sign import sign_and_write\n return sign_and_write(file_path, key_name_or_path)\n\n\ndef verify(file_path):\n \"\"\"Verify a signed package\"\"\"\n from .sign import verify\n return verify(file_path)\n\n\ndef list_skeletons():\n \"\"\"List available skeletons for generating conda recipes from external sources.\n\n The returned list is generally the names of supported repositories (pypi, cran, etc.)\"\"\"\n import pkgutil\n modules = pkgutil.iter_modules(['conda_build/skeletons'])\n files = []\n for _, name, _ in modules:\n if not name.startswith(\"_\"):\n files.append(name)\n return files\n\n\ndef skeletonize(packages, repo, output_dir=\".\", version=None, recursive=False,\n config=None, **kwargs):\n \"\"\"Generate a conda recipe from an external repo. Translates metadata from external\n sources into expected conda recipe format.\"\"\"\n\n # here we're dumping all extra kwargs as attributes on the config object. We'll extract\n # only relevant ones below\n config = get_or_merge_config(config, **kwargs)\n config.compute_build_id('skeleton')\n packages = _ensure_list(packages)\n\n # This is a little bit of black magic. The idea is that for any keyword argument that\n # we inspect from the given module's skeletonize funtion, we should hoist the argument\n # off of the config object, and pass it as a keyword argument. 
This is sort of the\n # inverse of what we do in the CLI code - there we take CLI arguments and dangle them\n # all on the config object as attributes.\n module = getattr(__import__(\"conda_build.skeletons\", globals=globals(), locals=locals(),\n fromlist=[repo]),\n repo)\n\n func_args = module.skeletonize.__code__.co_varnames\n kwargs = {name: getattr(config, name) for name in dir(config) if name in func_args}\n kwargs.update({name: value for name, value in kwargs.items() if name in func_args})\n # strip out local arguments that we pass directly\n for arg in skeletonize.__code__.co_varnames:\n if arg in kwargs:\n del kwargs[arg]\n with config:\n skeleton_return = module.skeletonize(packages, output_dir=output_dir, version=version,\n recursive=recursive, config=config, **kwargs)\n return skeleton_return\n\n\ndef develop(recipe_dir, prefix=_sys.prefix, no_pth_file=False,\n build_ext=False, clean=False, uninstall=False):\n \"\"\"Install a Python package in 'development mode'.\n\nThis works by creating a conda.pth file in site-packages.\"\"\"\n from .develop import execute\n recipe_dir = _ensure_list(recipe_dir)\n return execute(recipe_dir, prefix, no_pth_file, build_ext, clean, uninstall)\n\n\ndef convert(package_file, output_dir=\".\", show_imports=False, platforms=None, force=False,\n dependencies=None, verbose=False, quiet=True, dry_run=False):\n \"\"\"Convert changes a package from one platform to another. It applies only to things that are\n portable, such as pure python, or header-only C/C++ libraries.\"\"\"\n from .convert import conda_convert\n if not platforms:\n platforms = []\n if package_file.endswith('tar.bz2'):\n return conda_convert(package_file, output_dir=output_dir, show_imports=show_imports,\n platforms=platforms, force=force, verbose=verbose, quiet=quiet,\n dry_run=dry_run, dependencies=dependencies)\n elif package_file.endswith('.whl'):\n raise RuntimeError('Conversion from wheel packages is not '\n 'implemented yet, stay tuned.')\n else:\n raise RuntimeError(\"cannot convert: %s\" % package_file)\n\n\ndef test_installable(channel='defaults'):\n \"\"\"Check to make sure that packages in channel are installable.\n This is a consistency check for the channel.\"\"\"\n from .inspect import test_installable\n return test_installable(channel)\n\n\ndef inspect_linkages(packages, prefix=_sys.prefix, untracked=False, all_packages=False,\n show_files=False, groupby='package'):\n from .inspect import inspect_linkages\n packages = _ensure_list(packages)\n return inspect_linkages(packages, prefix=prefix, untracked=untracked, all_packages=all_packages,\n show_files=show_files, groupby=groupby)\n\n\ndef inspect_objects(packages, prefix=_sys.prefix, groupby='filename'):\n from .inspect import inspect_objects\n packages = _ensure_list(packages)\n return inspect_objects(packages, prefix=prefix, groupby=groupby)\n\n\ndef inspect_prefix_length(packages, min_prefix_length=_prefix_length):\n from conda_build.tarcheck import check_prefix_lengths\n packages = _ensure_list(packages)\n prefix_lengths = check_prefix_lengths(packages, min_prefix_length)\n if prefix_lengths:\n print(\"Packages with binary prefixes shorter than %d characters:\"\n % min_prefix_length)\n for fn, length in prefix_lengths.items():\n print(\"{0} ({1} chars)\".format(fn, length))\n else:\n print(\"No packages found with binary prefixes shorter than %d characters.\"\n % min_prefix_length)\n return len(prefix_lengths) == 0\n\n\ndef create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n 
dependencies=(), home=None, license_name=None, summary=None,\n config=None):\n from .metapackage import create_metapackage\n if not config:\n config = Config()\n return create_metapackage(name=name, version=version, entry_points=entry_points,\n build_string=build_string, build_number=build_number,\n dependencies=dependencies, home=home,\n license_name=license_name, summary=summary, config=config)\n\n\ndef update_index(dir_paths, config=None, force=False, check_md5=False, remove=False):\n from locale import getpreferredencoding\n import os\n from .conda_interface import PY3\n from conda_build.index import update_index\n dir_paths = [os.path.abspath(path) for path in _ensure_list(dir_paths)]\n # Don't use byte strings in Python 2\n if not PY3:\n dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths]\n\n if not config:\n config = Config()\n\n for path in dir_paths:\n update_index(path, config, force=force, check_md5=check_md5, remove=remove)\n", "path": "conda_build/api.py"}], "after_files": [{"content": "# (c) Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nThis file defines the public API for conda-build. Adding or removing functions,\nor Changing arguments to anything in here should also mean changing the major\nversion number.\n\nDesign philosophy: put variability into config. Make each function here accept kwargs,\nbut only use those kwargs in config. Config must change to support new features elsewhere.\n\"\"\"\n\n# imports are done locally to keep the api clean and limited strictly\n# to conda-build's functionality.\n\nimport sys as _sys\n\n# make the Config class available in the api namespace\nfrom conda_build.config import Config, get_or_merge_config, DEFAULT_PREFIX_LENGTH as _prefix_length\n\n\ndef _ensure_list(recipe_arg):\n from .conda_interface import string_types\n if isinstance(recipe_arg, string_types) or not hasattr(recipe_arg, '__iter__'):\n recipe_arg = [recipe_arg]\n return recipe_arg\n\n\ndef render(recipe_path, config=None, **kwargs):\n from conda_build.render import render_recipe\n config = get_or_merge_config(config, **kwargs)\n return render_recipe(recipe_path, no_download_source=config.no_download_source, config=config)\n\n\ndef output_yaml(metadata, file_path=None):\n from conda_build.render import output_yaml\n return output_yaml(metadata, file_path)\n\n\ndef get_output_file_path(recipe_path_or_metadata, no_download_source=False, config=None, **kwargs):\n from conda_build.render import render_recipe, bldpkg_path\n config = get_or_merge_config(config, **kwargs)\n if hasattr(recipe_path_or_metadata, 'config'):\n metadata = recipe_path_or_metadata\n recipe_config = metadata.config\n else:\n metadata, _, _ = render_recipe(recipe_path_or_metadata,\n no_download_source=no_download_source,\n config=config)\n recipe_config = config\n return bldpkg_path(metadata, recipe_config)\n\n\ndef check(recipe_path, no_download_source=False, config=None, **kwargs):\n from conda_build.render import render_recipe\n config = get_or_merge_config(config, **kwargs)\n metadata, _, _ = render_recipe(recipe_path, no_download_source=no_download_source,\n config=config)\n return metadata.check_fields()\n\n\ndef build(recipe_paths_or_metadata, post=None, need_source_download=True,\n build_only=False, notest=False, config=None, **kwargs):\n import os\n from conda_build.build import build_tree\n\n config = 
get_or_merge_config(config, **kwargs)\n\n recipes = _ensure_list(recipe_paths_or_metadata)\n absolute_recipes = []\n for recipe in recipes:\n if hasattr(recipe, \"config\"):\n absolute_recipes.append(recipe)\n elif os.path.isabs(recipe):\n absolute_recipes.append(recipe)\n else:\n absolute_recipes.append(os.path.normpath(os.path.join(os.getcwd(), recipe)))\n\n return build_tree(absolute_recipes, build_only=build_only, post=post, notest=notest,\n need_source_download=need_source_download, config=config)\n\n\ndef test(recipedir_or_package_or_metadata, move_broken=True, config=None, **kwargs):\n import os\n from conda_build.build import test\n from conda_build.render import render_recipe\n\n config = get_or_merge_config(config, **kwargs)\n\n if hasattr(recipedir_or_package_or_metadata, 'config'):\n metadata = recipedir_or_package_or_metadata\n recipe_config = metadata.config\n elif os.path.isdir(recipedir_or_package_or_metadata):\n # This will create a new local build folder if and only if config doesn't already have one.\n # What this means is that if we're running a test immediately after build, we use the one\n # that the build already provided\n config.compute_build_id(recipedir_or_package_or_metadata)\n metadata, _, _ = render_recipe(recipedir_or_package_or_metadata, config=config)\n recipe_config = config\n else:\n # fall back to old way (use recipe, rather than package)\n metadata, _, _ = render_recipe(recipedir_or_package_or_metadata, no_download_source=False,\n config=config, **kwargs)\n recipe_config = config\n\n with recipe_config:\n # This will create a new local build folder if and only if config doesn't already have one.\n # What this means is that if we're running a test immediately after build, we use the one\n # that the build already provided\n\n config.compute_build_id(metadata.name())\n test_result = test(metadata, config=recipe_config, move_broken=move_broken)\n return test_result\n\n\ndef keygen(name=\"conda_build_signing\", size=2048):\n \"\"\"Create a private/public key pair for package verification purposes\n\n name: string name of key to be generated.\n size: length of the RSA key, in bits. Should be power of 2.\n \"\"\"\n from .sign import keygen\n return keygen(name, size)\n\n\ndef import_sign_key(private_key_path, new_name=None):\n \"\"\"\n private_key_path: specify a private key to be imported. The public key is\n generated automatically. Specify ```new_name``` also to rename the\n private key in the copied location.\n \"\"\"\n from .sign import import_key\n return import_key(private_key_path, new_name=new_name)\n\n\ndef sign(file_path, key_name_or_path=None):\n from .sign import sign_and_write\n return sign_and_write(file_path, key_name_or_path)\n\n\ndef verify(file_path):\n \"\"\"Verify a signed package\"\"\"\n from .sign import verify\n return verify(file_path)\n\n\ndef list_skeletons():\n \"\"\"List available skeletons for generating conda recipes from external sources.\n\n The returned list is generally the names of supported repositories (pypi, cran, etc.)\"\"\"\n import pkgutil\n modules = pkgutil.iter_modules(['conda_build/skeletons'])\n files = []\n for _, name, _ in modules:\n if not name.startswith(\"_\"):\n files.append(name)\n return files\n\n\ndef skeletonize(packages, repo, output_dir=\".\", version=None, recursive=False,\n config=None, **kwargs):\n \"\"\"Generate a conda recipe from an external repo. 
Translates metadata from external\n sources into expected conda recipe format.\"\"\"\n\n if version:\n kwargs.update({'version': version})\n if recursive:\n kwargs.update({'version': version})\n if output_dir != \".\":\n kwargs.update({'output_dir': output_dir})\n\n # here we're dumping all extra kwargs as attributes on the config object. We'll extract\n # only relevant ones below\n config = get_or_merge_config(config, **kwargs)\n config.compute_build_id('skeleton')\n packages = _ensure_list(packages)\n\n # This is a little bit of black magic. The idea is that for any keyword argument that\n # we inspect from the given module's skeletonize funtion, we should hoist the argument\n # off of the config object, and pass it as a keyword argument. This is sort of the\n # inverse of what we do in the CLI code - there we take CLI arguments and dangle them\n # all on the config object as attributes.\n module = getattr(__import__(\"conda_build.skeletons\", globals=globals(), locals=locals(),\n fromlist=[repo]),\n repo)\n\n func_args = module.skeletonize.__code__.co_varnames\n kwargs = {name: getattr(config, name) for name in dir(config) if name in func_args}\n kwargs.update({name: value for name, value in kwargs.items() if name in func_args})\n # strip out local arguments that we pass directly\n for arg in skeletonize.__code__.co_varnames:\n if arg in kwargs:\n del kwargs[arg]\n with config:\n skeleton_return = module.skeletonize(packages, output_dir=output_dir, version=version,\n recursive=recursive, config=config, **kwargs)\n return skeleton_return\n\n\ndef develop(recipe_dir, prefix=_sys.prefix, no_pth_file=False,\n build_ext=False, clean=False, uninstall=False):\n \"\"\"Install a Python package in 'development mode'.\n\nThis works by creating a conda.pth file in site-packages.\"\"\"\n from .develop import execute\n recipe_dir = _ensure_list(recipe_dir)\n return execute(recipe_dir, prefix, no_pth_file, build_ext, clean, uninstall)\n\n\ndef convert(package_file, output_dir=\".\", show_imports=False, platforms=None, force=False,\n dependencies=None, verbose=False, quiet=True, dry_run=False):\n \"\"\"Convert changes a package from one platform to another. 
It applies only to things that are\n portable, such as pure python, or header-only C/C++ libraries.\"\"\"\n from .convert import conda_convert\n if not platforms:\n platforms = []\n if package_file.endswith('tar.bz2'):\n return conda_convert(package_file, output_dir=output_dir, show_imports=show_imports,\n platforms=platforms, force=force, verbose=verbose, quiet=quiet,\n dry_run=dry_run, dependencies=dependencies)\n elif package_file.endswith('.whl'):\n raise RuntimeError('Conversion from wheel packages is not '\n 'implemented yet, stay tuned.')\n else:\n raise RuntimeError(\"cannot convert: %s\" % package_file)\n\n\ndef test_installable(channel='defaults'):\n \"\"\"Check to make sure that packages in channel are installable.\n This is a consistency check for the channel.\"\"\"\n from .inspect import test_installable\n return test_installable(channel)\n\n\ndef inspect_linkages(packages, prefix=_sys.prefix, untracked=False, all_packages=False,\n show_files=False, groupby='package'):\n from .inspect import inspect_linkages\n packages = _ensure_list(packages)\n return inspect_linkages(packages, prefix=prefix, untracked=untracked, all_packages=all_packages,\n show_files=show_files, groupby=groupby)\n\n\ndef inspect_objects(packages, prefix=_sys.prefix, groupby='filename'):\n from .inspect import inspect_objects\n packages = _ensure_list(packages)\n return inspect_objects(packages, prefix=prefix, groupby=groupby)\n\n\ndef inspect_prefix_length(packages, min_prefix_length=_prefix_length):\n from conda_build.tarcheck import check_prefix_lengths\n packages = _ensure_list(packages)\n prefix_lengths = check_prefix_lengths(packages, min_prefix_length)\n if prefix_lengths:\n print(\"Packages with binary prefixes shorter than %d characters:\"\n % min_prefix_length)\n for fn, length in prefix_lengths.items():\n print(\"{0} ({1} chars)\".format(fn, length))\n else:\n print(\"No packages found with binary prefixes shorter than %d characters.\"\n % min_prefix_length)\n return len(prefix_lengths) == 0\n\n\ndef create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None,\n config=None):\n from .metapackage import create_metapackage\n if not config:\n config = Config()\n return create_metapackage(name=name, version=version, entry_points=entry_points,\n build_string=build_string, build_number=build_number,\n dependencies=dependencies, home=home,\n license_name=license_name, summary=summary, config=config)\n\n\ndef update_index(dir_paths, config=None, force=False, check_md5=False, remove=False):\n from locale import getpreferredencoding\n import os\n from .conda_interface import PY3\n from conda_build.index import update_index\n dir_paths = [os.path.abspath(path) for path in _ensure_list(dir_paths)]\n # Don't use byte strings in Python 2\n if not PY3:\n dir_paths = [path.decode(getpreferredencoding()) for path in dir_paths]\n\n if not config:\n config = Config()\n\n for path in dir_paths:\n update_index(path, config, force=force, check_md5=check_md5, remove=remove)\n", "path": "conda_build/api.py"}]}
num_tokens: 3,939
num_tokens_diff: 165

problem_id: gh_patches_debug_14605
source: rasdani/github-patches
task_type: git_diff
in_source_id: Mailu__Mailu-2444
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate (and remove) POD_ADDRESS_RANGE As discussed in #1209, POD_ADDRESS_RANGE should be removed and SUBNET should be used instead. Tasks: - remove all occurences of POD_ADDRESS_RANGE from containers - update the docs - write a changelog (breaking change!) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/admin/mailu/internal/views/dovecot.py` Content: ``` 1 from mailu import models 2 from mailu.internal import internal 3 from flask import current_app as app 4 5 import flask 6 import socket 7 import os 8 9 @internal.route("/dovecot/passdb/<path:user_email>") 10 def dovecot_passdb_dict(user_email): 11 user = models.User.query.get(user_email) or flask.abort(404) 12 allow_nets = [] 13 allow_nets.append(app.config["SUBNET"]) 14 if app.config["SUBNET6"]: 15 allow_nets.append(app.config["SUBNET6"]) 16 if app.config["POD_ADDRESS_RANGE"]: 17 allow_nets.append(app.config["POD_ADDRESS_RANGE"]) 18 return flask.jsonify({ 19 "password": None, 20 "nopassword": "Y", 21 "allow_nets": ",".join(allow_nets) 22 }) 23 24 25 @internal.route("/dovecot/userdb/<path:user_email>") 26 def dovecot_userdb_dict(user_email): 27 user = models.User.query.get(user_email) or flask.abort(404) 28 return flask.jsonify({ 29 "quota_rule": "*:bytes={}".format(user.quota_bytes) 30 }) 31 32 33 @internal.route("/dovecot/quota/<ns>/<path:user_email>", methods=["POST"]) 34 def dovecot_quota(ns, user_email): 35 user = models.User.query.get(user_email) or flask.abort(404) 36 if ns == "storage": 37 user.quota_bytes_used = flask.request.get_json() 38 models.db.session.commit() 39 return flask.jsonify(None) 40 41 42 @internal.route("/dovecot/sieve/name/<script>/<path:user_email>") 43 def dovecot_sieve_name(script, user_email): 44 return flask.jsonify(script) 45 46 47 @internal.route("/dovecot/sieve/data/default/<path:user_email>") 48 def dovecot_sieve_data(user_email): 49 user = models.User.query.get(user_email) or flask.abort(404) 50 return flask.jsonify(flask.render_template("default.sieve", user=user)) 51 ``` Path: `core/admin/mailu/configuration.py` Content: ``` 1 import os 2 3 from datetime import timedelta 4 from socrate import system 5 import ipaddress 6 7 DEFAULT_CONFIG = { 8 # Specific to the admin UI 9 'DOCKER_SOCKET': 'unix:///var/run/docker.sock', 10 'BABEL_DEFAULT_LOCALE': 'en', 11 'BABEL_DEFAULT_TIMEZONE': 'UTC', 12 'BOOTSTRAP_SERVE_LOCAL': True, 13 'RATELIMIT_STORAGE_URL': '', 14 'QUOTA_STORAGE_URL': '', 15 'DEBUG': False, 16 'DOMAIN_REGISTRATION': False, 17 'TEMPLATES_AUTO_RELOAD': True, 18 'MEMORY_SESSIONS': False, 19 # Database settings 20 'DB_FLAVOR': None, 21 'DB_USER': 'mailu', 22 'DB_PW': None, 23 'DB_HOST': 'database', 24 'DB_NAME': 'mailu', 25 'SQLITE_DATABASE_FILE':'data/main.db', 26 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db', 27 'SQLALCHEMY_TRACK_MODIFICATIONS': False, 28 # Statistics management 29 'INSTANCE_ID_PATH': '/data/instance', 30 'STATS_ENDPOINT': '19.{}.stats.mailu.io', 31 # Common configuration variables 32 'SECRET_KEY': 'changeMe', 33 'DOMAIN': 'mailu.io', 34 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io', 35 'POSTMASTER': 'postmaster', 36 'WILDCARD_SENDERS': '', 37 'TLS_FLAVOR': 'cert', 38 'INBOUND_TLS_ENFORCE': False, 39 'DEFER_ON_TLS_ERROR': True, 40 'AUTH_RATELIMIT_IP': '60/hour', 41 'AUTH_RATELIMIT_IP_V4_MASK': 24, 42 'AUTH_RATELIMIT_IP_V6_MASK': 56, 
43 'AUTH_RATELIMIT_USER': '100/day', 44 'AUTH_RATELIMIT_EXEMPTION': '', 45 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400, 46 'DISABLE_STATISTICS': False, 47 # Mail settings 48 'DMARC_RUA': None, 49 'DMARC_RUF': None, 50 'WELCOME': False, 51 'WELCOME_SUBJECT': 'Dummy welcome topic', 52 'WELCOME_BODY': 'Dummy welcome body', 53 'DKIM_SELECTOR': 'dkim', 54 'DKIM_PATH': '/dkim/{domain}.{selector}.key', 55 'DEFAULT_QUOTA': 1000000000, 56 'MESSAGE_RATELIMIT': '200/day', 57 'MESSAGE_RATELIMIT_EXEMPTION': '', 58 'RECIPIENT_DELIMITER': '', 59 # Web settings 60 'SITENAME': 'Mailu', 61 'WEBSITE': 'https://mailu.io', 62 'ADMIN' : 'none', 63 'WEB_ADMIN': '/admin', 64 'WEB_WEBMAIL': '/webmail', 65 'WEBMAIL': 'none', 66 'RECAPTCHA_PUBLIC_KEY': '', 67 'RECAPTCHA_PRIVATE_KEY': '', 68 'LOGO_URL': None, 69 'LOGO_BACKGROUND': None, 70 # Advanced settings 71 'LOG_LEVEL': 'WARNING', 72 'SESSION_KEY_BITS': 128, 73 'SESSION_TIMEOUT': 3600, 74 'PERMANENT_SESSION_LIFETIME': 30*24*3600, 75 'SESSION_COOKIE_SECURE': True, 76 'CREDENTIAL_ROUNDS': 12, 77 'TZ': 'Etc/UTC', 78 # Host settings 79 'HOST_IMAP': 'imap', 80 'HOST_LMTP': 'imap:2525', 81 'HOST_POP3': 'imap', 82 'HOST_SMTP': 'smtp', 83 'HOST_AUTHSMTP': 'smtp', 84 'HOST_ADMIN': 'admin', 85 'HOST_WEBMAIL': 'webmail', 86 'HOST_WEBDAV': 'webdav:5232', 87 'HOST_REDIS': 'redis', 88 'HOST_FRONT': 'front', 89 'SUBNET': '192.168.203.0/24', 90 'SUBNET6': None, 91 'POD_ADDRESS_RANGE': None 92 } 93 94 class ConfigManager: 95 """ Naive configuration manager that uses environment only 96 """ 97 98 DB_TEMPLATES = { 99 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}', 100 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}', 101 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}' 102 } 103 104 def __init__(self): 105 self.config = dict() 106 107 def get_host_address(self, name): 108 # if MYSERVICE_ADDRESS is defined, use this 109 if f'{name}_ADDRESS' in os.environ: 110 return os.environ.get(f'{name}_ADDRESS') 111 # otherwise use the host name and resolve it 112 return system.resolve_address(self.config[f'HOST_{name}']) 113 114 def resolve_hosts(self): 115 for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']: 116 self.config[f'{key}_ADDRESS'] = self.get_host_address(key) 117 if self.config['WEBMAIL'] != 'none': 118 self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL') 119 120 def __get_env(self, key, value): 121 key_file = key + "_FILE" 122 if key_file in os.environ: 123 with open(os.environ.get(key_file)) as file: 124 value_from_file = file.read() 125 return value_from_file.strip() 126 else: 127 return os.environ.get(key, value) 128 129 def __coerce_value(self, value): 130 if isinstance(value, str) and value.lower() in ('true','yes'): 131 return True 132 elif isinstance(value, str) and value.lower() in ('false', 'no'): 133 return False 134 return value 135 136 def init_app(self, app): 137 # get current app config 138 self.config.update(app.config) 139 # get environment variables 140 self.config.update({ 141 key: self.__coerce_value(self.__get_env(key, value)) 142 for key, value in DEFAULT_CONFIG.items() 143 }) 144 self.resolve_hosts() 145 146 # automatically set the sqlalchemy string 147 if self.config['DB_FLAVOR']: 148 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']] 149 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config) 150 151 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2' 152 self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/1' 153 
self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3' 154 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict' 155 self.config['SESSION_COOKIE_HTTPONLY'] = True 156 self.config['SESSION_PERMANENT'] = True 157 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT']) 158 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME']) 159 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK']) 160 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK']) 161 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')] 162 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr) 163 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s]) 164 self.config['HOSTNAMES'] = ','.join(hostnames) 165 self.config['HOSTNAME'] = hostnames[0] 166 167 # update the app config 168 app.config.update(self.config) 169 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py --- a/core/admin/mailu/configuration.py +++ b/core/admin/mailu/configuration.py @@ -87,8 +87,7 @@ 'HOST_REDIS': 'redis', 'HOST_FRONT': 'front', 'SUBNET': '192.168.203.0/24', - 'SUBNET6': None, - 'POD_ADDRESS_RANGE': None + 'SUBNET6': None } class ConfigManager: diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py --- a/core/admin/mailu/internal/views/dovecot.py +++ b/core/admin/mailu/internal/views/dovecot.py @@ -13,8 +13,6 @@ allow_nets.append(app.config["SUBNET"]) if app.config["SUBNET6"]: allow_nets.append(app.config["SUBNET6"]) - if app.config["POD_ADDRESS_RANGE"]: - allow_nets.append(app.config["POD_ADDRESS_RANGE"]) return flask.jsonify({ "password": None, "nopassword": "Y",
{"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -87,8 +87,7 @@\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n- 'SUBNET6': None,\n- 'POD_ADDRESS_RANGE': None\n+ 'SUBNET6': None\n }\n \n class ConfigManager:\ndiff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py\n--- a/core/admin/mailu/internal/views/dovecot.py\n+++ b/core/admin/mailu/internal/views/dovecot.py\n@@ -13,8 +13,6 @@\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n- if app.config[\"POD_ADDRESS_RANGE\"]:\n- allow_nets.append(app.config[\"POD_ADDRESS_RANGE\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n", "issue": "Deprecate (and remove) POD_ADDRESS_RANGE\nAs discussed in #1209, POD_ADDRESS_RANGE should be removed and SUBNET should be used instead.\r\nTasks:\r\n- remove all occurences of POD_ADDRESS_RANGE from containers\r\n- update the docs\r\n- write a changelog (breaking change!)\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\n\[email protected](\"/dovecot/passdb/<path:user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n if app.config[\"POD_ADDRESS_RANGE\"]:\n allow_nets.append(app.config[\"POD_ADDRESS_RANGE\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n \"allow_nets\": \",\".join(allow_nets)\n })\n\n\[email protected](\"/dovecot/userdb/<path:user_email>\")\ndef dovecot_userdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": \"*:bytes={}\".format(user.quota_bytes)\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<path:user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n models.db.session.commit()\n return flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<path:user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<path:user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}, {"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 
'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if f'{name}_ADDRESS' in os.environ:\n return os.environ.get(f'{name}_ADDRESS')\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config[f'HOST_{name}'])\n\n def resolve_hosts(self):\n for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:\n self.config[f'{key}_ADDRESS'] = self.get_host_address(key)\n if self.config['WEBMAIL'] != 'none':\n self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, 
value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/1'\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n\n # update the app config\n app.config.update(self.config)\n\n", "path": "core/admin/mailu/configuration.py"}], "after_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\n\[email protected](\"/dovecot/passdb/<path:user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n \"allow_nets\": \",\".join(allow_nets)\n })\n\n\[email protected](\"/dovecot/userdb/<path:user_email>\")\ndef dovecot_userdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": \"*:bytes={}\".format(user.quota_bytes)\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<path:user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n models.db.session.commit()\n return flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<path:user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<path:user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}, {"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 
'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if f'{name}_ADDRESS' in os.environ:\n return os.environ.get(f'{name}_ADDRESS')\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config[f'HOST_{name}'])\n\n def resolve_hosts(self):\n for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:\n self.config[f'{key}_ADDRESS'] = self.get_host_address(key)\n if self.config['WEBMAIL'] != 'none':\n self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return 
True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/1'\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n\n # update the app config\n app.config.update(self.config)\n\n", "path": "core/admin/mailu/configuration.py"}]}
num_tokens: 2,983
num_tokens_diff: 263

problem_id: gh_patches_debug_14989
source: rasdani/github-patches
task_type: git_diff
in_source_id: OCHA-DAP__hdx-ckan-1984
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Server Error when downloading dataset This dataset, recently added by @JavierTeran , has been throwing a server error when I click on "download": https://data.hdx.rwlabs.org/dataset/victimas-de-minas/resource/0f39852c-e4bd-4c41-b5a1-88f38564955f --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py` Content: ``` 1 import ckan.controllers.storage as storage 2 import os 3 import re 4 import urllib 5 import uuid 6 from datetime import datetime 7 from cgi import FieldStorage 8 9 from ofs import get_impl 10 from pylons import request, response 11 from pylons.controllers.util import abort, redirect_to 12 from pylons import config 13 from paste.fileapp import FileApp 14 from paste.deploy.converters import asbool 15 16 from ckan.lib.base import BaseController, c, request, render, config, h, abort 17 from ckan.lib.jsonp import jsonpify 18 import ckan.model as model 19 import ckan.logic as logic 20 21 try: 22 from cStringIO import StringIO 23 except ImportError: 24 from StringIO import StringIO 25 try: 26 import json 27 except: 28 import simplejson as json 29 30 from logging import getLogger 31 log = getLogger(__name__) 32 33 34 BUCKET = config.get('ckan.storage.bucket', 'default') 35 key_prefix = config.get('ckan.storage.key_prefix', 'file/') 36 37 _eq_re = re.compile(r"^(.*)(=[0-9]*)$") 38 39 40 def generate_response(http_status, unicode_body, no_cache=True, other_headers=None): 41 r = request.environ['pylons.pylons'].response 42 if no_cache: 43 r.headers['Pragma'] = 'no-cache' 44 r.headers['Cache-Control'] = 'no-cache' 45 46 if other_headers: 47 for key, value in other_headers.iteritems(): 48 r.headers[key] = value 49 50 r.unicode_body = unicode_body 51 r.status = http_status 52 return r 53 54 55 class FileDownloadController(storage.StorageController): 56 _ofs_impl = None 57 58 @property 59 def ofs(self): 60 if not FileDownloadController._ofs_impl: 61 FileDownloadController._ofs_impl = get_ofs() 62 return FileDownloadController._ofs_impl 63 64 def _download_file(self, res, label): 65 # We need this as a resource object to check access so create a dummy 66 # obj and trick CKAN 67 resource = model.Resource() 68 69 for k in res.keys(): 70 setattr(resource, k, res[k]) 71 72 context = {'model': model, 'session': model.Session, 73 'user': c.user or c.author, 'for_view': True, 74 'auth_user_obj': c.userobj, 'resource': resource} 75 data_dict = {'id': resource.id} 76 try: 77 logic.check_access('resource_show', context, data_dict) 78 except logic.NotAuthorized: 79 redirect_url = h.url_for(controller='user', action='login', 80 came_from=resource.url) 81 r = generate_response(303, u'Not authorized to read file ' + resource.id, 82 other_headers={'Location': redirect_url, 'X-CKAN-Error': '403 Access Denied'}) 83 return r 84 85 exists = self.ofs.exists(BUCKET, label) 86 if not exists: 87 # handle erroneous trailing slash by redirecting to url w/o slash 88 if label.endswith('/'): 89 label = label[:-1] 90 # This may be best being cached_url until we have moved it into 91 # permanent storage 92 file_url = h.url_for('storage_file', label=label) 93 h.redirect_to(file_url) 94 else: 95 # abort(404) 96 r = generate_response(404, u'File not found') 97 return r 98 99 file_url = self.ofs.get_url(BUCKET, label) 100 if file_url.startswith("file://") 
or file_url.endswith('xlsx'): 101 metadata = self.ofs.get_metadata(BUCKET, label) 102 filepath = file_url[len("file://"):] 103 headers = { 104 # 'Content-Disposition':'attachment; filename="%s"' % label, 105 'Pragma': 'no-cache', 106 'Cache-Control': 'no-cache', 107 'Content-Type': metadata.get('_format', 'text/plain')} 108 if resource.name: 109 res_name = resource.name.replace('"', '_') 110 headers[ 111 'Content-Disposition'] = 'inline; filename="{}"'.format(res_name) 112 fapp = FileApp(filepath, headers=None, **headers) 113 return fapp(request.environ, self.start_response) 114 else: 115 h.redirect_to(file_url.encode('ascii', 'ignore')) 116 117 def file(self, label): 118 from sqlalchemy.engine import create_engine 119 # from label find resource id 120 url = config.get('ckan.site_url', '') + \ 121 '/storage/f/' + urllib.quote(label) 122 engine = create_engine(config.get('sqlalchemy.url', ''), echo=True) 123 connection = engine.connect() 124 query = connection.execute( 125 """SELECT * from resource where url= %s""", (url,)) 126 res = query.fetchone() 127 if not res: 128 # raise logic.NotFound 129 r = generate_response(404, u'File not found') 130 return r 131 132 return self._download_file(res, label) 133 134 def perma_file(self, id, resource_id): 135 from sqlalchemy.engine import create_engine 136 # from label find resource id 137 engine = create_engine(config.get('sqlalchemy.url', ''), echo=True) 138 connection = engine.connect() 139 query = connection.execute( 140 """SELECT * from resource where id= %s""", (resource_id,)) 141 res = query.fetchone() 142 if not res: 143 # raise logic.NotFound 144 r = generate_response(404, u'File not found') 145 return r 146 else: 147 label = self._get_label_from_resource(res) 148 149 return self._download_file(res, label) 150 151 def _get_label_from_resource(self, resource): 152 prefix = '/storage/f/' 153 url = resource.url 154 index = url.find(prefix) 155 label = url[index + len(prefix):] 156 uq_label = urllib.unquote(label) 157 return uq_label 158 159 160 def create_pairtree_marker(folder): 161 """ Creates the pairtree marker for tests if it doesn't exist """ 162 if not folder[:-1] == '/': 163 folder = folder + '/' 164 165 directory = os.path.dirname(folder) 166 if not os.path.exists(directory): 167 os.makedirs(directory) 168 169 target = os.path.join(directory, 'pairtree_version0_1') 170 if os.path.exists(target): 171 return 172 173 open(target, 'wb').close() 174 175 176 def get_ofs(): 177 """Return a configured instance of the appropriate OFS driver. 178 """ 179 storage_backend = config['ofs.impl'] 180 kw = {} 181 for k, v in config.items(): 182 if not k.startswith('ofs.') or k == 'ofs.impl': 183 continue 184 kw[k[4:]] = v 185 186 # Make sure we have created the marker file to avoid pairtree issues 187 if storage_backend == 'pairtree' and 'storage_dir' in kw: 188 create_pairtree_marker(kw['storage_dir']) 189 190 ofs = get_impl(storage_backend)(**kw) 191 return ofs 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py --- a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py +++ b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py @@ -107,8 +107,9 @@ 'Content-Type': metadata.get('_format', 'text/plain')} if resource.name: res_name = resource.name.replace('"', '_') + res_name_encoded = res_name.encode('utf-8', 'ignore') headers[ - 'Content-Disposition'] = 'inline; filename="{}"'.format(res_name) + 'Content-Disposition'] = 'inline; filename="{}"'.format(res_name_encoded) fapp = FileApp(filepath, headers=None, **headers) return fapp(request.environ, self.start_response) else:
{"golden_diff": "diff --git a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py\n--- a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py\n+++ b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py\n@@ -107,8 +107,9 @@\n 'Content-Type': metadata.get('_format', 'text/plain')}\n if resource.name:\n res_name = resource.name.replace('\"', '_')\n+ res_name_encoded = res_name.encode('utf-8', 'ignore')\n headers[\n- 'Content-Disposition'] = 'inline; filename=\"{}\"'.format(res_name)\n+ 'Content-Disposition'] = 'inline; filename=\"{}\"'.format(res_name_encoded)\n fapp = FileApp(filepath, headers=None, **headers)\n return fapp(request.environ, self.start_response)\n else:\n", "issue": "Server Error when downloading dataset\nThis dataset, recently added by @JavierTeran , has been throwing a server error when I click on \"download\": https://data.hdx.rwlabs.org/dataset/victimas-de-minas/resource/0f39852c-e4bd-4c41-b5a1-88f38564955f\n\n", "before_files": [{"content": "import ckan.controllers.storage as storage\nimport os\nimport re\nimport urllib\nimport uuid\nfrom datetime import datetime\nfrom cgi import FieldStorage\n\nfrom ofs import get_impl\nfrom pylons import request, response\nfrom pylons.controllers.util import abort, redirect_to\nfrom pylons import config\nfrom paste.fileapp import FileApp\nfrom paste.deploy.converters import asbool\n\nfrom ckan.lib.base import BaseController, c, request, render, config, h, abort\nfrom ckan.lib.jsonp import jsonpify\nimport ckan.model as model\nimport ckan.logic as logic\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\ntry:\n import json\nexcept:\n import simplejson as json\n\nfrom logging import getLogger\nlog = getLogger(__name__)\n\n\nBUCKET = config.get('ckan.storage.bucket', 'default')\nkey_prefix = config.get('ckan.storage.key_prefix', 'file/')\n\n_eq_re = re.compile(r\"^(.*)(=[0-9]*)$\")\n\n\ndef generate_response(http_status, unicode_body, no_cache=True, other_headers=None):\n r = request.environ['pylons.pylons'].response\n if no_cache:\n r.headers['Pragma'] = 'no-cache'\n r.headers['Cache-Control'] = 'no-cache'\n\n if other_headers:\n for key, value in other_headers.iteritems():\n r.headers[key] = value\n\n r.unicode_body = unicode_body\n r.status = http_status\n return r\n\n\nclass FileDownloadController(storage.StorageController):\n _ofs_impl = None\n\n @property\n def ofs(self):\n if not FileDownloadController._ofs_impl:\n FileDownloadController._ofs_impl = get_ofs()\n return FileDownloadController._ofs_impl\n\n def _download_file(self, res, label):\n # We need this as a resource object to check access so create a dummy\n # obj and trick CKAN\n resource = model.Resource()\n\n for k in res.keys():\n setattr(resource, k, res[k])\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj, 'resource': resource}\n data_dict = {'id': resource.id}\n try:\n logic.check_access('resource_show', context, data_dict)\n except logic.NotAuthorized:\n redirect_url = h.url_for(controller='user', action='login',\n came_from=resource.url)\n r = generate_response(303, u'Not authorized to read file ' + resource.id,\n other_headers={'Location': redirect_url, 'X-CKAN-Error': '403 Access Denied'})\n return r\n\n exists = self.ofs.exists(BUCKET, label)\n if not exists:\n # handle erroneous trailing slash by 
redirecting to url w/o slash\n if label.endswith('/'):\n label = label[:-1]\n # This may be best being cached_url until we have moved it into\n # permanent storage\n file_url = h.url_for('storage_file', label=label)\n h.redirect_to(file_url)\n else:\n # abort(404)\n r = generate_response(404, u'File not found')\n return r\n\n file_url = self.ofs.get_url(BUCKET, label)\n if file_url.startswith(\"file://\") or file_url.endswith('xlsx'):\n metadata = self.ofs.get_metadata(BUCKET, label)\n filepath = file_url[len(\"file://\"):]\n headers = {\n # 'Content-Disposition':'attachment; filename=\"%s\"' % label,\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'Content-Type': metadata.get('_format', 'text/plain')}\n if resource.name:\n res_name = resource.name.replace('\"', '_')\n headers[\n 'Content-Disposition'] = 'inline; filename=\"{}\"'.format(res_name)\n fapp = FileApp(filepath, headers=None, **headers)\n return fapp(request.environ, self.start_response)\n else:\n h.redirect_to(file_url.encode('ascii', 'ignore'))\n\n def file(self, label):\n from sqlalchemy.engine import create_engine\n # from label find resource id\n url = config.get('ckan.site_url', '') + \\\n '/storage/f/' + urllib.quote(label)\n engine = create_engine(config.get('sqlalchemy.url', ''), echo=True)\n connection = engine.connect()\n query = connection.execute(\n \"\"\"SELECT * from resource where url= %s\"\"\", (url,))\n res = query.fetchone()\n if not res:\n # raise logic.NotFound\n r = generate_response(404, u'File not found')\n return r\n\n return self._download_file(res, label)\n\n def perma_file(self, id, resource_id):\n from sqlalchemy.engine import create_engine\n # from label find resource id\n engine = create_engine(config.get('sqlalchemy.url', ''), echo=True)\n connection = engine.connect()\n query = connection.execute(\n \"\"\"SELECT * from resource where id= %s\"\"\", (resource_id,))\n res = query.fetchone()\n if not res:\n # raise logic.NotFound\n r = generate_response(404, u'File not found')\n return r\n else:\n label = self._get_label_from_resource(res)\n\n return self._download_file(res, label)\n\n def _get_label_from_resource(self, resource):\n prefix = '/storage/f/'\n url = resource.url\n index = url.find(prefix)\n label = url[index + len(prefix):]\n uq_label = urllib.unquote(label)\n return uq_label\n\n\ndef create_pairtree_marker(folder):\n \"\"\" Creates the pairtree marker for tests if it doesn't exist \"\"\"\n if not folder[:-1] == '/':\n folder = folder + '/'\n\n directory = os.path.dirname(folder)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n target = os.path.join(directory, 'pairtree_version0_1')\n if os.path.exists(target):\n return\n\n open(target, 'wb').close()\n\n\ndef get_ofs():\n \"\"\"Return a configured instance of the appropriate OFS driver.\n \"\"\"\n storage_backend = config['ofs.impl']\n kw = {}\n for k, v in config.items():\n if not k.startswith('ofs.') or k == 'ofs.impl':\n continue\n kw[k[4:]] = v\n\n # Make sure we have created the marker file to avoid pairtree issues\n if storage_backend == 'pairtree' and 'storage_dir' in kw:\n create_pairtree_marker(kw['storage_dir'])\n\n ofs = get_impl(storage_backend)(**kw)\n return ofs\n", "path": "ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py"}], "after_files": [{"content": "import ckan.controllers.storage as storage\nimport os\nimport re\nimport urllib\nimport uuid\nfrom datetime import datetime\nfrom cgi import FieldStorage\n\nfrom ofs import get_impl\nfrom pylons import request, 
response\nfrom pylons.controllers.util import abort, redirect_to\nfrom pylons import config\nfrom paste.fileapp import FileApp\nfrom paste.deploy.converters import asbool\n\nfrom ckan.lib.base import BaseController, c, request, render, config, h, abort\nfrom ckan.lib.jsonp import jsonpify\nimport ckan.model as model\nimport ckan.logic as logic\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\ntry:\n import json\nexcept:\n import simplejson as json\n\nfrom logging import getLogger\nlog = getLogger(__name__)\n\n\nBUCKET = config.get('ckan.storage.bucket', 'default')\nkey_prefix = config.get('ckan.storage.key_prefix', 'file/')\n\n_eq_re = re.compile(r\"^(.*)(=[0-9]*)$\")\n\n\ndef generate_response(http_status, unicode_body, no_cache=True, other_headers=None):\n r = request.environ['pylons.pylons'].response\n if no_cache:\n r.headers['Pragma'] = 'no-cache'\n r.headers['Cache-Control'] = 'no-cache'\n\n if other_headers:\n for key, value in other_headers.iteritems():\n r.headers[key] = value\n\n r.unicode_body = unicode_body\n r.status = http_status\n return r\n\n\nclass FileDownloadController(storage.StorageController):\n _ofs_impl = None\n\n @property\n def ofs(self):\n if not FileDownloadController._ofs_impl:\n FileDownloadController._ofs_impl = get_ofs()\n return FileDownloadController._ofs_impl\n\n def _download_file(self, res, label):\n # We need this as a resource object to check access so create a dummy\n # obj and trick CKAN\n resource = model.Resource()\n\n for k in res.keys():\n setattr(resource, k, res[k])\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj, 'resource': resource}\n data_dict = {'id': resource.id}\n try:\n logic.check_access('resource_show', context, data_dict)\n except logic.NotAuthorized:\n redirect_url = h.url_for(controller='user', action='login',\n came_from=resource.url)\n r = generate_response(303, u'Not authorized to read file ' + resource.id,\n other_headers={'Location': redirect_url, 'X-CKAN-Error': '403 Access Denied'})\n return r\n\n exists = self.ofs.exists(BUCKET, label)\n if not exists:\n # handle erroneous trailing slash by redirecting to url w/o slash\n if label.endswith('/'):\n label = label[:-1]\n # This may be best being cached_url until we have moved it into\n # permanent storage\n file_url = h.url_for('storage_file', label=label)\n h.redirect_to(file_url)\n else:\n # abort(404)\n r = generate_response(404, u'File not found')\n return r\n\n file_url = self.ofs.get_url(BUCKET, label)\n if file_url.startswith(\"file://\") or file_url.endswith('xlsx'):\n metadata = self.ofs.get_metadata(BUCKET, label)\n filepath = file_url[len(\"file://\"):]\n headers = {\n # 'Content-Disposition':'attachment; filename=\"%s\"' % label,\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'Content-Type': metadata.get('_format', 'text/plain')}\n if resource.name:\n res_name = resource.name.replace('\"', '_')\n res_name_encoded = res_name.encode('utf-8', 'ignore')\n headers[\n 'Content-Disposition'] = 'inline; filename=\"{}\"'.format(res_name_encoded)\n fapp = FileApp(filepath, headers=None, **headers)\n return fapp(request.environ, self.start_response)\n else:\n h.redirect_to(file_url.encode('ascii', 'ignore'))\n\n def file(self, label):\n from sqlalchemy.engine import create_engine\n # from label find resource id\n url = config.get('ckan.site_url', '') + \\\n '/storage/f/' + urllib.quote(label)\n engine = 
create_engine(config.get('sqlalchemy.url', ''), echo=True)\n connection = engine.connect()\n query = connection.execute(\n \"\"\"SELECT * from resource where url= %s\"\"\", (url,))\n res = query.fetchone()\n if not res:\n # raise logic.NotFound\n r = generate_response(404, u'File not found')\n return r\n\n return self._download_file(res, label)\n\n def perma_file(self, id, resource_id):\n from sqlalchemy.engine import create_engine\n # from label find resource id\n engine = create_engine(config.get('sqlalchemy.url', ''), echo=True)\n connection = engine.connect()\n query = connection.execute(\n \"\"\"SELECT * from resource where id= %s\"\"\", (resource_id,))\n res = query.fetchone()\n if not res:\n # raise logic.NotFound\n r = generate_response(404, u'File not found')\n return r\n else:\n label = self._get_label_from_resource(res)\n\n return self._download_file(res, label)\n\n def _get_label_from_resource(self, resource):\n prefix = '/storage/f/'\n url = resource.url\n index = url.find(prefix)\n label = url[index + len(prefix):]\n uq_label = urllib.unquote(label)\n return uq_label\n\n\ndef create_pairtree_marker(folder):\n \"\"\" Creates the pairtree marker for tests if it doesn't exist \"\"\"\n if not folder[:-1] == '/':\n folder = folder + '/'\n\n directory = os.path.dirname(folder)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n target = os.path.join(directory, 'pairtree_version0_1')\n if os.path.exists(target):\n return\n\n open(target, 'wb').close()\n\n\ndef get_ofs():\n \"\"\"Return a configured instance of the appropriate OFS driver.\n \"\"\"\n storage_backend = config['ofs.impl']\n kw = {}\n for k, v in config.items():\n if not k.startswith('ofs.') or k == 'ofs.impl':\n continue\n kw[k[4:]] = v\n\n # Make sure we have created the marker file to avoid pairtree issues\n if storage_backend == 'pairtree' and 'storage_dir' in kw:\n create_pairtree_marker(kw['storage_dir'])\n\n ofs = get_impl(storage_backend)(**kw)\n return ofs\n", "path": "ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py"}]}
2,291
215
gh_patches_debug_27848
rasdani/github-patches
git_diff
ultrabug__py3status-1876
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- error basic auth url encoding I get the following error when attempting to use basic auth url encoding. ``` $ curl https://user:[email protected]/test.json {"some":" data"} $ cat config order += "getjson test" getjson test { url = "https://user:[email protected]/test.json" format = "{some}" cache_timeout = 300 } $ py3status --config config --log-file log-file --wm sway {"version": 1, "click_events": true, "stop_signal": 20} [[] ,[{"full_text": "getjson test", "color": "#FF0000", "instance": "test", "name": "getjson"}] ^C $ cat log-file 2019-12-15 13:00:36 INFO ======== 2019-12-15 13:00:36 INFO Starting py3status version 3.22 python 3.8.0 2019-12-15 13:00:36 INFO window manager: sway 2019-12-15 13:00:36 INFO config file: config 2019-12-15 13:00:36 INFO modules include paths: [] 2019-12-15 13:00:36 INFO loading module "getjson test" from py3status.modules.getjson 2019-12-15 13:00:36 INFO starting module getjson test 2019-12-15 13:00:36 WARNING Instance `getjson test`, user method `getjson` failed (InvalidURL) request.py line 72. 2019-12-15 13:00:36 INFO Traceback InvalidURL: nonnumeric port: '[email protected]' File "/usr/lib/python3.8/site-packages/py3status/module.py", line 946, in run response = method() File "/usr/lib/python3.8/site-packages/py3status/modules/getjson.py", line 85, in getjson json_data = self.py3.request(self.url).json() File "/usr/lib/python3.8/site-packages/py3status/py3.py", line 1318, in request return get_http_response() File "/usr/lib/python3.8/site-packages/py3status/py3.py", line 1306, in get_http_response return HttpResponse( File "/usr/lib/python3.8/site-packages/py3status/request.py", line 72, in __init__ self._response = urlopen(request, data=data, timeout=timeout) File "/usr/lib/python3.8/urllib/request.py", line 222, in urlopen return opener.open(url, data, timeout) File "/usr/lib/python3.8/urllib/request.py", line 525, in open response = self._open(req, data) File "/usr/lib/python3.8/urllib/request.py", line 542, in _open result = self._call_chain(self.handle_open, protocol, protocol + File "/usr/lib/python3.8/urllib/request.py", line 502, in _call_chain result = func(*args) File "/usr/lib/python3.8/urllib/request.py", line 1360, in https_open return self.do_open(http.client.HTTPSConnection, req, File "/usr/lib/python3.8/urllib/request.py", line 1286, in do_open h = http_class(host, timeout=req.timeout, **http_conn_args) File "/usr/lib/python3.8/http/client.py", line 1357, in __init__ super(HTTPSConnection, self).__init__(host, port, timeout, File "/usr/lib/python3.8/http/client.py", line 829, in __init__ (self.host, self.port) = self._get_hostport(host, port) File "/usr/lib/python3.8/http/client.py", line 870, in _get_hostport raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `py3status/modules/getjson.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Display JSON data fetched from a URL. 4 5 This module gets the given `url` configuration parameter and assumes the 6 response is a JSON object. The keys of the JSON object are used as the format 7 placeholders. The format placeholders are replaced by the value. Objects that 8 are nested can be accessed by using the `delimiter` configuration parameter 9 in between. 
10 11 Configuration parameters: 12 cache_timeout: refresh interval for this module (default 30) 13 delimiter: the delimiter between parent and child objects (default '-') 14 format: display format for this module (default None) 15 url: specify URL to fetch JSON from (default None) 16 17 Format placeholders: 18 Placeholders will be replaced by the JSON keys. 19 20 Placeholders for objects with sub-objects are flattened using 'delimiter' 21 in between (eg. {'parent': {'child': 'value'}} will use placeholder 22 {parent-child}). 23 24 Placeholders for list elements have 'delimiter' followed by the index 25 (eg. {'parent': ['this', 'that']) will use placeholders {parent-0} 26 for 'this' and {parent-1} for 'that'. 27 28 Examples: 29 ``` 30 # straightforward key replacement 31 url = 'http://ip-api.com/json' 32 format = '{lat}, {lon}' 33 34 # access child objects 35 url = 'https://api.icndb.com/jokes/random' 36 format = '{value-joke}' 37 38 # access title from 0th element of articles list 39 url = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}' 40 format = '{articles-0-title}' 41 42 # access if top-level object is a list 43 url = 'https://jsonplaceholder.typicode.com/posts/1/comments' 44 format = '{0-name}' 45 ``` 46 47 @author vicyap 48 49 SAMPLE OUTPUT 50 {'full_text': 'Github: Everything operating normally'} 51 """ 52 53 STRING_ERROR = "missing url" 54 55 56 class Py3status: 57 """ 58 """ 59 60 # available configuration parameters 61 cache_timeout = 30 62 delimiter = "-" 63 format = None 64 url = None 65 66 class Meta: 67 deprecated = { 68 "rename": [ 69 { 70 "param": "timeout", 71 "new": "request_timeout", 72 "msg": "obsolete parameter use `request_timeout`", 73 } 74 ] 75 } 76 77 def post_config_hook(self): 78 if not self.url: 79 raise Exception(STRING_ERROR) 80 81 def getjson(self): 82 """ 83 """ 84 try: 85 json_data = self.py3.request(self.url).json() 86 json_data = self.py3.flatten_dict(json_data, self.delimiter, True) 87 except self.py3.RequestException: 88 json_data = None 89 90 if json_data: 91 full_text = self.py3.safe_format(self.format, json_data) 92 else: 93 full_text = "" 94 95 return { 96 "cached_until": self.py3.time_in(self.cache_timeout), 97 "full_text": full_text, 98 } 99 100 101 if __name__ == "__main__": 102 """ 103 Run module in test mode. 104 """ 105 from py3status.module_test import module_test 106 107 module_test(Py3status) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/py3status/modules/getjson.py b/py3status/modules/getjson.py --- a/py3status/modules/getjson.py +++ b/py3status/modules/getjson.py @@ -12,7 +12,9 @@ cache_timeout: refresh interval for this module (default 30) delimiter: the delimiter between parent and child objects (default '-') format: display format for this module (default None) + password: basic auth password information (default None) url: specify URL to fetch JSON from (default None) + username: basic auth user information (default None) Format placeholders: Placeholders will be replaced by the JSON keys. @@ -61,7 +63,9 @@ cache_timeout = 30 delimiter = "-" format = None + password = None url = None + username = None class Meta: deprecated = { @@ -82,7 +86,11 @@ """ """ try: - json_data = self.py3.request(self.url).json() + if self.username is not None and self.password is not None: + auth = (self.username, self.password) + else: + auth = None + json_data = self.py3.request(self.url, auth=auth).json() json_data = self.py3.flatten_dict(json_data, self.delimiter, True) except self.py3.RequestException: json_data = None
{"golden_diff": "diff --git a/py3status/modules/getjson.py b/py3status/modules/getjson.py\n--- a/py3status/modules/getjson.py\n+++ b/py3status/modules/getjson.py\n@@ -12,7 +12,9 @@\n cache_timeout: refresh interval for this module (default 30)\n delimiter: the delimiter between parent and child objects (default '-')\n format: display format for this module (default None)\n+ password: basic auth password information (default None)\n url: specify URL to fetch JSON from (default None)\n+ username: basic auth user information (default None)\n \n Format placeholders:\n Placeholders will be replaced by the JSON keys.\n@@ -61,7 +63,9 @@\n cache_timeout = 30\n delimiter = \"-\"\n format = None\n+ password = None\n url = None\n+ username = None\n \n class Meta:\n deprecated = {\n@@ -82,7 +86,11 @@\n \"\"\"\n \"\"\"\n try:\n- json_data = self.py3.request(self.url).json()\n+ if self.username is not None and self.password is not None:\n+ auth = (self.username, self.password)\n+ else:\n+ auth = None\n+ json_data = self.py3.request(self.url, auth=auth).json()\n json_data = self.py3.flatten_dict(json_data, self.delimiter, True)\n except self.py3.RequestException:\n json_data = None\n", "issue": "error basic auth url encoding\nI get the following error when attempting to use basic auth url encoding.\r\n\r\n```\r\n$ curl https://user:[email protected]/test.json \r\n{\"some\":\" data\"}\r\n\r\n$ cat config \r\norder += \"getjson test\"\r\n\r\ngetjson test {\r\n url = \"https://user:[email protected]/test.json\"\r\n format = \"{some}\"\r\n cache_timeout = 300\r\n}\r\n\r\n$ py3status --config config --log-file log-file --wm sway\r\n{\"version\": 1, \"click_events\": true, \"stop_signal\": 20}\r\n[[]\r\n,[{\"full_text\": \"getjson test\", \"color\": \"#FF0000\", \"instance\": \"test\", \"name\": \"getjson\"}]\r\n^C\r\n\r\n$ cat log-file\r\n2019-12-15 13:00:36 INFO ========\r\n2019-12-15 13:00:36 INFO Starting py3status version 3.22 python 3.8.0\r\n2019-12-15 13:00:36 INFO window manager: sway\r\n2019-12-15 13:00:36 INFO config file: config\r\n2019-12-15 13:00:36 INFO modules include paths: []\r\n2019-12-15 13:00:36 INFO loading module \"getjson test\" from py3status.modules.getjson\r\n2019-12-15 13:00:36 INFO starting module getjson test\r\n2019-12-15 13:00:36 WARNING Instance `getjson test`, user method `getjson` failed (InvalidURL) request.py line 72.\r\n2019-12-15 13:00:36 INFO Traceback\r\nInvalidURL: nonnumeric port: '[email protected]'\r\n File \"/usr/lib/python3.8/site-packages/py3status/module.py\", line 946, in run\r\n response = method()\r\n File \"/usr/lib/python3.8/site-packages/py3status/modules/getjson.py\", line 85, in getjson\r\n json_data = self.py3.request(self.url).json()\r\n File \"/usr/lib/python3.8/site-packages/py3status/py3.py\", line 1318, in request\r\n return get_http_response()\r\n File \"/usr/lib/python3.8/site-packages/py3status/py3.py\", line 1306, in get_http_response\r\n return HttpResponse(\r\n File \"/usr/lib/python3.8/site-packages/py3status/request.py\", line 72, in __init__\r\n self._response = urlopen(request, data=data, timeout=timeout)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 222, in urlopen\r\n return opener.open(url, data, timeout)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 525, in open\r\n response = self._open(req, data)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 542, in _open\r\n result = self._call_chain(self.handle_open, protocol, protocol +\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 502, in _call_chain\r\n 
result = func(*args)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 1360, in https_open\r\n return self.do_open(http.client.HTTPSConnection, req,\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 1286, in do_open\r\n h = http_class(host, timeout=req.timeout, **http_conn_args)\r\n File \"/usr/lib/python3.8/http/client.py\", line 1357, in __init__\r\n super(HTTPSConnection, self).__init__(host, port, timeout,\r\n File \"/usr/lib/python3.8/http/client.py\", line 829, in __init__\r\n (self.host, self.port) = self._get_hostport(host, port)\r\n File \"/usr/lib/python3.8/http/client.py\", line 870, in _get_hostport\r\n raise InvalidURL(\"nonnumeric port: '%s'\" % host[i+1:])\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay JSON data fetched from a URL.\n\nThis module gets the given `url` configuration parameter and assumes the\nresponse is a JSON object. The keys of the JSON object are used as the format\nplaceholders. The format placeholders are replaced by the value. Objects that\nare nested can be accessed by using the `delimiter` configuration parameter\nin between.\n\nConfiguration parameters:\n cache_timeout: refresh interval for this module (default 30)\n delimiter: the delimiter between parent and child objects (default '-')\n format: display format for this module (default None)\n url: specify URL to fetch JSON from (default None)\n\nFormat placeholders:\n Placeholders will be replaced by the JSON keys.\n\n Placeholders for objects with sub-objects are flattened using 'delimiter'\n in between (eg. {'parent': {'child': 'value'}} will use placeholder\n {parent-child}).\n\n Placeholders for list elements have 'delimiter' followed by the index\n (eg. {'parent': ['this', 'that']) will use placeholders {parent-0}\n for 'this' and {parent-1} for 'that'.\n\nExamples:\n```\n# straightforward key replacement\nurl = 'http://ip-api.com/json'\nformat = '{lat}, {lon}'\n\n# access child objects\nurl = 'https://api.icndb.com/jokes/random'\nformat = '{value-joke}'\n\n# access title from 0th element of articles list\nurl = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'\nformat = '{articles-0-title}'\n\n# access if top-level object is a list\nurl = 'https://jsonplaceholder.typicode.com/posts/1/comments'\nformat = '{0-name}'\n```\n\n@author vicyap\n\nSAMPLE OUTPUT\n{'full_text': 'Github: Everything operating normally'}\n\"\"\"\n\nSTRING_ERROR = \"missing url\"\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n cache_timeout = 30\n delimiter = \"-\"\n format = None\n url = None\n\n class Meta:\n deprecated = {\n \"rename\": [\n {\n \"param\": \"timeout\",\n \"new\": \"request_timeout\",\n \"msg\": \"obsolete parameter use `request_timeout`\",\n }\n ]\n }\n\n def post_config_hook(self):\n if not self.url:\n raise Exception(STRING_ERROR)\n\n def getjson(self):\n \"\"\"\n \"\"\"\n try:\n json_data = self.py3.request(self.url).json()\n json_data = self.py3.flatten_dict(json_data, self.delimiter, True)\n except self.py3.RequestException:\n json_data = None\n\n if json_data:\n full_text = self.py3.safe_format(self.format, json_data)\n else:\n full_text = \"\"\n\n return {\n \"cached_until\": self.py3.time_in(self.cache_timeout),\n \"full_text\": full_text,\n }\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/getjson.py"}], "after_files": [{"content": "# -*- coding: utf-8 
-*-\n\"\"\"\nDisplay JSON data fetched from a URL.\n\nThis module gets the given `url` configuration parameter and assumes the\nresponse is a JSON object. The keys of the JSON object are used as the format\nplaceholders. The format placeholders are replaced by the value. Objects that\nare nested can be accessed by using the `delimiter` configuration parameter\nin between.\n\nConfiguration parameters:\n cache_timeout: refresh interval for this module (default 30)\n delimiter: the delimiter between parent and child objects (default '-')\n format: display format for this module (default None)\n password: basic auth password information (default None)\n url: specify URL to fetch JSON from (default None)\n username: basic auth user information (default None)\n\nFormat placeholders:\n Placeholders will be replaced by the JSON keys.\n\n Placeholders for objects with sub-objects are flattened using 'delimiter'\n in between (eg. {'parent': {'child': 'value'}} will use placeholder\n {parent-child}).\n\n Placeholders for list elements have 'delimiter' followed by the index\n (eg. {'parent': ['this', 'that']) will use placeholders {parent-0}\n for 'this' and {parent-1} for 'that'.\n\nExamples:\n```\n# straightforward key replacement\nurl = 'http://ip-api.com/json'\nformat = '{lat}, {lon}'\n\n# access child objects\nurl = 'https://api.icndb.com/jokes/random'\nformat = '{value-joke}'\n\n# access title from 0th element of articles list\nurl = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'\nformat = '{articles-0-title}'\n\n# access if top-level object is a list\nurl = 'https://jsonplaceholder.typicode.com/posts/1/comments'\nformat = '{0-name}'\n```\n\n@author vicyap\n\nSAMPLE OUTPUT\n{'full_text': 'Github: Everything operating normally'}\n\"\"\"\n\nSTRING_ERROR = \"missing url\"\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n cache_timeout = 30\n delimiter = \"-\"\n format = None\n password = None\n url = None\n username = None\n\n class Meta:\n deprecated = {\n \"rename\": [\n {\n \"param\": \"timeout\",\n \"new\": \"request_timeout\",\n \"msg\": \"obsolete parameter use `request_timeout`\",\n }\n ]\n }\n\n def post_config_hook(self):\n if not self.url:\n raise Exception(STRING_ERROR)\n\n def getjson(self):\n \"\"\"\n \"\"\"\n try:\n if self.username is not None and self.password is not None:\n auth = (self.username, self.password)\n else:\n auth = None\n json_data = self.py3.request(self.url, auth=auth).json()\n json_data = self.py3.flatten_dict(json_data, self.delimiter, True)\n except self.py3.RequestException:\n json_data = None\n\n if json_data:\n full_text = self.py3.safe_format(self.format, json_data)\n else:\n full_text = \"\"\n\n return {\n \"cached_until\": self.py3.time_in(self.cache_timeout),\n \"full_text\": full_text,\n }\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/getjson.py"}]}
2,127
318
gh_patches_debug_9779
rasdani/github-patches
git_diff
kedro-org__kedro-1057
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update pyarrow requirement to support newer versions ## Description The current pyarrow requirements don't allow pyarrow > 4.0.0, which is old (current version = 6.0.0). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import re 2 from codecs import open 3 from glob import glob 4 from itertools import chain 5 from os import path 6 7 from setuptools import find_packages, setup 8 9 name = "kedro" 10 here = path.abspath(path.dirname(__file__)) 11 12 13 PANDAS = "pandas>=0.24" 14 SPARK = "pyspark>=2.2, <4.0" 15 HDFS = "hdfs>=2.5.8, <3.0" 16 S3FS = "s3fs>=0.3.0, <0.5" 17 18 # get package version 19 with open(path.join(here, name, "__init__.py"), encoding="utf-8") as f: 20 result = re.search(r'__version__ = ["\']([^"\']+)', f.read()) 21 22 if not result: 23 raise ValueError("Can't find the version in kedro/__init__.py") 24 25 version = result.group(1) 26 27 # get the dependencies and installs 28 with open("requirements.txt", encoding="utf-8") as f: 29 requires = [x.strip() for x in f if x.strip()] 30 31 # get test dependencies and installs 32 with open("test_requirements.txt", encoding="utf-8") as f: 33 test_requires = [x.strip() for x in f if x.strip() and not x.startswith("-r")] 34 35 36 # Get the long description from the README file 37 with open(path.join(here, "README.md"), encoding="utf-8") as f: 38 readme = f.read() 39 40 doc_html_files = [ 41 name.replace("kedro/", "", 1) 42 for name in glob("kedro/framework/html/**/*", recursive=True) 43 ] 44 45 template_files = [] 46 for pattern in ["**/*", "**/.*", "**/.*/**", "**/.*/.**"]: 47 template_files.extend( 48 [ 49 name.replace("kedro/", "", 1) 50 for name in glob("kedro/templates/" + pattern, recursive=True) 51 ] 52 ) 53 54 55 def _collect_requirements(requires): 56 return sorted(set(chain.from_iterable(requires.values()))) 57 58 59 api_require = {"api.APIDataSet": ["requests~=2.20"]} 60 biosequence_require = {"biosequence.BioSequenceDataSet": ["biopython~=1.73"]} 61 dask_require = { 62 "dask.ParquetDataSet": [ 63 "dask>=2021.10.0, <2022.01; python_version > '3.6'", 64 "dask[complete]~=2.6; python_version == '3.6'", 65 ] 66 } 67 geopandas_require = { 68 "geopandas.GeoJSONDataSet": ["geopandas>=0.6.0, <1.0", "pyproj>=2.2.0, <3.0"] 69 } 70 matplotlib_require = {"matplotlib.MatplotlibWriter": ["matplotlib>=3.0.3, <4.0"]} 71 holoviews_require = {"holoviews.HoloviewsWriter": ["holoviews~=1.13.0"]} 72 networkx_require = {"networkx.NetworkXDataSet": ["networkx~=2.4"]} 73 pandas_require = { 74 "pandas.CSVDataSet": [PANDAS], 75 "pandas.ExcelDataSet": [PANDAS, "xlrd~=1.0", "xlsxwriter~=1.0"], 76 "pandas.AppendableExcelDataSet": [PANDAS, "openpyxl>=3.0.3, <4.0"], 77 "pandas.FeatherDataSet": [PANDAS], 78 "pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"], 79 "pandas.HDFDataSet": [PANDAS, "tables~=3.6"], 80 "pandas.JSONDataSet": [PANDAS], 81 "pandas.ParquetDataSet": [PANDAS, "pyarrow>=0.12.0, <4.0"], 82 "pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"], 83 "pandas.SQLQueryDataSet": [PANDAS, "SQLAlchemy~=1.2"], 84 "pandas.GenericDataSet": [PANDAS], 85 } 86 pillow_require = {"pillow.ImageDataSet": ["Pillow~=8.0"]} 87 plotly_require = { 88 "plotly.PlotlyDataSet": [PANDAS, "plotly>=4.8.0, <6.0"], 89 "plotly.JSONDataSet": ["plotly>=4.8.0, <6.0"], 90 } 91 spark_require = { 92 "spark.SparkDataSet": [SPARK, HDFS, 
S3FS], 93 "spark.SparkHiveDataSet": [SPARK, HDFS, S3FS], 94 "spark.SparkJDBCDataSet": [SPARK, HDFS, S3FS], 95 } 96 tensorflow_required = { 97 "tensorflow.TensorflowModelDataset": [ 98 # currently only TensorFlow V2 supported for saving and loading. 99 # V1 requires HDF5 and serializes differently 100 "tensorflow~=2.0" 101 ] 102 } 103 yaml_require = {"yaml.YAMLDataSet": [PANDAS, "PyYAML>=4.2, <6.0"]} 104 105 extras_require = { 106 "api": _collect_requirements(api_require), 107 "biosequence": _collect_requirements(biosequence_require), 108 "dask": _collect_requirements(dask_require), 109 "docs": [ 110 "docutils==0.16", 111 "sphinx~=3.4.3", 112 "sphinx_rtd_theme==0.4.1", 113 "nbsphinx==0.8.1", 114 "nbstripout~=0.4", 115 "recommonmark==0.7.1", 116 "sphinx-autodoc-typehints==1.11.1", 117 "sphinx_copybutton==0.3.1", 118 "ipykernel>=5.3, <7.0", 119 ], 120 "geopandas": _collect_requirements(geopandas_require), 121 "ipython": ["ipython~=7.10"], 122 "matplotlib": _collect_requirements(matplotlib_require), 123 "holoviews": _collect_requirements(holoviews_require), 124 "networkx": _collect_requirements(networkx_require), 125 "notebook_templates": ["nbconvert>=5.3.1, <6.0", "nbformat~=4.4"], 126 "pandas": _collect_requirements(pandas_require), 127 "pillow": _collect_requirements(pillow_require), 128 "plotly": _collect_requirements(plotly_require), 129 "profilers": ["memory_profiler>=0.50.0, <1.0"], 130 "spark": _collect_requirements(spark_require), 131 "tensorflow": _collect_requirements(tensorflow_required), 132 "yaml": _collect_requirements(yaml_require), 133 **api_require, 134 **biosequence_require, 135 **dask_require, 136 **geopandas_require, 137 **matplotlib_require, 138 **holoviews_require, 139 **networkx_require, 140 **pandas_require, 141 **pillow_require, 142 **plotly_require, 143 **spark_require, 144 **tensorflow_required, 145 **yaml_require, 146 } 147 148 extras_require["all"] = _collect_requirements(extras_require) 149 150 setup( 151 name=name, 152 version=version, 153 description="Kedro helps you build production-ready data and analytics pipelines", 154 license="Apache Software License (Apache 2.0)", 155 long_description=readme, 156 long_description_content_type="text/markdown", 157 url="https://github.com/quantumblacklabs/kedro", 158 python_requires=">=3.6, <3.9", 159 packages=find_packages(exclude=["docs*", "tests*", "tools*", "features*"]), 160 include_package_data=True, 161 tests_require=test_requires, 162 install_requires=requires, 163 author="QuantumBlack Labs", 164 entry_points={"console_scripts": ["kedro = kedro.framework.cli:main"]}, 165 package_data={ 166 name: ["py.typed", "test_requirements.txt"] + template_files + doc_html_files 167 }, 168 zip_safe=False, 169 keywords="pipelines, machine learning, data pipelines, data science, data engineering", 170 classifiers=[ 171 "Development Status :: 4 - Beta", 172 "Programming Language :: Python :: 3.6", 173 "Programming Language :: Python :: 3.7", 174 "Programming Language :: Python :: 3.8", 175 ], 176 extras_require=extras_require, 177 ) 178 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -78,7 +78,7 @@ "pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"], "pandas.HDFDataSet": [PANDAS, "tables~=3.6"], "pandas.JSONDataSet": [PANDAS], - "pandas.ParquetDataSet": [PANDAS, "pyarrow>=0.12.0, <4.0"], + "pandas.ParquetDataSet": [PANDAS, "pyarrow>=1.0, <7.0"], "pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"], "pandas.SQLQueryDataSet": [PANDAS, "SQLAlchemy~=1.2"], "pandas.GenericDataSet": [PANDAS],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -78,7 +78,7 @@\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6\"],\n \"pandas.JSONDataSet\": [PANDAS],\n- \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=0.12.0, <4.0\"],\n+ \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n", "issue": "Update pyarrow requirement to support newer versions\n## Description\r\nThe current pyarrow requirements don't allow pyarrow > 4.0.0, which is old (current version = 6.0.0).\r\n\n", "before_files": [{"content": "import re\nfrom codecs import open\nfrom glob import glob\nfrom itertools import chain\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nname = \"kedro\"\nhere = path.abspath(path.dirname(__file__))\n\n\nPANDAS = \"pandas>=0.24\"\nSPARK = \"pyspark>=2.2, <4.0\"\nHDFS = \"hdfs>=2.5.8, <3.0\"\nS3FS = \"s3fs>=0.3.0, <0.5\"\n\n# get package version\nwith open(path.join(here, name, \"__init__.py\"), encoding=\"utf-8\") as f:\n result = re.search(r'__version__ = [\"\\']([^\"\\']+)', f.read())\n\n if not result:\n raise ValueError(\"Can't find the version in kedro/__init__.py\")\n\n version = result.group(1)\n\n# get the dependencies and installs\nwith open(\"requirements.txt\", encoding=\"utf-8\") as f:\n requires = [x.strip() for x in f if x.strip()]\n\n# get test dependencies and installs\nwith open(\"test_requirements.txt\", encoding=\"utf-8\") as f:\n test_requires = [x.strip() for x in f if x.strip() and not x.startswith(\"-r\")]\n\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n readme = f.read()\n\ndoc_html_files = [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/framework/html/**/*\", recursive=True)\n]\n\ntemplate_files = []\nfor pattern in [\"**/*\", \"**/.*\", \"**/.*/**\", \"**/.*/.**\"]:\n template_files.extend(\n [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/templates/\" + pattern, recursive=True)\n ]\n )\n\n\ndef _collect_requirements(requires):\n return sorted(set(chain.from_iterable(requires.values())))\n\n\napi_require = {\"api.APIDataSet\": [\"requests~=2.20\"]}\nbiosequence_require = {\"biosequence.BioSequenceDataSet\": [\"biopython~=1.73\"]}\ndask_require = {\n \"dask.ParquetDataSet\": [\n \"dask>=2021.10.0, <2022.01; python_version > '3.6'\",\n \"dask[complete]~=2.6; python_version == '3.6'\",\n ]\n}\ngeopandas_require = {\n \"geopandas.GeoJSONDataSet\": [\"geopandas>=0.6.0, <1.0\", \"pyproj>=2.2.0, <3.0\"]\n}\nmatplotlib_require = {\"matplotlib.MatplotlibWriter\": [\"matplotlib>=3.0.3, <4.0\"]}\nholoviews_require = {\"holoviews.HoloviewsWriter\": [\"holoviews~=1.13.0\"]}\nnetworkx_require = {\"networkx.NetworkXDataSet\": [\"networkx~=2.4\"]}\npandas_require = {\n \"pandas.CSVDataSet\": [PANDAS],\n \"pandas.ExcelDataSet\": [PANDAS, \"xlrd~=1.0\", \"xlsxwriter~=1.0\"],\n \"pandas.AppendableExcelDataSet\": [PANDAS, \"openpyxl>=3.0.3, <4.0\"],\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6\"],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=0.12.0, <4.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n 
\"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n}\npillow_require = {\"pillow.ImageDataSet\": [\"Pillow~=8.0\"]}\nplotly_require = {\n \"plotly.PlotlyDataSet\": [PANDAS, \"plotly>=4.8.0, <6.0\"],\n \"plotly.JSONDataSet\": [\"plotly>=4.8.0, <6.0\"],\n}\nspark_require = {\n \"spark.SparkDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkHiveDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkJDBCDataSet\": [SPARK, HDFS, S3FS],\n}\ntensorflow_required = {\n \"tensorflow.TensorflowModelDataset\": [\n # currently only TensorFlow V2 supported for saving and loading.\n # V1 requires HDF5 and serializes differently\n \"tensorflow~=2.0\"\n ]\n}\nyaml_require = {\"yaml.YAMLDataSet\": [PANDAS, \"PyYAML>=4.2, <6.0\"]}\n\nextras_require = {\n \"api\": _collect_requirements(api_require),\n \"biosequence\": _collect_requirements(biosequence_require),\n \"dask\": _collect_requirements(dask_require),\n \"docs\": [\n \"docutils==0.16\",\n \"sphinx~=3.4.3\",\n \"sphinx_rtd_theme==0.4.1\",\n \"nbsphinx==0.8.1\",\n \"nbstripout~=0.4\",\n \"recommonmark==0.7.1\",\n \"sphinx-autodoc-typehints==1.11.1\",\n \"sphinx_copybutton==0.3.1\",\n \"ipykernel>=5.3, <7.0\",\n ],\n \"geopandas\": _collect_requirements(geopandas_require),\n \"ipython\": [\"ipython~=7.10\"],\n \"matplotlib\": _collect_requirements(matplotlib_require),\n \"holoviews\": _collect_requirements(holoviews_require),\n \"networkx\": _collect_requirements(networkx_require),\n \"notebook_templates\": [\"nbconvert>=5.3.1, <6.0\", \"nbformat~=4.4\"],\n \"pandas\": _collect_requirements(pandas_require),\n \"pillow\": _collect_requirements(pillow_require),\n \"plotly\": _collect_requirements(plotly_require),\n \"profilers\": [\"memory_profiler>=0.50.0, <1.0\"],\n \"spark\": _collect_requirements(spark_require),\n \"tensorflow\": _collect_requirements(tensorflow_required),\n \"yaml\": _collect_requirements(yaml_require),\n **api_require,\n **biosequence_require,\n **dask_require,\n **geopandas_require,\n **matplotlib_require,\n **holoviews_require,\n **networkx_require,\n **pandas_require,\n **pillow_require,\n **plotly_require,\n **spark_require,\n **tensorflow_required,\n **yaml_require,\n}\n\nextras_require[\"all\"] = _collect_requirements(extras_require)\n\nsetup(\n name=name,\n version=version,\n description=\"Kedro helps you build production-ready data and analytics pipelines\",\n license=\"Apache Software License (Apache 2.0)\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/quantumblacklabs/kedro\",\n python_requires=\">=3.6, <3.9\",\n packages=find_packages(exclude=[\"docs*\", \"tests*\", \"tools*\", \"features*\"]),\n include_package_data=True,\n tests_require=test_requires,\n install_requires=requires,\n author=\"QuantumBlack Labs\",\n entry_points={\"console_scripts\": [\"kedro = kedro.framework.cli:main\"]},\n package_data={\n name: [\"py.typed\", \"test_requirements.txt\"] + template_files + doc_html_files\n },\n zip_safe=False,\n keywords=\"pipelines, machine learning, data pipelines, data science, data engineering\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\nfrom codecs import open\nfrom glob import glob\nfrom itertools import chain\nfrom os import path\n\nfrom setuptools import find_packages, 
setup\n\nname = \"kedro\"\nhere = path.abspath(path.dirname(__file__))\n\n\nPANDAS = \"pandas>=0.24\"\nSPARK = \"pyspark>=2.2, <4.0\"\nHDFS = \"hdfs>=2.5.8, <3.0\"\nS3FS = \"s3fs>=0.3.0, <0.5\"\n\n# get package version\nwith open(path.join(here, name, \"__init__.py\"), encoding=\"utf-8\") as f:\n result = re.search(r'__version__ = [\"\\']([^\"\\']+)', f.read())\n\n if not result:\n raise ValueError(\"Can't find the version in kedro/__init__.py\")\n\n version = result.group(1)\n\n# get the dependencies and installs\nwith open(\"requirements.txt\", encoding=\"utf-8\") as f:\n requires = [x.strip() for x in f if x.strip()]\n\n# get test dependencies and installs\nwith open(\"test_requirements.txt\", encoding=\"utf-8\") as f:\n test_requires = [x.strip() for x in f if x.strip() and not x.startswith(\"-r\")]\n\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n readme = f.read()\n\ndoc_html_files = [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/framework/html/**/*\", recursive=True)\n]\n\ntemplate_files = []\nfor pattern in [\"**/*\", \"**/.*\", \"**/.*/**\", \"**/.*/.**\"]:\n template_files.extend(\n [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/templates/\" + pattern, recursive=True)\n ]\n )\n\n\ndef _collect_requirements(requires):\n return sorted(set(chain.from_iterable(requires.values())))\n\n\napi_require = {\"api.APIDataSet\": [\"requests~=2.20\"]}\nbiosequence_require = {\"biosequence.BioSequenceDataSet\": [\"biopython~=1.73\"]}\ndask_require = {\n \"dask.ParquetDataSet\": [\n \"dask>=2021.10.0, <2022.01; python_version > '3.6'\",\n \"dask[complete]~=2.6; python_version == '3.6'\",\n ]\n}\ngeopandas_require = {\n \"geopandas.GeoJSONDataSet\": [\"geopandas>=0.6.0, <1.0\", \"pyproj>=2.2.0, <3.0\"]\n}\nmatplotlib_require = {\"matplotlib.MatplotlibWriter\": [\"matplotlib>=3.0.3, <4.0\"]}\nholoviews_require = {\"holoviews.HoloviewsWriter\": [\"holoviews~=1.13.0\"]}\nnetworkx_require = {\"networkx.NetworkXDataSet\": [\"networkx~=2.4\"]}\npandas_require = {\n \"pandas.CSVDataSet\": [PANDAS],\n \"pandas.ExcelDataSet\": [PANDAS, \"xlrd~=1.0\", \"xlsxwriter~=1.0\"],\n \"pandas.AppendableExcelDataSet\": [PANDAS, \"openpyxl>=3.0.3, <4.0\"],\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6\"],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n}\npillow_require = {\"pillow.ImageDataSet\": [\"Pillow~=8.0\"]}\nplotly_require = {\n \"plotly.PlotlyDataSet\": [PANDAS, \"plotly>=4.8.0, <6.0\"],\n \"plotly.JSONDataSet\": [\"plotly>=4.8.0, <6.0\"],\n}\nspark_require = {\n \"spark.SparkDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkHiveDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkJDBCDataSet\": [SPARK, HDFS, S3FS],\n}\ntensorflow_required = {\n \"tensorflow.TensorflowModelDataset\": [\n # currently only TensorFlow V2 supported for saving and loading.\n # V1 requires HDF5 and serializes differently\n \"tensorflow~=2.0\"\n ]\n}\nyaml_require = {\"yaml.YAMLDataSet\": [PANDAS, \"PyYAML>=4.2, <6.0\"]}\n\nextras_require = {\n \"api\": _collect_requirements(api_require),\n \"biosequence\": _collect_requirements(biosequence_require),\n \"dask\": _collect_requirements(dask_require),\n \"docs\": [\n 
\"docutils==0.16\",\n \"sphinx~=3.4.3\",\n \"sphinx_rtd_theme==0.4.1\",\n \"nbsphinx==0.8.1\",\n \"nbstripout~=0.4\",\n \"recommonmark==0.7.1\",\n \"sphinx-autodoc-typehints==1.11.1\",\n \"sphinx_copybutton==0.3.1\",\n \"ipykernel>=5.3, <7.0\",\n ],\n \"geopandas\": _collect_requirements(geopandas_require),\n \"ipython\": [\"ipython~=7.10\"],\n \"matplotlib\": _collect_requirements(matplotlib_require),\n \"holoviews\": _collect_requirements(holoviews_require),\n \"networkx\": _collect_requirements(networkx_require),\n \"notebook_templates\": [\"nbconvert>=5.3.1, <6.0\", \"nbformat~=4.4\"],\n \"pandas\": _collect_requirements(pandas_require),\n \"pillow\": _collect_requirements(pillow_require),\n \"plotly\": _collect_requirements(plotly_require),\n \"profilers\": [\"memory_profiler>=0.50.0, <1.0\"],\n \"spark\": _collect_requirements(spark_require),\n \"tensorflow\": _collect_requirements(tensorflow_required),\n \"yaml\": _collect_requirements(yaml_require),\n **api_require,\n **biosequence_require,\n **dask_require,\n **geopandas_require,\n **matplotlib_require,\n **holoviews_require,\n **networkx_require,\n **pandas_require,\n **pillow_require,\n **plotly_require,\n **spark_require,\n **tensorflow_required,\n **yaml_require,\n}\n\nextras_require[\"all\"] = _collect_requirements(extras_require)\n\nsetup(\n name=name,\n version=version,\n description=\"Kedro helps you build production-ready data and analytics pipelines\",\n license=\"Apache Software License (Apache 2.0)\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/quantumblacklabs/kedro\",\n python_requires=\">=3.6, <3.9\",\n packages=find_packages(exclude=[\"docs*\", \"tests*\", \"tools*\", \"features*\"]),\n include_package_data=True,\n tests_require=test_requires,\n install_requires=requires,\n author=\"QuantumBlack Labs\",\n entry_points={\"console_scripts\": [\"kedro = kedro.framework.cli:main\"]},\n package_data={\n name: [\"py.typed\", \"test_requirements.txt\"] + template_files + doc_html_files\n },\n zip_safe=False,\n keywords=\"pipelines, machine learning, data pipelines, data science, data engineering\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
2,556
209
gh_patches_debug_13188
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-5738
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/zero/gemini/gemini_mgr.py` Content: ``` 1 import functools 2 from time import time 3 from typing import Dict, Iterable, List, Optional, Tuple 4 5 import torch 6 import torch.distributed as dist 7 8 from .chunk import Chunk, ChunkManager 9 from .memory_tracer import ChunkMemStatsCollector, MemStats 10 from .placement_policy import PlacementPolicy, PlacementPolicyFactory 11 12 13 class GeminiManager: 14 """ 15 Stateful Tensor Manager, inspired from PatrickStar 16 17 PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management 18 https://arxiv.org/abs/2108.05818 19 20 Args: 21 placement_policy (str): Which device to place *held* tensors. It can be 'static' and 'auto'. 22 If it's 'auto', they are moving dynamically based on CPU and CUDA memory usage. It will utilize heterogeneous memory space evenly and well. 23 Note that 'auto' policy can only work well when no other processes use CUDA during your training. 24 chunk_manager (ChunkManager): A ``ChunkManager`` instance. 25 memstats (MemStats, optional): a mem stats collected by a runtime mem tracer. if None then GeminiManager will collect it during a warmup iteration. 26 """ 27 28 def __init__( 29 self, 30 placement_policy: str, 31 chunk_manager: ChunkManager, 32 memstats: Optional[MemStats] = None, 33 **placement_kwargs, 34 ) -> None: 35 assert placement_policy in PlacementPolicyFactory.get_policy_names() 36 self.policy_name = placement_policy 37 policy_cls = PlacementPolicyFactory.create(placement_policy) 38 self._chunk_manager = chunk_manager 39 40 self._premade_memstats_ = memstats is not None 41 self._memstats = memstats 42 self._mem_stats_collector = ( 43 ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None 44 ) 45 self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs) 46 self._compute_list: List[Tuple[Chunk, ...]] = [] 47 self._compute_idx: int = -1 48 self._async_works: Dict[Chunk, dist.Work] = {} 49 50 self._h2d_volume = 0 51 self._d2h_volume = 0 52 self._layout_time = 0 53 self._evict_time = 0 54 self._warmup = True 55 self._comp_cuda_demand_time = 0 56 57 def reset_attributes(self): 58 self._compute_idx = -1 59 self._h2d_volume = 0 60 self._d2h_volume = 0 61 self._layout_time = 0 62 self._evict_time = 0 63 self._comp_cuda_demand_time = 0 64 65 @property 66 def need_warmup(self) -> bool: 67 return self.policy_name in ("auto", "const") 68 69 def is_warmup(self): 70 return self._warmup 71 72 def memstats(self): 73 """memstats 74 75 get the memory statistics during training. 76 The stats could be collected by a runtime memory tracer, or collected by the GeminiManager. 77 Note, for the latter, you can not access the memstats before warmup iteration finishes. 78 """ 79 if self._premade_memstats_: 80 return self._memstats 81 else: 82 assert not self._warmup, "Gemini Manager has memstats after warm up! Now is during warmup." 
83 return self._mem_stats_collector._memstats 84 85 def pre_iter(self, *args): 86 if self._mem_stats_collector and self._warmup: 87 self._mem_stats_collector.start_collection() 88 89 def post_iter(self): 90 """This function must be called when each iteration finishes""" 91 if self._mem_stats_collector and self._warmup: 92 self._mem_stats_collector.finish_collection() 93 self._warmup = False 94 self.reset_attributes() 95 96 def adjust_layout(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None: 97 """Adjust the layout of stateful tensors according to the information provided 98 by mem_stats_collector, which should belongs to a Sharded Model. 99 """ 100 # find stateful tensor in state COMPUTE 101 start = time() 102 self._record_warmup_chunks_order(chunks, record_anyway=record_anyway) 103 cuda_demand, can_evict_chunks = self._get_layout_info(self._compute_idx, self._warmup, chunks) 104 # don't evict chunks that are asynchronously fetched 105 can_evict_chunks = [chunk for chunk in can_evict_chunks if chunk not in self._async_works] 106 self._layout_time += time() - start 107 108 vol, evict_time = self._placement_policy.evict_tensors( 109 can_evict_chunks=can_evict_chunks, 110 cuda_demand=cuda_demand, 111 warmup=self._warmup, 112 compute_list=self._compute_list, 113 compute_idx=self._compute_idx, 114 ) 115 116 self._d2h_volume += vol 117 self._evict_time += evict_time 118 # move COMPUTE tensors to CUDA 119 self._h2d_volume += cuda_demand 120 121 def wait_chunks(self, chunks: Iterable[Chunk]) -> Tuple[Chunk]: 122 non_prefetched_chunks = [] 123 for chunk in chunks: 124 if chunk in self._async_works: 125 self._async_works[chunk].wait() 126 del self._async_works[chunk] 127 else: 128 non_prefetched_chunks.append(chunk) 129 return tuple(non_prefetched_chunks) 130 131 def add_work(self, chunk: Chunk, work: dist.Work): 132 assert work is not None 133 assert chunk not in self._async_works 134 self._async_works[chunk] = work 135 136 @functools.lru_cache(maxsize=None) 137 def _get_layout_info(self, compute_idx: int, warmup: bool, chunks: Tuple[Chunk, ...]): 138 start = time() 139 cuda_demand = 0 140 for chunk in chunks: 141 if chunk.device_type == "cuda" or chunk.device_type == "npu": 142 if chunk.is_gathered: 143 pass 144 else: 145 cuda_demand += chunk.chunk_mem - chunk.shard_mem 146 elif chunk.device_type == "cpu": 147 cuda_demand += chunk.chunk_mem 148 else: 149 raise RuntimeError 150 self._comp_cuda_demand_time += time() - start 151 152 can_evict_chunks = self._chunk_manager.get_cuda_movable_chunks() 153 return cuda_demand, can_evict_chunks 154 155 def _record_warmup_chunks_order(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None: 156 self._compute_idx += 1 157 if self._warmup and (self._placement_policy.need_mem_stats or record_anyway): 158 self._compute_list.append(chunks) 159 160 def sample_overall_data(self): 161 if self._mem_stats_collector: 162 self._mem_stats_collector.sample_overall_data() 163 164 def record_model_data_volume(self): 165 if self._mem_stats_collector: 166 self._mem_stats_collector.record_model_data_volume() 167 168 @property 169 def chunk_manager(self): 170 return self._chunk_manager 171 172 @property 173 def cuda_margin_mem(self) -> Optional[float]: 174 if self._mem_stats_collector: 175 return self._mem_stats_collector.cuda_margin_mem 176 return None 177 178 @property 179 def compute_list(self) -> List[Tuple[Chunk, ...]]: 180 return self._compute_list 181 182 @property 183 def compute_idx(self) -> int: 184 return self._compute_idx 185 186 
@property 187 def async_works(self) -> Dict[Chunk, dist.Work]: 188 return self._async_works 189 190 @property 191 def placement_policy(self) -> PlacementPolicy: 192 return self._placement_policy 193 194 @property 195 def is_cuda_margin_mem_avail(self) -> bool: 196 return self._placement_policy.need_mem_stats 197 198 def setup_grads_device( 199 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] 200 ) -> None: 201 self._placement_policy.setup_grads_device(params, grads_device_map) 202 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/zero/gemini/gemini_mgr.py b/colossalai/zero/gemini/gemini_mgr.py --- a/colossalai/zero/gemini/gemini_mgr.py +++ b/colossalai/zero/gemini/gemini_mgr.py @@ -42,7 +42,9 @@ self._mem_stats_collector = ( ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None ) - self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs) + self._placement_policy = policy_cls( + chunk_manager=chunk_manager, mem_stats_collector=self._mem_stats_collector, **placement_kwargs + ) self._compute_list: List[Tuple[Chunk, ...]] = [] self._compute_idx: int = -1 self._async_works: Dict[Chunk, dist.Work] = {}
{"golden_diff": "diff --git a/colossalai/zero/gemini/gemini_mgr.py b/colossalai/zero/gemini/gemini_mgr.py\n--- a/colossalai/zero/gemini/gemini_mgr.py\n+++ b/colossalai/zero/gemini/gemini_mgr.py\n@@ -42,7 +42,9 @@\n self._mem_stats_collector = (\n ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None\n )\n- self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs)\n+ self._placement_policy = policy_cls(\n+ chunk_manager=chunk_manager, mem_stats_collector=self._mem_stats_collector, **placement_kwargs\n+ )\n self._compute_list: List[Tuple[Chunk, ...]] = []\n self._compute_idx: int = -1\n self._async_works: Dict[Chunk, dist.Work] = {}\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import functools\nfrom time import time\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\n\nfrom .chunk import Chunk, ChunkManager\nfrom .memory_tracer import ChunkMemStatsCollector, MemStats\nfrom .placement_policy import PlacementPolicy, PlacementPolicyFactory\n\n\nclass GeminiManager:\n \"\"\"\n Stateful Tensor Manager, inspired from PatrickStar\n\n PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management\n https://arxiv.org/abs/2108.05818\n\n Args:\n placement_policy (str): Which device to place *held* tensors. It can be 'static' and 'auto'.\n If it's 'auto', they are moving dynamically based on CPU and CUDA memory usage. It will utilize heterogeneous memory space evenly and well.\n Note that 'auto' policy can only work well when no other processes use CUDA during your training.\n chunk_manager (ChunkManager): A ``ChunkManager`` instance.\n memstats (MemStats, optional): a mem stats collected by a runtime mem tracer. 
if None then GeminiManager will collect it during a warmup iteration.\n \"\"\"\n\n def __init__(\n self,\n placement_policy: str,\n chunk_manager: ChunkManager,\n memstats: Optional[MemStats] = None,\n **placement_kwargs,\n ) -> None:\n assert placement_policy in PlacementPolicyFactory.get_policy_names()\n self.policy_name = placement_policy\n policy_cls = PlacementPolicyFactory.create(placement_policy)\n self._chunk_manager = chunk_manager\n\n self._premade_memstats_ = memstats is not None\n self._memstats = memstats\n self._mem_stats_collector = (\n ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None\n )\n self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs)\n self._compute_list: List[Tuple[Chunk, ...]] = []\n self._compute_idx: int = -1\n self._async_works: Dict[Chunk, dist.Work] = {}\n\n self._h2d_volume = 0\n self._d2h_volume = 0\n self._layout_time = 0\n self._evict_time = 0\n self._warmup = True\n self._comp_cuda_demand_time = 0\n\n def reset_attributes(self):\n self._compute_idx = -1\n self._h2d_volume = 0\n self._d2h_volume = 0\n self._layout_time = 0\n self._evict_time = 0\n self._comp_cuda_demand_time = 0\n\n @property\n def need_warmup(self) -> bool:\n return self.policy_name in (\"auto\", \"const\")\n\n def is_warmup(self):\n return self._warmup\n\n def memstats(self):\n \"\"\"memstats\n\n get the memory statistics during training.\n The stats could be collected by a runtime memory tracer, or collected by the GeminiManager.\n Note, for the latter, you can not access the memstats before warmup iteration finishes.\n \"\"\"\n if self._premade_memstats_:\n return self._memstats\n else:\n assert not self._warmup, \"Gemini Manager has memstats after warm up! 
Now is during warmup.\"\n return self._mem_stats_collector._memstats\n\n def pre_iter(self, *args):\n if self._mem_stats_collector and self._warmup:\n self._mem_stats_collector.start_collection()\n\n def post_iter(self):\n \"\"\"This function must be called when each iteration finishes\"\"\"\n if self._mem_stats_collector and self._warmup:\n self._mem_stats_collector.finish_collection()\n self._warmup = False\n self.reset_attributes()\n\n def adjust_layout(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:\n \"\"\"Adjust the layout of stateful tensors according to the information provided\n by mem_stats_collector, which should belongs to a Sharded Model.\n \"\"\"\n # find stateful tensor in state COMPUTE\n start = time()\n self._record_warmup_chunks_order(chunks, record_anyway=record_anyway)\n cuda_demand, can_evict_chunks = self._get_layout_info(self._compute_idx, self._warmup, chunks)\n # don't evict chunks that are asynchronously fetched\n can_evict_chunks = [chunk for chunk in can_evict_chunks if chunk not in self._async_works]\n self._layout_time += time() - start\n\n vol, evict_time = self._placement_policy.evict_tensors(\n can_evict_chunks=can_evict_chunks,\n cuda_demand=cuda_demand,\n warmup=self._warmup,\n compute_list=self._compute_list,\n compute_idx=self._compute_idx,\n )\n\n self._d2h_volume += vol\n self._evict_time += evict_time\n # move COMPUTE tensors to CUDA\n self._h2d_volume += cuda_demand\n\n def wait_chunks(self, chunks: Iterable[Chunk]) -> Tuple[Chunk]:\n non_prefetched_chunks = []\n for chunk in chunks:\n if chunk in self._async_works:\n self._async_works[chunk].wait()\n del self._async_works[chunk]\n else:\n non_prefetched_chunks.append(chunk)\n return tuple(non_prefetched_chunks)\n\n def add_work(self, chunk: Chunk, work: dist.Work):\n assert work is not None\n assert chunk not in self._async_works\n self._async_works[chunk] = work\n\n @functools.lru_cache(maxsize=None)\n def _get_layout_info(self, compute_idx: int, warmup: bool, chunks: Tuple[Chunk, ...]):\n start = time()\n cuda_demand = 0\n for chunk in chunks:\n if chunk.device_type == \"cuda\" or chunk.device_type == \"npu\":\n if chunk.is_gathered:\n pass\n else:\n cuda_demand += chunk.chunk_mem - chunk.shard_mem\n elif chunk.device_type == \"cpu\":\n cuda_demand += chunk.chunk_mem\n else:\n raise RuntimeError\n self._comp_cuda_demand_time += time() - start\n\n can_evict_chunks = self._chunk_manager.get_cuda_movable_chunks()\n return cuda_demand, can_evict_chunks\n\n def _record_warmup_chunks_order(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:\n self._compute_idx += 1\n if self._warmup and (self._placement_policy.need_mem_stats or record_anyway):\n self._compute_list.append(chunks)\n\n def sample_overall_data(self):\n if self._mem_stats_collector:\n self._mem_stats_collector.sample_overall_data()\n\n def record_model_data_volume(self):\n if self._mem_stats_collector:\n self._mem_stats_collector.record_model_data_volume()\n\n @property\n def chunk_manager(self):\n return self._chunk_manager\n\n @property\n def cuda_margin_mem(self) -> Optional[float]:\n if self._mem_stats_collector:\n return self._mem_stats_collector.cuda_margin_mem\n return None\n\n @property\n def compute_list(self) -> List[Tuple[Chunk, ...]]:\n return self._compute_list\n\n @property\n def compute_idx(self) -> int:\n return self._compute_idx\n\n @property\n def async_works(self) -> Dict[Chunk, dist.Work]:\n return self._async_works\n\n @property\n def placement_policy(self) -> PlacementPolicy:\n 
return self._placement_policy\n\n @property\n def is_cuda_margin_mem_avail(self) -> bool:\n return self._placement_policy.need_mem_stats\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n self._placement_policy.setup_grads_device(params, grads_device_map)\n", "path": "colossalai/zero/gemini/gemini_mgr.py"}], "after_files": [{"content": "import functools\nfrom time import time\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\n\nfrom .chunk import Chunk, ChunkManager\nfrom .memory_tracer import ChunkMemStatsCollector, MemStats\nfrom .placement_policy import PlacementPolicy, PlacementPolicyFactory\n\n\nclass GeminiManager:\n \"\"\"\n Stateful Tensor Manager, inspired from PatrickStar\n\n PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management\n https://arxiv.org/abs/2108.05818\n\n Args:\n placement_policy (str): Which device to place *held* tensors. It can be 'static' and 'auto'.\n If it's 'auto', they are moving dynamically based on CPU and CUDA memory usage. It will utilize heterogeneous memory space evenly and well.\n Note that 'auto' policy can only work well when no other processes use CUDA during your training.\n chunk_manager (ChunkManager): A ``ChunkManager`` instance.\n memstats (MemStats, optional): a mem stats collected by a runtime mem tracer. if None then GeminiManager will collect it during a warmup iteration.\n \"\"\"\n\n def __init__(\n self,\n placement_policy: str,\n chunk_manager: ChunkManager,\n memstats: Optional[MemStats] = None,\n **placement_kwargs,\n ) -> None:\n assert placement_policy in PlacementPolicyFactory.get_policy_names()\n self.policy_name = placement_policy\n policy_cls = PlacementPolicyFactory.create(placement_policy)\n self._chunk_manager = chunk_manager\n\n self._premade_memstats_ = memstats is not None\n self._memstats = memstats\n self._mem_stats_collector = (\n ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None\n )\n self._placement_policy = policy_cls(\n chunk_manager=chunk_manager, mem_stats_collector=self._mem_stats_collector, **placement_kwargs\n )\n self._compute_list: List[Tuple[Chunk, ...]] = []\n self._compute_idx: int = -1\n self._async_works: Dict[Chunk, dist.Work] = {}\n\n self._h2d_volume = 0\n self._d2h_volume = 0\n self._layout_time = 0\n self._evict_time = 0\n self._warmup = True\n self._comp_cuda_demand_time = 0\n\n def reset_attributes(self):\n self._compute_idx = -1\n self._h2d_volume = 0\n self._d2h_volume = 0\n self._layout_time = 0\n self._evict_time = 0\n self._comp_cuda_demand_time = 0\n\n @property\n def need_warmup(self) -> bool:\n return self.policy_name in (\"auto\", \"const\")\n\n def is_warmup(self):\n return self._warmup\n\n def memstats(self):\n \"\"\"memstats\n\n get the memory statistics during training.\n The stats could be collected by a runtime memory tracer, or collected by the GeminiManager.\n Note, for the latter, you can not access the memstats before warmup iteration finishes.\n \"\"\"\n if self._premade_memstats_:\n return self._memstats\n else:\n assert not self._warmup, \"Gemini Manager has memstats after warm up! 
Now is during warmup.\"\n return self._mem_stats_collector._memstats\n\n def pre_iter(self, *args):\n if self._mem_stats_collector and self._warmup:\n self._mem_stats_collector.start_collection()\n\n def post_iter(self):\n \"\"\"This function must be called when each iteration finishes\"\"\"\n if self._mem_stats_collector and self._warmup:\n self._mem_stats_collector.finish_collection()\n self._warmup = False\n self.reset_attributes()\n\n def adjust_layout(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:\n \"\"\"Adjust the layout of stateful tensors according to the information provided\n by mem_stats_collector, which should belongs to a Sharded Model.\n \"\"\"\n # find stateful tensor in state COMPUTE\n start = time()\n self._record_warmup_chunks_order(chunks, record_anyway=record_anyway)\n cuda_demand, can_evict_chunks = self._get_layout_info(self._compute_idx, self._warmup, chunks)\n # don't evict chunks that are asynchronously fetched\n can_evict_chunks = [chunk for chunk in can_evict_chunks if chunk not in self._async_works]\n self._layout_time += time() - start\n\n vol, evict_time = self._placement_policy.evict_tensors(\n can_evict_chunks=can_evict_chunks,\n cuda_demand=cuda_demand,\n warmup=self._warmup,\n compute_list=self._compute_list,\n compute_idx=self._compute_idx,\n )\n\n self._d2h_volume += vol\n self._evict_time += evict_time\n # move COMPUTE tensors to CUDA\n self._h2d_volume += cuda_demand\n\n def wait_chunks(self, chunks: Iterable[Chunk]) -> Tuple[Chunk]:\n non_prefetched_chunks = []\n for chunk in chunks:\n if chunk in self._async_works:\n self._async_works[chunk].wait()\n del self._async_works[chunk]\n else:\n non_prefetched_chunks.append(chunk)\n return tuple(non_prefetched_chunks)\n\n def add_work(self, chunk: Chunk, work: dist.Work):\n assert work is not None\n assert chunk not in self._async_works\n self._async_works[chunk] = work\n\n @functools.lru_cache(maxsize=None)\n def _get_layout_info(self, compute_idx: int, warmup: bool, chunks: Tuple[Chunk, ...]):\n start = time()\n cuda_demand = 0\n for chunk in chunks:\n if chunk.device_type == \"cuda\" or chunk.device_type == \"npu\":\n if chunk.is_gathered:\n pass\n else:\n cuda_demand += chunk.chunk_mem - chunk.shard_mem\n elif chunk.device_type == \"cpu\":\n cuda_demand += chunk.chunk_mem\n else:\n raise RuntimeError\n self._comp_cuda_demand_time += time() - start\n\n can_evict_chunks = self._chunk_manager.get_cuda_movable_chunks()\n return cuda_demand, can_evict_chunks\n\n def _record_warmup_chunks_order(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:\n self._compute_idx += 1\n if self._warmup and (self._placement_policy.need_mem_stats or record_anyway):\n self._compute_list.append(chunks)\n\n def sample_overall_data(self):\n if self._mem_stats_collector:\n self._mem_stats_collector.sample_overall_data()\n\n def record_model_data_volume(self):\n if self._mem_stats_collector:\n self._mem_stats_collector.record_model_data_volume()\n\n @property\n def chunk_manager(self):\n return self._chunk_manager\n\n @property\n def cuda_margin_mem(self) -> Optional[float]:\n if self._mem_stats_collector:\n return self._mem_stats_collector.cuda_margin_mem\n return None\n\n @property\n def compute_list(self) -> List[Tuple[Chunk, ...]]:\n return self._compute_list\n\n @property\n def compute_idx(self) -> int:\n return self._compute_idx\n\n @property\n def async_works(self) -> Dict[Chunk, dist.Work]:\n return self._async_works\n\n @property\n def placement_policy(self) -> PlacementPolicy:\n 
return self._placement_policy\n\n @property\n def is_cuda_margin_mem_avail(self) -> bool:\n return self._placement_policy.need_mem_stats\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n self._placement_policy.setup_grads_device(params, grads_device_map)\n", "path": "colossalai/zero/gemini/gemini_mgr.py"}]}
2,563
218
gh_patches_debug_42050
rasdani/github-patches
git_diff
twisted__twisted-11611
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The implementation of the AMP protocol used for `trial -j...` could use decorator syntax now Long ago Python didn't necessarily support decorator syntax. So, while `Command.responder` was a decorator, it wasn't always used in Twisted using decorator syntax. Now Python necessarily supports the syntax and we can make the `trial -j ...` support code slightly more idiomatic. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/twisted/trial/_dist/worker.py` Content: ``` 1 # -*- test-case-name: twisted.trial._dist.test.test_worker -*- 2 # 3 # Copyright (c) Twisted Matrix Laboratories. 4 # See LICENSE for details. 5 6 """ 7 This module implements the worker classes. 8 9 @since: 12.3 10 """ 11 12 import os 13 from typing import Awaitable, Callable, Dict, List, Optional, TextIO, TypeVar 14 15 from zope.interface import implementer 16 17 from attrs import frozen 18 from typing_extensions import Protocol 19 20 from twisted.internet.defer import Deferred 21 from twisted.internet.error import ProcessDone 22 from twisted.internet.interfaces import IAddress, ITransport 23 from twisted.internet.protocol import ProcessProtocol 24 from twisted.protocols.amp import AMP 25 from twisted.python.failure import Failure 26 from twisted.python.filepath import FilePath 27 from twisted.python.reflect import namedObject 28 from twisted.trial._dist import ( 29 _WORKER_AMP_STDIN, 30 _WORKER_AMP_STDOUT, 31 managercommands, 32 workercommands, 33 ) 34 from twisted.trial._dist.workerreporter import WorkerReporter 35 from twisted.trial.reporter import TestResult 36 from twisted.trial.runner import TestLoader, TrialSuite 37 from twisted.trial.unittest import TestCase, Todo 38 39 40 @frozen(auto_exc=False) 41 class WorkerException(Exception): 42 """ 43 An exception was reported by a test running in a worker process. 44 45 :ivar message: An error message describing the exception. 46 """ 47 48 message: str 49 50 51 class Worker(Protocol): 52 """ 53 An object that can run actions. 54 """ 55 56 async def run(self, case: TestCase, result: TestResult) -> None: 57 """ 58 Run a test case. 59 """ 60 61 62 _T = TypeVar("_T") 63 WorkerAction = Callable[[Worker], Awaitable[_T]] 64 65 66 class WorkerProtocol(AMP): 67 """ 68 The worker-side trial distributed protocol. 69 """ 70 71 def __init__(self, forceGarbageCollection=False): 72 self._loader = TestLoader() 73 self._result = WorkerReporter(self) 74 self._forceGarbageCollection = forceGarbageCollection 75 76 @workercommands.Run.responder 77 def run(self, testCase): 78 """ 79 Run a test case by name. 80 """ 81 case = self._loader.loadByName(testCase) 82 suite = TrialSuite([case], self._forceGarbageCollection) 83 suite.run(self._result) 84 return {"success": True} 85 86 @workercommands.Start.responder 87 def start(self, directory): 88 """ 89 Set up the worker, moving into given directory for tests to run in 90 them. 91 """ 92 os.chdir(directory) 93 return {"success": True} 94 95 96 class LocalWorkerAMP(AMP): 97 """ 98 Local implementation of the manager commands. 99 """ 100 101 def addSuccess(self, testName): 102 """ 103 Add a success to the reporter. 
104 """ 105 self._result.addSuccess(self._testCase) 106 return {"success": True} 107 108 managercommands.AddSuccess.responder(addSuccess) 109 110 def _buildFailure( 111 self, 112 error: WorkerException, 113 errorClass: str, 114 frames: List[str], 115 ) -> Failure: 116 """ 117 Helper to build a C{Failure} with some traceback. 118 119 @param error: An C{Exception} instance. 120 121 @param errorClass: The class name of the C{error} class. 122 123 @param frames: A flat list of strings representing the information need 124 to approximatively rebuild C{Failure} frames. 125 126 @return: A L{Failure} instance with enough information about a test 127 error. 128 """ 129 errorType = namedObject(errorClass) 130 failure = Failure(error, errorType) 131 for i in range(0, len(frames), 3): 132 failure.frames.append( 133 (frames[i], frames[i + 1], int(frames[i + 2]), [], []) 134 ) 135 return failure 136 137 def addError( 138 self, 139 testName: str, 140 error: str, 141 errorClass: str, 142 frames: List[str], 143 ) -> Dict[str, bool]: 144 """ 145 Add an error to the reporter. 146 147 :param error: A message describing the error. 148 """ 149 # Wrap the error message in ``WorkerException`` because it is not 150 # possible to transfer arbitrary exception values over the AMP 151 # connection to the main process but we must give *some* Exception 152 # (not a str) to the test result object. 153 failure = self._buildFailure(WorkerException(error), errorClass, frames) 154 self._result.addError(self._testCase, failure) 155 return {"success": True} 156 157 managercommands.AddError.responder(addError) 158 159 def addFailure( 160 self, 161 testName: str, 162 fail: str, 163 failClass: str, 164 frames: List[str], 165 ) -> Dict[str, bool]: 166 """ 167 Add a failure to the reporter. 168 """ 169 # See addError for info about use of WorkerException here. 170 failure = self._buildFailure(WorkerException(fail), failClass, frames) 171 self._result.addFailure(self._testCase, failure) 172 return {"success": True} 173 174 managercommands.AddFailure.responder(addFailure) 175 176 def addSkip(self, testName, reason): 177 """ 178 Add a skip to the reporter. 179 """ 180 self._result.addSkip(self._testCase, reason) 181 return {"success": True} 182 183 managercommands.AddSkip.responder(addSkip) 184 185 def addExpectedFailure( 186 self, testName: str, error: str, todo: Optional[None] 187 ) -> Dict[str, bool]: 188 """ 189 Add an expected failure to the reporter. 190 """ 191 _todo = Todo(todo) 192 self._result.addExpectedFailure(self._testCase, error, _todo) 193 return {"success": True} 194 195 managercommands.AddExpectedFailure.responder(addExpectedFailure) 196 197 def addUnexpectedSuccess(self, testName, todo): 198 """ 199 Add an unexpected success to the reporter. 200 """ 201 self._result.addUnexpectedSuccess(self._testCase, todo) 202 return {"success": True} 203 204 managercommands.AddUnexpectedSuccess.responder(addUnexpectedSuccess) 205 206 def testWrite(self, out): 207 """ 208 Print test output from the worker. 209 """ 210 self._testStream.write(out + "\n") 211 self._testStream.flush() 212 return {"success": True} 213 214 managercommands.TestWrite.responder(testWrite) 215 216 def _stopTest(self, result): 217 """ 218 Stop the current running test case, forwarding the result. 219 """ 220 self._result.stopTest(self._testCase) 221 return result 222 223 def run(self, testCase, result): 224 """ 225 Run a test. 
226 """ 227 self._testCase = testCase 228 self._result = result 229 self._result.startTest(testCase) 230 testCaseId = testCase.id() 231 d = self.callRemote(workercommands.Run, testCase=testCaseId) 232 return d.addCallback(self._stopTest) 233 234 def setTestStream(self, stream): 235 """ 236 Set the stream used to log output from tests. 237 """ 238 self._testStream = stream 239 240 241 @implementer(IAddress) 242 class LocalWorkerAddress: 243 """ 244 A L{IAddress} implementation meant to provide stub addresses for 245 L{ITransport.getPeer} and L{ITransport.getHost}. 246 """ 247 248 249 @implementer(ITransport) 250 class LocalWorkerTransport: 251 """ 252 A stub transport implementation used to support L{AMP} over a 253 L{ProcessProtocol} transport. 254 """ 255 256 def __init__(self, transport): 257 self._transport = transport 258 259 def write(self, data): 260 """ 261 Forward data to transport. 262 """ 263 self._transport.writeToChild(_WORKER_AMP_STDIN, data) 264 265 def writeSequence(self, sequence): 266 """ 267 Emulate C{writeSequence} by iterating data in the C{sequence}. 268 """ 269 for data in sequence: 270 self._transport.writeToChild(_WORKER_AMP_STDIN, data) 271 272 def loseConnection(self): 273 """ 274 Closes the transport. 275 """ 276 self._transport.loseConnection() 277 278 def getHost(self): 279 """ 280 Return a L{LocalWorkerAddress} instance. 281 """ 282 return LocalWorkerAddress() 283 284 def getPeer(self): 285 """ 286 Return a L{LocalWorkerAddress} instance. 287 """ 288 return LocalWorkerAddress() 289 290 291 class NotRunning(Exception): 292 """ 293 An operation was attempted on a worker process which is not running. 294 """ 295 296 297 class LocalWorker(ProcessProtocol): 298 """ 299 Local process worker protocol. This worker runs as a local process and 300 communicates via stdin/out. 301 302 @ivar _ampProtocol: The L{AMP} protocol instance used to communicate with 303 the worker. 304 305 @ivar _logDirectory: The directory where logs will reside. 306 307 @ivar _logFile: The main log file for tests output. 308 """ 309 310 def __init__( 311 self, 312 ampProtocol: LocalWorkerAMP, 313 logDirectory: FilePath, 314 logFile: TextIO, 315 ): 316 self._ampProtocol = ampProtocol 317 self._logDirectory = logDirectory 318 self._logFile = logFile 319 self.endDeferred: Deferred = Deferred() 320 321 async def exit(self) -> None: 322 """ 323 Cause the worker process to exit. 324 """ 325 if self.transport is None: 326 raise NotRunning() 327 328 endDeferred = self.endDeferred 329 self.transport.closeChildFD(_WORKER_AMP_STDIN) 330 try: 331 await endDeferred 332 except ProcessDone: 333 pass 334 335 def connectionMade(self): 336 """ 337 When connection is made, create the AMP protocol instance. 338 """ 339 self._ampProtocol.makeConnection(LocalWorkerTransport(self.transport)) 340 self._logDirectory.makedirs(ignoreExistingDirectory=True) 341 self._outLog = self._logDirectory.child("out.log").open("w") 342 self._errLog = self._logDirectory.child("err.log").open("w") 343 self._ampProtocol.setTestStream(self._logFile) 344 d = self._ampProtocol.callRemote( 345 workercommands.Start, 346 directory=self._logDirectory.path, 347 ) 348 # Ignore the potential errors, the test suite will fail properly and it 349 # would just print garbage. 350 d.addErrback(lambda x: None) 351 352 def connectionLost(self, reason): 353 """ 354 On connection lost, close the log files that we're managing for stdin 355 and stdout. 
356 """ 357 self._outLog.close() 358 self._errLog.close() 359 self.transport = None 360 361 def processEnded(self, reason): 362 """ 363 When the process closes, call C{connectionLost} for cleanup purposes 364 and forward the information to the C{_ampProtocol}. 365 """ 366 self.connectionLost(reason) 367 self._ampProtocol.connectionLost(reason) 368 self.endDeferred.callback(reason) 369 370 def outReceived(self, data): 371 """ 372 Send data received from stdout to log. 373 """ 374 375 self._outLog.write(data) 376 377 def errReceived(self, data): 378 """ 379 Write error data to log. 380 """ 381 self._errLog.write(data) 382 383 def childDataReceived(self, childFD, data): 384 """ 385 Handle data received on the specific pipe for the C{_ampProtocol}. 386 """ 387 if childFD == _WORKER_AMP_STDOUT: 388 self._ampProtocol.dataReceived(data) 389 else: 390 ProcessProtocol.childDataReceived(self, childFD, data) 391 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/twisted/trial/_dist/worker.py b/src/twisted/trial/_dist/worker.py --- a/src/twisted/trial/_dist/worker.py +++ b/src/twisted/trial/_dist/worker.py @@ -98,6 +98,7 @@ Local implementation of the manager commands. """ + @managercommands.AddSuccess.responder def addSuccess(self, testName): """ Add a success to the reporter. @@ -105,8 +106,6 @@ self._result.addSuccess(self._testCase) return {"success": True} - managercommands.AddSuccess.responder(addSuccess) - def _buildFailure( self, error: WorkerException, @@ -134,6 +133,7 @@ ) return failure + @managercommands.AddError.responder def addError( self, testName: str, @@ -154,8 +154,7 @@ self._result.addError(self._testCase, failure) return {"success": True} - managercommands.AddError.responder(addError) - + @managercommands.AddFailure.responder def addFailure( self, testName: str, @@ -171,8 +170,7 @@ self._result.addFailure(self._testCase, failure) return {"success": True} - managercommands.AddFailure.responder(addFailure) - + @managercommands.AddSkip.responder def addSkip(self, testName, reason): """ Add a skip to the reporter. @@ -180,8 +178,7 @@ self._result.addSkip(self._testCase, reason) return {"success": True} - managercommands.AddSkip.responder(addSkip) - + @managercommands.AddExpectedFailure.responder def addExpectedFailure( self, testName: str, error: str, todo: Optional[None] ) -> Dict[str, bool]: @@ -192,8 +189,7 @@ self._result.addExpectedFailure(self._testCase, error, _todo) return {"success": True} - managercommands.AddExpectedFailure.responder(addExpectedFailure) - + @managercommands.AddUnexpectedSuccess.responder def addUnexpectedSuccess(self, testName, todo): """ Add an unexpected success to the reporter. @@ -201,8 +197,7 @@ self._result.addUnexpectedSuccess(self._testCase, todo) return {"success": True} - managercommands.AddUnexpectedSuccess.responder(addUnexpectedSuccess) - + @managercommands.TestWrite.responder def testWrite(self, out): """ Print test output from the worker. @@ -211,8 +206,6 @@ self._testStream.flush() return {"success": True} - managercommands.TestWrite.responder(testWrite) - def _stopTest(self, result): """ Stop the current running test case, forwarding the result.
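The patch above only moves existing responder registrations onto decorator syntax; it does not change behaviour. For readers unfamiliar with the pattern, here is a minimal sketch, not code from this repository: the `Sum` command and `MathAMP` class are invented for illustration, modelled on the standard AMP example in the Twisted documentation, contrasting the legacy post-hoc registration with the decorator form the patch adopts.

```python
from twisted.protocols.amp import AMP, Command, Integer


class Sum(Command):
    # An AMP command with two integer arguments and one integer response field.
    arguments = [(b"a", Integer()), (b"b", Integer())]
    response = [(b"total", Integer())]


class MathAMP(AMP):
    # Legacy style (what LocalWorkerAMP used before the patch): define the
    # method, then register it afterwards with an explicit call.
    #
    #     def sum(self, a, b):
    #         return {"total": a + b}
    #     Sum.responder(sum)
    #
    # Idiomatic style the patch switches to: the same registration expressed
    # with decorator syntax.
    @Sum.responder
    def sum(self, a, b):
        return {"total": a + b}
```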
{"golden_diff": "diff --git a/src/twisted/trial/_dist/worker.py b/src/twisted/trial/_dist/worker.py\n--- a/src/twisted/trial/_dist/worker.py\n+++ b/src/twisted/trial/_dist/worker.py\n@@ -98,6 +98,7 @@\n Local implementation of the manager commands.\n \"\"\"\n \n+ @managercommands.AddSuccess.responder\n def addSuccess(self, testName):\n \"\"\"\n Add a success to the reporter.\n@@ -105,8 +106,6 @@\n self._result.addSuccess(self._testCase)\n return {\"success\": True}\n \n- managercommands.AddSuccess.responder(addSuccess)\n-\n def _buildFailure(\n self,\n error: WorkerException,\n@@ -134,6 +133,7 @@\n )\n return failure\n \n+ @managercommands.AddError.responder\n def addError(\n self,\n testName: str,\n@@ -154,8 +154,7 @@\n self._result.addError(self._testCase, failure)\n return {\"success\": True}\n \n- managercommands.AddError.responder(addError)\n-\n+ @managercommands.AddFailure.responder\n def addFailure(\n self,\n testName: str,\n@@ -171,8 +170,7 @@\n self._result.addFailure(self._testCase, failure)\n return {\"success\": True}\n \n- managercommands.AddFailure.responder(addFailure)\n-\n+ @managercommands.AddSkip.responder\n def addSkip(self, testName, reason):\n \"\"\"\n Add a skip to the reporter.\n@@ -180,8 +178,7 @@\n self._result.addSkip(self._testCase, reason)\n return {\"success\": True}\n \n- managercommands.AddSkip.responder(addSkip)\n-\n+ @managercommands.AddExpectedFailure.responder\n def addExpectedFailure(\n self, testName: str, error: str, todo: Optional[None]\n ) -> Dict[str, bool]:\n@@ -192,8 +189,7 @@\n self._result.addExpectedFailure(self._testCase, error, _todo)\n return {\"success\": True}\n \n- managercommands.AddExpectedFailure.responder(addExpectedFailure)\n-\n+ @managercommands.AddUnexpectedSuccess.responder\n def addUnexpectedSuccess(self, testName, todo):\n \"\"\"\n Add an unexpected success to the reporter.\n@@ -201,8 +197,7 @@\n self._result.addUnexpectedSuccess(self._testCase, todo)\n return {\"success\": True}\n \n- managercommands.AddUnexpectedSuccess.responder(addUnexpectedSuccess)\n-\n+ @managercommands.TestWrite.responder\n def testWrite(self, out):\n \"\"\"\n Print test output from the worker.\n@@ -211,8 +206,6 @@\n self._testStream.flush()\n return {\"success\": True}\n \n- managercommands.TestWrite.responder(testWrite)\n-\n def _stopTest(self, result):\n \"\"\"\n Stop the current running test case, forwarding the result.\n", "issue": "The implementation of the AMP protocol used for `trial -j...` could use decorator syntax now\nLong ago Python didn't necessarily support decorator syntax. 
So, while `Command.responder` was a decorator, it wasn't always used in Twisted using decorator syntax.\r\n\r\nNow Python necessarily supports the syntax and we can make the `trial -j ...` support code slightly more idiomatic.\r\n\n", "before_files": [{"content": "# -*- test-case-name: twisted.trial._dist.test.test_worker -*-\n#\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nThis module implements the worker classes.\n\n@since: 12.3\n\"\"\"\n\nimport os\nfrom typing import Awaitable, Callable, Dict, List, Optional, TextIO, TypeVar\n\nfrom zope.interface import implementer\n\nfrom attrs import frozen\nfrom typing_extensions import Protocol\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet.error import ProcessDone\nfrom twisted.internet.interfaces import IAddress, ITransport\nfrom twisted.internet.protocol import ProcessProtocol\nfrom twisted.protocols.amp import AMP\nfrom twisted.python.failure import Failure\nfrom twisted.python.filepath import FilePath\nfrom twisted.python.reflect import namedObject\nfrom twisted.trial._dist import (\n _WORKER_AMP_STDIN,\n _WORKER_AMP_STDOUT,\n managercommands,\n workercommands,\n)\nfrom twisted.trial._dist.workerreporter import WorkerReporter\nfrom twisted.trial.reporter import TestResult\nfrom twisted.trial.runner import TestLoader, TrialSuite\nfrom twisted.trial.unittest import TestCase, Todo\n\n\n@frozen(auto_exc=False)\nclass WorkerException(Exception):\n \"\"\"\n An exception was reported by a test running in a worker process.\n\n :ivar message: An error message describing the exception.\n \"\"\"\n\n message: str\n\n\nclass Worker(Protocol):\n \"\"\"\n An object that can run actions.\n \"\"\"\n\n async def run(self, case: TestCase, result: TestResult) -> None:\n \"\"\"\n Run a test case.\n \"\"\"\n\n\n_T = TypeVar(\"_T\")\nWorkerAction = Callable[[Worker], Awaitable[_T]]\n\n\nclass WorkerProtocol(AMP):\n \"\"\"\n The worker-side trial distributed protocol.\n \"\"\"\n\n def __init__(self, forceGarbageCollection=False):\n self._loader = TestLoader()\n self._result = WorkerReporter(self)\n self._forceGarbageCollection = forceGarbageCollection\n\n @workercommands.Run.responder\n def run(self, testCase):\n \"\"\"\n Run a test case by name.\n \"\"\"\n case = self._loader.loadByName(testCase)\n suite = TrialSuite([case], self._forceGarbageCollection)\n suite.run(self._result)\n return {\"success\": True}\n\n @workercommands.Start.responder\n def start(self, directory):\n \"\"\"\n Set up the worker, moving into given directory for tests to run in\n them.\n \"\"\"\n os.chdir(directory)\n return {\"success\": True}\n\n\nclass LocalWorkerAMP(AMP):\n \"\"\"\n Local implementation of the manager commands.\n \"\"\"\n\n def addSuccess(self, testName):\n \"\"\"\n Add a success to the reporter.\n \"\"\"\n self._result.addSuccess(self._testCase)\n return {\"success\": True}\n\n managercommands.AddSuccess.responder(addSuccess)\n\n def _buildFailure(\n self,\n error: WorkerException,\n errorClass: str,\n frames: List[str],\n ) -> Failure:\n \"\"\"\n Helper to build a C{Failure} with some traceback.\n\n @param error: An C{Exception} instance.\n\n @param errorClass: The class name of the C{error} class.\n\n @param frames: A flat list of strings representing the information need\n to approximatively rebuild C{Failure} frames.\n\n @return: A L{Failure} instance with enough information about a test\n error.\n \"\"\"\n errorType = namedObject(errorClass)\n failure = Failure(error, errorType)\n for i in range(0, 
len(frames), 3):\n failure.frames.append(\n (frames[i], frames[i + 1], int(frames[i + 2]), [], [])\n )\n return failure\n\n def addError(\n self,\n testName: str,\n error: str,\n errorClass: str,\n frames: List[str],\n ) -> Dict[str, bool]:\n \"\"\"\n Add an error to the reporter.\n\n :param error: A message describing the error.\n \"\"\"\n # Wrap the error message in ``WorkerException`` because it is not\n # possible to transfer arbitrary exception values over the AMP\n # connection to the main process but we must give *some* Exception\n # (not a str) to the test result object.\n failure = self._buildFailure(WorkerException(error), errorClass, frames)\n self._result.addError(self._testCase, failure)\n return {\"success\": True}\n\n managercommands.AddError.responder(addError)\n\n def addFailure(\n self,\n testName: str,\n fail: str,\n failClass: str,\n frames: List[str],\n ) -> Dict[str, bool]:\n \"\"\"\n Add a failure to the reporter.\n \"\"\"\n # See addError for info about use of WorkerException here.\n failure = self._buildFailure(WorkerException(fail), failClass, frames)\n self._result.addFailure(self._testCase, failure)\n return {\"success\": True}\n\n managercommands.AddFailure.responder(addFailure)\n\n def addSkip(self, testName, reason):\n \"\"\"\n Add a skip to the reporter.\n \"\"\"\n self._result.addSkip(self._testCase, reason)\n return {\"success\": True}\n\n managercommands.AddSkip.responder(addSkip)\n\n def addExpectedFailure(\n self, testName: str, error: str, todo: Optional[None]\n ) -> Dict[str, bool]:\n \"\"\"\n Add an expected failure to the reporter.\n \"\"\"\n _todo = Todo(todo)\n self._result.addExpectedFailure(self._testCase, error, _todo)\n return {\"success\": True}\n\n managercommands.AddExpectedFailure.responder(addExpectedFailure)\n\n def addUnexpectedSuccess(self, testName, todo):\n \"\"\"\n Add an unexpected success to the reporter.\n \"\"\"\n self._result.addUnexpectedSuccess(self._testCase, todo)\n return {\"success\": True}\n\n managercommands.AddUnexpectedSuccess.responder(addUnexpectedSuccess)\n\n def testWrite(self, out):\n \"\"\"\n Print test output from the worker.\n \"\"\"\n self._testStream.write(out + \"\\n\")\n self._testStream.flush()\n return {\"success\": True}\n\n managercommands.TestWrite.responder(testWrite)\n\n def _stopTest(self, result):\n \"\"\"\n Stop the current running test case, forwarding the result.\n \"\"\"\n self._result.stopTest(self._testCase)\n return result\n\n def run(self, testCase, result):\n \"\"\"\n Run a test.\n \"\"\"\n self._testCase = testCase\n self._result = result\n self._result.startTest(testCase)\n testCaseId = testCase.id()\n d = self.callRemote(workercommands.Run, testCase=testCaseId)\n return d.addCallback(self._stopTest)\n\n def setTestStream(self, stream):\n \"\"\"\n Set the stream used to log output from tests.\n \"\"\"\n self._testStream = stream\n\n\n@implementer(IAddress)\nclass LocalWorkerAddress:\n \"\"\"\n A L{IAddress} implementation meant to provide stub addresses for\n L{ITransport.getPeer} and L{ITransport.getHost}.\n \"\"\"\n\n\n@implementer(ITransport)\nclass LocalWorkerTransport:\n \"\"\"\n A stub transport implementation used to support L{AMP} over a\n L{ProcessProtocol} transport.\n \"\"\"\n\n def __init__(self, transport):\n self._transport = transport\n\n def write(self, data):\n \"\"\"\n Forward data to transport.\n \"\"\"\n self._transport.writeToChild(_WORKER_AMP_STDIN, data)\n\n def writeSequence(self, sequence):\n \"\"\"\n Emulate C{writeSequence} by iterating data in the 
C{sequence}.\n \"\"\"\n for data in sequence:\n self._transport.writeToChild(_WORKER_AMP_STDIN, data)\n\n def loseConnection(self):\n \"\"\"\n Closes the transport.\n \"\"\"\n self._transport.loseConnection()\n\n def getHost(self):\n \"\"\"\n Return a L{LocalWorkerAddress} instance.\n \"\"\"\n return LocalWorkerAddress()\n\n def getPeer(self):\n \"\"\"\n Return a L{LocalWorkerAddress} instance.\n \"\"\"\n return LocalWorkerAddress()\n\n\nclass NotRunning(Exception):\n \"\"\"\n An operation was attempted on a worker process which is not running.\n \"\"\"\n\n\nclass LocalWorker(ProcessProtocol):\n \"\"\"\n Local process worker protocol. This worker runs as a local process and\n communicates via stdin/out.\n\n @ivar _ampProtocol: The L{AMP} protocol instance used to communicate with\n the worker.\n\n @ivar _logDirectory: The directory where logs will reside.\n\n @ivar _logFile: The main log file for tests output.\n \"\"\"\n\n def __init__(\n self,\n ampProtocol: LocalWorkerAMP,\n logDirectory: FilePath,\n logFile: TextIO,\n ):\n self._ampProtocol = ampProtocol\n self._logDirectory = logDirectory\n self._logFile = logFile\n self.endDeferred: Deferred = Deferred()\n\n async def exit(self) -> None:\n \"\"\"\n Cause the worker process to exit.\n \"\"\"\n if self.transport is None:\n raise NotRunning()\n\n endDeferred = self.endDeferred\n self.transport.closeChildFD(_WORKER_AMP_STDIN)\n try:\n await endDeferred\n except ProcessDone:\n pass\n\n def connectionMade(self):\n \"\"\"\n When connection is made, create the AMP protocol instance.\n \"\"\"\n self._ampProtocol.makeConnection(LocalWorkerTransport(self.transport))\n self._logDirectory.makedirs(ignoreExistingDirectory=True)\n self._outLog = self._logDirectory.child(\"out.log\").open(\"w\")\n self._errLog = self._logDirectory.child(\"err.log\").open(\"w\")\n self._ampProtocol.setTestStream(self._logFile)\n d = self._ampProtocol.callRemote(\n workercommands.Start,\n directory=self._logDirectory.path,\n )\n # Ignore the potential errors, the test suite will fail properly and it\n # would just print garbage.\n d.addErrback(lambda x: None)\n\n def connectionLost(self, reason):\n \"\"\"\n On connection lost, close the log files that we're managing for stdin\n and stdout.\n \"\"\"\n self._outLog.close()\n self._errLog.close()\n self.transport = None\n\n def processEnded(self, reason):\n \"\"\"\n When the process closes, call C{connectionLost} for cleanup purposes\n and forward the information to the C{_ampProtocol}.\n \"\"\"\n self.connectionLost(reason)\n self._ampProtocol.connectionLost(reason)\n self.endDeferred.callback(reason)\n\n def outReceived(self, data):\n \"\"\"\n Send data received from stdout to log.\n \"\"\"\n\n self._outLog.write(data)\n\n def errReceived(self, data):\n \"\"\"\n Write error data to log.\n \"\"\"\n self._errLog.write(data)\n\n def childDataReceived(self, childFD, data):\n \"\"\"\n Handle data received on the specific pipe for the C{_ampProtocol}.\n \"\"\"\n if childFD == _WORKER_AMP_STDOUT:\n self._ampProtocol.dataReceived(data)\n else:\n ProcessProtocol.childDataReceived(self, childFD, data)\n", "path": "src/twisted/trial/_dist/worker.py"}], "after_files": [{"content": "# -*- test-case-name: twisted.trial._dist.test.test_worker -*-\n#\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nThis module implements the worker classes.\n\n@since: 12.3\n\"\"\"\n\nimport os\nfrom typing import Awaitable, Callable, Dict, List, Optional, TextIO, TypeVar\n\nfrom zope.interface import 
implementer\n\nfrom attrs import frozen\nfrom typing_extensions import Protocol\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet.error import ProcessDone\nfrom twisted.internet.interfaces import IAddress, ITransport\nfrom twisted.internet.protocol import ProcessProtocol\nfrom twisted.protocols.amp import AMP\nfrom twisted.python.failure import Failure\nfrom twisted.python.filepath import FilePath\nfrom twisted.python.reflect import namedObject\nfrom twisted.trial._dist import (\n _WORKER_AMP_STDIN,\n _WORKER_AMP_STDOUT,\n managercommands,\n workercommands,\n)\nfrom twisted.trial._dist.workerreporter import WorkerReporter\nfrom twisted.trial.reporter import TestResult\nfrom twisted.trial.runner import TestLoader, TrialSuite\nfrom twisted.trial.unittest import TestCase, Todo\n\n\n@frozen(auto_exc=False)\nclass WorkerException(Exception):\n \"\"\"\n An exception was reported by a test running in a worker process.\n\n :ivar message: An error message describing the exception.\n \"\"\"\n\n message: str\n\n\nclass Worker(Protocol):\n \"\"\"\n An object that can run actions.\n \"\"\"\n\n async def run(self, case: TestCase, result: TestResult) -> None:\n \"\"\"\n Run a test case.\n \"\"\"\n\n\n_T = TypeVar(\"_T\")\nWorkerAction = Callable[[Worker], Awaitable[_T]]\n\n\nclass WorkerProtocol(AMP):\n \"\"\"\n The worker-side trial distributed protocol.\n \"\"\"\n\n def __init__(self, forceGarbageCollection=False):\n self._loader = TestLoader()\n self._result = WorkerReporter(self)\n self._forceGarbageCollection = forceGarbageCollection\n\n @workercommands.Run.responder\n def run(self, testCase):\n \"\"\"\n Run a test case by name.\n \"\"\"\n case = self._loader.loadByName(testCase)\n suite = TrialSuite([case], self._forceGarbageCollection)\n suite.run(self._result)\n return {\"success\": True}\n\n @workercommands.Start.responder\n def start(self, directory):\n \"\"\"\n Set up the worker, moving into given directory for tests to run in\n them.\n \"\"\"\n os.chdir(directory)\n return {\"success\": True}\n\n\nclass LocalWorkerAMP(AMP):\n \"\"\"\n Local implementation of the manager commands.\n \"\"\"\n\n @managercommands.AddSuccess.responder\n def addSuccess(self, testName):\n \"\"\"\n Add a success to the reporter.\n \"\"\"\n self._result.addSuccess(self._testCase)\n return {\"success\": True}\n\n def _buildFailure(\n self,\n error: WorkerException,\n errorClass: str,\n frames: List[str],\n ) -> Failure:\n \"\"\"\n Helper to build a C{Failure} with some traceback.\n\n @param error: An C{Exception} instance.\n\n @param errorClass: The class name of the C{error} class.\n\n @param frames: A flat list of strings representing the information need\n to approximatively rebuild C{Failure} frames.\n\n @return: A L{Failure} instance with enough information about a test\n error.\n \"\"\"\n errorType = namedObject(errorClass)\n failure = Failure(error, errorType)\n for i in range(0, len(frames), 3):\n failure.frames.append(\n (frames[i], frames[i + 1], int(frames[i + 2]), [], [])\n )\n return failure\n\n @managercommands.AddError.responder\n def addError(\n self,\n testName: str,\n error: str,\n errorClass: str,\n frames: List[str],\n ) -> Dict[str, bool]:\n \"\"\"\n Add an error to the reporter.\n\n :param error: A message describing the error.\n \"\"\"\n # Wrap the error message in ``WorkerException`` because it is not\n # possible to transfer arbitrary exception values over the AMP\n # connection to the main process but we must give *some* Exception\n # (not a str) to the test result 
object.\n failure = self._buildFailure(WorkerException(error), errorClass, frames)\n self._result.addError(self._testCase, failure)\n return {\"success\": True}\n\n @managercommands.AddFailure.responder\n def addFailure(\n self,\n testName: str,\n fail: str,\n failClass: str,\n frames: List[str],\n ) -> Dict[str, bool]:\n \"\"\"\n Add a failure to the reporter.\n \"\"\"\n # See addError for info about use of WorkerException here.\n failure = self._buildFailure(WorkerException(fail), failClass, frames)\n self._result.addFailure(self._testCase, failure)\n return {\"success\": True}\n\n @managercommands.AddSkip.responder\n def addSkip(self, testName, reason):\n \"\"\"\n Add a skip to the reporter.\n \"\"\"\n self._result.addSkip(self._testCase, reason)\n return {\"success\": True}\n\n @managercommands.AddExpectedFailure.responder\n def addExpectedFailure(\n self, testName: str, error: str, todo: Optional[None]\n ) -> Dict[str, bool]:\n \"\"\"\n Add an expected failure to the reporter.\n \"\"\"\n _todo = Todo(todo)\n self._result.addExpectedFailure(self._testCase, error, _todo)\n return {\"success\": True}\n\n @managercommands.AddUnexpectedSuccess.responder\n def addUnexpectedSuccess(self, testName, todo):\n \"\"\"\n Add an unexpected success to the reporter.\n \"\"\"\n self._result.addUnexpectedSuccess(self._testCase, todo)\n return {\"success\": True}\n\n @managercommands.TestWrite.responder\n def testWrite(self, out):\n \"\"\"\n Print test output from the worker.\n \"\"\"\n self._testStream.write(out + \"\\n\")\n self._testStream.flush()\n return {\"success\": True}\n\n def _stopTest(self, result):\n \"\"\"\n Stop the current running test case, forwarding the result.\n \"\"\"\n self._result.stopTest(self._testCase)\n return result\n\n def run(self, testCase, result):\n \"\"\"\n Run a test.\n \"\"\"\n self._testCase = testCase\n self._result = result\n self._result.startTest(testCase)\n testCaseId = testCase.id()\n d = self.callRemote(workercommands.Run, testCase=testCaseId)\n return d.addCallback(self._stopTest)\n\n def setTestStream(self, stream):\n \"\"\"\n Set the stream used to log output from tests.\n \"\"\"\n self._testStream = stream\n\n\n@implementer(IAddress)\nclass LocalWorkerAddress:\n \"\"\"\n A L{IAddress} implementation meant to provide stub addresses for\n L{ITransport.getPeer} and L{ITransport.getHost}.\n \"\"\"\n\n\n@implementer(ITransport)\nclass LocalWorkerTransport:\n \"\"\"\n A stub transport implementation used to support L{AMP} over a\n L{ProcessProtocol} transport.\n \"\"\"\n\n def __init__(self, transport):\n self._transport = transport\n\n def write(self, data):\n \"\"\"\n Forward data to transport.\n \"\"\"\n self._transport.writeToChild(_WORKER_AMP_STDIN, data)\n\n def writeSequence(self, sequence):\n \"\"\"\n Emulate C{writeSequence} by iterating data in the C{sequence}.\n \"\"\"\n for data in sequence:\n self._transport.writeToChild(_WORKER_AMP_STDIN, data)\n\n def loseConnection(self):\n \"\"\"\n Closes the transport.\n \"\"\"\n self._transport.loseConnection()\n\n def getHost(self):\n \"\"\"\n Return a L{LocalWorkerAddress} instance.\n \"\"\"\n return LocalWorkerAddress()\n\n def getPeer(self):\n \"\"\"\n Return a L{LocalWorkerAddress} instance.\n \"\"\"\n return LocalWorkerAddress()\n\n\nclass NotRunning(Exception):\n \"\"\"\n An operation was attempted on a worker process which is not running.\n \"\"\"\n\n\nclass LocalWorker(ProcessProtocol):\n \"\"\"\n Local process worker protocol. 
This worker runs as a local process and\n communicates via stdin/out.\n\n @ivar _ampProtocol: The L{AMP} protocol instance used to communicate with\n the worker.\n\n @ivar _logDirectory: The directory where logs will reside.\n\n @ivar _logFile: The main log file for tests output.\n \"\"\"\n\n def __init__(\n self,\n ampProtocol: LocalWorkerAMP,\n logDirectory: FilePath,\n logFile: TextIO,\n ):\n self._ampProtocol = ampProtocol\n self._logDirectory = logDirectory\n self._logFile = logFile\n self.endDeferred: Deferred = Deferred()\n\n async def exit(self) -> None:\n \"\"\"\n Cause the worker process to exit.\n \"\"\"\n if self.transport is None:\n raise NotRunning()\n\n endDeferred = self.endDeferred\n self.transport.closeChildFD(_WORKER_AMP_STDIN)\n try:\n await endDeferred\n except ProcessDone:\n pass\n\n def connectionMade(self):\n \"\"\"\n When connection is made, create the AMP protocol instance.\n \"\"\"\n self._ampProtocol.makeConnection(LocalWorkerTransport(self.transport))\n self._logDirectory.makedirs(ignoreExistingDirectory=True)\n self._outLog = self._logDirectory.child(\"out.log\").open(\"w\")\n self._errLog = self._logDirectory.child(\"err.log\").open(\"w\")\n self._ampProtocol.setTestStream(self._logFile)\n d = self._ampProtocol.callRemote(\n workercommands.Start,\n directory=self._logDirectory.path,\n )\n # Ignore the potential errors, the test suite will fail properly and it\n # would just print garbage.\n d.addErrback(lambda x: None)\n\n def connectionLost(self, reason):\n \"\"\"\n On connection lost, close the log files that we're managing for stdin\n and stdout.\n \"\"\"\n self._outLog.close()\n self._errLog.close()\n self.transport = None\n\n def processEnded(self, reason):\n \"\"\"\n When the process closes, call C{connectionLost} for cleanup purposes\n and forward the information to the C{_ampProtocol}.\n \"\"\"\n self.connectionLost(reason)\n self._ampProtocol.connectionLost(reason)\n self.endDeferred.callback(reason)\n\n def outReceived(self, data):\n \"\"\"\n Send data received from stdout to log.\n \"\"\"\n\n self._outLog.write(data)\n\n def errReceived(self, data):\n \"\"\"\n Write error data to log.\n \"\"\"\n self._errLog.write(data)\n\n def childDataReceived(self, childFD, data):\n \"\"\"\n Handle data received on the specific pipe for the C{_ampProtocol}.\n \"\"\"\n if childFD == _WORKER_AMP_STDOUT:\n self._ampProtocol.dataReceived(data)\n else:\n ProcessProtocol.childDataReceived(self, childFD, data)\n", "path": "src/twisted/trial/_dist/worker.py"}]}
3837
668
gh_patches_debug_27703
rasdani/github-patches
git_diff
pre-commit__pre-commit-1395
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pre-commt run --all-files only runs on what is staged still When I run .git_hooks/pre-commit run --all-files, pre-commit still only runs on what is staged. Infact, passing any options to pre-commit doesn't seem to have any affect on what is executed. ``` git_hooks/pre-commit run --all-files Trim Trailing Whitespace.............................(no files to check)Skipped Fix End of Files.....................................(no files to check)Skipped Check Yaml...........................................(no files to check)Skipped Check for added large files..........................(no files to check)Skipped Check JSON...........................................(no files to check)Skipped Check Xml............................................(no files to check)Skipped Check for merge conflicts............................(no files to check)Skipped Pretty format JSON...................................(no files to check)Skipped ``` ``` .git_hooks/pre-commit flake8 Trim Trailing Whitespace.............................(no files to check)Skipped Fix End of Files.....................................(no files to check)Skipped Check Yaml...........................................(no files to check)Skipped Check for added large files..........................(no files to check)Skipped Check JSON...........................................(no files to check)Skipped Check Xml............................................(no files to check)Skipped Check for merge conflicts............................(no files to check)Skipped Pretty format JSON...................................(no files to check)Skipped ``` Wondering if my install is somehow incorrect? Thx, Phil --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/commands/hook_impl.py` Content: ``` 1 import argparse 2 import os.path 3 import subprocess 4 import sys 5 from typing import Optional 6 from typing import Sequence 7 from typing import Tuple 8 9 from pre_commit.commands.run import run 10 from pre_commit.envcontext import envcontext 11 from pre_commit.parse_shebang import normalize_cmd 12 from pre_commit.store import Store 13 14 Z40 = '0' * 40 15 16 17 def _run_legacy( 18 hook_type: str, 19 hook_dir: str, 20 args: Sequence[str], 21 ) -> Tuple[int, bytes]: 22 if os.environ.get('PRE_COMMIT_RUNNING_LEGACY'): 23 raise SystemExit( 24 f"bug: pre-commit's script is installed in migration mode\n" 25 f'run `pre-commit install -f --hook-type {hook_type}` to fix ' 26 f'this\n\n' 27 f'Please report this bug at ' 28 f'https://github.com/pre-commit/pre-commit/issues', 29 ) 30 31 if hook_type == 'pre-push': 32 stdin = sys.stdin.buffer.read() 33 else: 34 stdin = b'' 35 36 # not running in legacy mode 37 legacy_hook = os.path.join(hook_dir, f'{hook_type}.legacy') 38 if not os.access(legacy_hook, os.X_OK): 39 return 0, stdin 40 41 with envcontext((('PRE_COMMIT_RUNNING_LEGACY', '1'),)): 42 cmd = normalize_cmd((legacy_hook, *args)) 43 return subprocess.run(cmd, input=stdin).returncode, stdin 44 45 46 def _validate_config( 47 retv: int, 48 config: str, 49 skip_on_missing_config: bool, 50 ) -> None: 51 if not os.path.isfile(config): 52 if skip_on_missing_config or os.getenv('PRE_COMMIT_ALLOW_NO_CONFIG'): 53 print(f'`{config}` config file not found. 
Skipping `pre-commit`.') 54 raise SystemExit(retv) 55 else: 56 print( 57 f'No {config} file was found\n' 58 f'- To temporarily silence this, run ' 59 f'`PRE_COMMIT_ALLOW_NO_CONFIG=1 git ...`\n' 60 f'- To permanently silence this, install pre-commit with the ' 61 f'--allow-missing-config option\n' 62 f'- To uninstall pre-commit run `pre-commit uninstall`', 63 ) 64 raise SystemExit(1) 65 66 67 def _ns( 68 hook_type: str, 69 color: bool, 70 *, 71 all_files: bool = False, 72 from_ref: Optional[str] = None, 73 to_ref: Optional[str] = None, 74 remote_name: Optional[str] = None, 75 remote_url: Optional[str] = None, 76 commit_msg_filename: Optional[str] = None, 77 checkout_type: Optional[str] = None, 78 ) -> argparse.Namespace: 79 return argparse.Namespace( 80 color=color, 81 hook_stage=hook_type.replace('pre-', ''), 82 from_ref=from_ref, 83 to_ref=to_ref, 84 remote_name=remote_name, 85 remote_url=remote_url, 86 commit_msg_filename=commit_msg_filename, 87 all_files=all_files, 88 checkout_type=checkout_type, 89 files=(), 90 hook=None, 91 verbose=False, 92 show_diff_on_failure=False, 93 ) 94 95 96 def _rev_exists(rev: str) -> bool: 97 return not subprocess.call(('git', 'rev-list', '--quiet', rev)) 98 99 100 def _pre_push_ns( 101 color: bool, 102 args: Sequence[str], 103 stdin: bytes, 104 ) -> Optional[argparse.Namespace]: 105 remote_name = args[0] 106 remote_url = args[1] 107 108 for line in stdin.decode().splitlines(): 109 _, local_sha, _, remote_sha = line.split() 110 if local_sha == Z40: 111 continue 112 elif remote_sha != Z40 and _rev_exists(remote_sha): 113 return _ns( 114 'pre-push', color, 115 from_ref=remote_sha, to_ref=local_sha, 116 remote_name=remote_name, remote_url=remote_url, 117 ) 118 else: 119 # ancestors not found in remote 120 ancestors = subprocess.check_output(( 121 'git', 'rev-list', local_sha, '--topo-order', '--reverse', 122 '--not', f'--remotes={remote_name}', 123 )).decode().strip() 124 if not ancestors: 125 continue 126 else: 127 first_ancestor = ancestors.splitlines()[0] 128 cmd = ('git', 'rev-list', '--max-parents=0', local_sha) 129 roots = set(subprocess.check_output(cmd).decode().splitlines()) 130 if first_ancestor in roots: 131 # pushing the whole tree including root commit 132 return _ns( 133 'pre-push', color, 134 all_files=True, 135 remote_name=remote_name, remote_url=remote_url, 136 ) 137 else: 138 rev_cmd = ('git', 'rev-parse', f'{first_ancestor}^') 139 source = subprocess.check_output(rev_cmd).decode().strip() 140 return _ns( 141 'pre-push', color, 142 from_ref=source, to_ref=local_sha, 143 remote_name=remote_name, remote_url=remote_url, 144 ) 145 146 # nothing to push 147 return None 148 149 150 def _run_ns( 151 hook_type: str, 152 color: bool, 153 args: Sequence[str], 154 stdin: bytes, 155 ) -> Optional[argparse.Namespace]: 156 if hook_type == 'pre-push': 157 return _pre_push_ns(color, args, stdin) 158 elif hook_type in {'prepare-commit-msg', 'commit-msg'}: 159 return _ns(hook_type, color, commit_msg_filename=args[0]) 160 elif hook_type in {'pre-merge-commit', 'pre-commit'}: 161 return _ns(hook_type, color) 162 elif hook_type == 'post-checkout': 163 return _ns( 164 hook_type, color, 165 from_ref=args[0], to_ref=args[1], checkout_type=args[2], 166 ) 167 else: 168 raise AssertionError(f'unexpected hook type: {hook_type}') 169 170 171 def hook_impl( 172 store: Store, 173 *, 174 config: str, 175 color: bool, 176 hook_type: str, 177 hook_dir: str, 178 skip_on_missing_config: bool, 179 args: Sequence[str], 180 ) -> int: 181 retv, stdin = _run_legacy(hook_type, 
hook_dir, args) 182 _validate_config(retv, config, skip_on_missing_config) 183 ns = _run_ns(hook_type, color, args, stdin) 184 if ns is None: 185 return retv 186 else: 187 return retv | run(config, store, ns) 188 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/commands/hook_impl.py b/pre_commit/commands/hook_impl.py --- a/pre_commit/commands/hook_impl.py +++ b/pre_commit/commands/hook_impl.py @@ -147,15 +147,44 @@ return None +_EXPECTED_ARG_LENGTH_BY_HOOK = { + 'commit-msg': 1, + 'post-checkout': 3, + 'pre-commit': 0, + 'pre-merge-commit': 0, + 'pre-push': 2, +} + + +def _check_args_length(hook_type: str, args: Sequence[str]) -> None: + if hook_type == 'prepare-commit-msg': + if len(args) < 1 or len(args) > 3: + raise SystemExit( + f'hook-impl for {hook_type} expected 1, 2, or 3 arguments ' + f'but got {len(args)}: {args}', + ) + elif hook_type in _EXPECTED_ARG_LENGTH_BY_HOOK: + expected = _EXPECTED_ARG_LENGTH_BY_HOOK[hook_type] + if len(args) != expected: + arguments_s = 'argument' if expected == 1 else 'arguments' + raise SystemExit( + f'hook-impl for {hook_type} expected {expected} {arguments_s} ' + f'but got {len(args)}: {args}', + ) + else: + raise AssertionError(f'unexpected hook type: {hook_type}') + + def _run_ns( hook_type: str, color: bool, args: Sequence[str], stdin: bytes, ) -> Optional[argparse.Namespace]: + _check_args_length(hook_type, args) if hook_type == 'pre-push': return _pre_push_ns(color, args, stdin) - elif hook_type in {'prepare-commit-msg', 'commit-msg'}: + elif hook_type in {'commit-msg', 'prepare-commit-msg'}: return _ns(hook_type, color, commit_msg_filename=args[0]) elif hook_type in {'pre-merge-commit', 'pre-commit'}: return _ns(hook_type, color)
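The patch above makes `hook-impl` validate the number of positional arguments each git hook passes in, instead of silently ignoring extras, which is what made `.git/hooks/pre-commit run --all-files` in the issue appear to run against staged files only. A rough sketch of the resulting behaviour follows; it is not a test from the repository, and the argument tuple is a hypothetical stand-in for what the misinvoked hook script forwards.

```python
from pre_commit.commands.hook_impl import _run_ns

try:
    # Invoking the installed hook script as ".git/hooks/pre-commit run --all-files"
    # forwards "run" and "--all-files" as extra positional arguments.
    _run_ns("pre-commit", True, ("run", "--all-files"), b"")
except SystemExit as exc:
    # With the patched _check_args_length this now aborts with a message like:
    # hook-impl for pre-commit expected 0 arguments but got 2: ('run', '--all-files')
    print(exc)
```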
{"golden_diff": "diff --git a/pre_commit/commands/hook_impl.py b/pre_commit/commands/hook_impl.py\n--- a/pre_commit/commands/hook_impl.py\n+++ b/pre_commit/commands/hook_impl.py\n@@ -147,15 +147,44 @@\n return None\n \n \n+_EXPECTED_ARG_LENGTH_BY_HOOK = {\n+ 'commit-msg': 1,\n+ 'post-checkout': 3,\n+ 'pre-commit': 0,\n+ 'pre-merge-commit': 0,\n+ 'pre-push': 2,\n+}\n+\n+\n+def _check_args_length(hook_type: str, args: Sequence[str]) -> None:\n+ if hook_type == 'prepare-commit-msg':\n+ if len(args) < 1 or len(args) > 3:\n+ raise SystemExit(\n+ f'hook-impl for {hook_type} expected 1, 2, or 3 arguments '\n+ f'but got {len(args)}: {args}',\n+ )\n+ elif hook_type in _EXPECTED_ARG_LENGTH_BY_HOOK:\n+ expected = _EXPECTED_ARG_LENGTH_BY_HOOK[hook_type]\n+ if len(args) != expected:\n+ arguments_s = 'argument' if expected == 1 else 'arguments'\n+ raise SystemExit(\n+ f'hook-impl for {hook_type} expected {expected} {arguments_s} '\n+ f'but got {len(args)}: {args}',\n+ )\n+ else:\n+ raise AssertionError(f'unexpected hook type: {hook_type}')\n+\n+\n def _run_ns(\n hook_type: str,\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n ) -> Optional[argparse.Namespace]:\n+ _check_args_length(hook_type, args)\n if hook_type == 'pre-push':\n return _pre_push_ns(color, args, stdin)\n- elif hook_type in {'prepare-commit-msg', 'commit-msg'}:\n+ elif hook_type in {'commit-msg', 'prepare-commit-msg'}:\n return _ns(hook_type, color, commit_msg_filename=args[0])\n elif hook_type in {'pre-merge-commit', 'pre-commit'}:\n return _ns(hook_type, color)\n", "issue": "pre-commt run --all-files only runs on what is staged still\nWhen I run .git_hooks/pre-commit run --all-files, pre-commit still only runs on what is staged. Infact, passing any options to pre-commit doesn't seem to have any affect on what is executed.\r\n\r\n```\r\ngit_hooks/pre-commit run --all-files\r\nTrim Trailing Whitespace.............................(no files to check)Skipped\r\nFix End of Files.....................................(no files to check)Skipped\r\nCheck Yaml...........................................(no files to check)Skipped\r\nCheck for added large files..........................(no files to check)Skipped\r\nCheck JSON...........................................(no files to check)Skipped\r\nCheck Xml............................................(no files to check)Skipped\r\nCheck for merge conflicts............................(no files to check)Skipped\r\nPretty format JSON...................................(no files to check)Skipped\r\n```\r\n\r\n\r\n```\r\n.git_hooks/pre-commit flake8\r\nTrim Trailing Whitespace.............................(no files to check)Skipped\r\nFix End of Files.....................................(no files to check)Skipped\r\nCheck Yaml...........................................(no files to check)Skipped\r\nCheck for added large files..........................(no files to check)Skipped\r\nCheck JSON...........................................(no files to check)Skipped\r\nCheck Xml............................................(no files to check)Skipped\r\nCheck for merge conflicts............................(no files to check)Skipped\r\nPretty format JSON...................................(no files to check)Skipped\r\n```\r\n\r\nWondering if my install is somehow incorrect?\r\n\r\nThx,\r\nPhil\n", "before_files": [{"content": "import argparse\nimport os.path\nimport subprocess\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit.commands.run import 
run\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.parse_shebang import normalize_cmd\nfrom pre_commit.store import Store\n\nZ40 = '0' * 40\n\n\ndef _run_legacy(\n hook_type: str,\n hook_dir: str,\n args: Sequence[str],\n) -> Tuple[int, bytes]:\n if os.environ.get('PRE_COMMIT_RUNNING_LEGACY'):\n raise SystemExit(\n f\"bug: pre-commit's script is installed in migration mode\\n\"\n f'run `pre-commit install -f --hook-type {hook_type}` to fix '\n f'this\\n\\n'\n f'Please report this bug at '\n f'https://github.com/pre-commit/pre-commit/issues',\n )\n\n if hook_type == 'pre-push':\n stdin = sys.stdin.buffer.read()\n else:\n stdin = b''\n\n # not running in legacy mode\n legacy_hook = os.path.join(hook_dir, f'{hook_type}.legacy')\n if not os.access(legacy_hook, os.X_OK):\n return 0, stdin\n\n with envcontext((('PRE_COMMIT_RUNNING_LEGACY', '1'),)):\n cmd = normalize_cmd((legacy_hook, *args))\n return subprocess.run(cmd, input=stdin).returncode, stdin\n\n\ndef _validate_config(\n retv: int,\n config: str,\n skip_on_missing_config: bool,\n) -> None:\n if not os.path.isfile(config):\n if skip_on_missing_config or os.getenv('PRE_COMMIT_ALLOW_NO_CONFIG'):\n print(f'`{config}` config file not found. Skipping `pre-commit`.')\n raise SystemExit(retv)\n else:\n print(\n f'No {config} file was found\\n'\n f'- To temporarily silence this, run '\n f'`PRE_COMMIT_ALLOW_NO_CONFIG=1 git ...`\\n'\n f'- To permanently silence this, install pre-commit with the '\n f'--allow-missing-config option\\n'\n f'- To uninstall pre-commit run `pre-commit uninstall`',\n )\n raise SystemExit(1)\n\n\ndef _ns(\n hook_type: str,\n color: bool,\n *,\n all_files: bool = False,\n from_ref: Optional[str] = None,\n to_ref: Optional[str] = None,\n remote_name: Optional[str] = None,\n remote_url: Optional[str] = None,\n commit_msg_filename: Optional[str] = None,\n checkout_type: Optional[str] = None,\n) -> argparse.Namespace:\n return argparse.Namespace(\n color=color,\n hook_stage=hook_type.replace('pre-', ''),\n from_ref=from_ref,\n to_ref=to_ref,\n remote_name=remote_name,\n remote_url=remote_url,\n commit_msg_filename=commit_msg_filename,\n all_files=all_files,\n checkout_type=checkout_type,\n files=(),\n hook=None,\n verbose=False,\n show_diff_on_failure=False,\n )\n\n\ndef _rev_exists(rev: str) -> bool:\n return not subprocess.call(('git', 'rev-list', '--quiet', rev))\n\n\ndef _pre_push_ns(\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n) -> Optional[argparse.Namespace]:\n remote_name = args[0]\n remote_url = args[1]\n\n for line in stdin.decode().splitlines():\n _, local_sha, _, remote_sha = line.split()\n if local_sha == Z40:\n continue\n elif remote_sha != Z40 and _rev_exists(remote_sha):\n return _ns(\n 'pre-push', color,\n from_ref=remote_sha, to_ref=local_sha,\n remote_name=remote_name, remote_url=remote_url,\n )\n else:\n # ancestors not found in remote\n ancestors = subprocess.check_output((\n 'git', 'rev-list', local_sha, '--topo-order', '--reverse',\n '--not', f'--remotes={remote_name}',\n )).decode().strip()\n if not ancestors:\n continue\n else:\n first_ancestor = ancestors.splitlines()[0]\n cmd = ('git', 'rev-list', '--max-parents=0', local_sha)\n roots = set(subprocess.check_output(cmd).decode().splitlines())\n if first_ancestor in roots:\n # pushing the whole tree including root commit\n return _ns(\n 'pre-push', color,\n all_files=True,\n remote_name=remote_name, remote_url=remote_url,\n )\n else:\n rev_cmd = ('git', 'rev-parse', f'{first_ancestor}^')\n source = 
subprocess.check_output(rev_cmd).decode().strip()\n return _ns(\n 'pre-push', color,\n from_ref=source, to_ref=local_sha,\n remote_name=remote_name, remote_url=remote_url,\n )\n\n # nothing to push\n return None\n\n\ndef _run_ns(\n hook_type: str,\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n) -> Optional[argparse.Namespace]:\n if hook_type == 'pre-push':\n return _pre_push_ns(color, args, stdin)\n elif hook_type in {'prepare-commit-msg', 'commit-msg'}:\n return _ns(hook_type, color, commit_msg_filename=args[0])\n elif hook_type in {'pre-merge-commit', 'pre-commit'}:\n return _ns(hook_type, color)\n elif hook_type == 'post-checkout':\n return _ns(\n hook_type, color,\n from_ref=args[0], to_ref=args[1], checkout_type=args[2],\n )\n else:\n raise AssertionError(f'unexpected hook type: {hook_type}')\n\n\ndef hook_impl(\n store: Store,\n *,\n config: str,\n color: bool,\n hook_type: str,\n hook_dir: str,\n skip_on_missing_config: bool,\n args: Sequence[str],\n) -> int:\n retv, stdin = _run_legacy(hook_type, hook_dir, args)\n _validate_config(retv, config, skip_on_missing_config)\n ns = _run_ns(hook_type, color, args, stdin)\n if ns is None:\n return retv\n else:\n return retv | run(config, store, ns)\n", "path": "pre_commit/commands/hook_impl.py"}], "after_files": [{"content": "import argparse\nimport os.path\nimport subprocess\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit.commands.run import run\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.parse_shebang import normalize_cmd\nfrom pre_commit.store import Store\n\nZ40 = '0' * 40\n\n\ndef _run_legacy(\n hook_type: str,\n hook_dir: str,\n args: Sequence[str],\n) -> Tuple[int, bytes]:\n if os.environ.get('PRE_COMMIT_RUNNING_LEGACY'):\n raise SystemExit(\n f\"bug: pre-commit's script is installed in migration mode\\n\"\n f'run `pre-commit install -f --hook-type {hook_type}` to fix '\n f'this\\n\\n'\n f'Please report this bug at '\n f'https://github.com/pre-commit/pre-commit/issues',\n )\n\n if hook_type == 'pre-push':\n stdin = sys.stdin.buffer.read()\n else:\n stdin = b''\n\n # not running in legacy mode\n legacy_hook = os.path.join(hook_dir, f'{hook_type}.legacy')\n if not os.access(legacy_hook, os.X_OK):\n return 0, stdin\n\n with envcontext((('PRE_COMMIT_RUNNING_LEGACY', '1'),)):\n cmd = normalize_cmd((legacy_hook, *args))\n return subprocess.run(cmd, input=stdin).returncode, stdin\n\n\ndef _validate_config(\n retv: int,\n config: str,\n skip_on_missing_config: bool,\n) -> None:\n if not os.path.isfile(config):\n if skip_on_missing_config or os.getenv('PRE_COMMIT_ALLOW_NO_CONFIG'):\n print(f'`{config}` config file not found. 
Skipping `pre-commit`.')\n raise SystemExit(retv)\n else:\n print(\n f'No {config} file was found\\n'\n f'- To temporarily silence this, run '\n f'`PRE_COMMIT_ALLOW_NO_CONFIG=1 git ...`\\n'\n f'- To permanently silence this, install pre-commit with the '\n f'--allow-missing-config option\\n'\n f'- To uninstall pre-commit run `pre-commit uninstall`',\n )\n raise SystemExit(1)\n\n\ndef _ns(\n hook_type: str,\n color: bool,\n *,\n all_files: bool = False,\n from_ref: Optional[str] = None,\n to_ref: Optional[str] = None,\n remote_name: Optional[str] = None,\n remote_url: Optional[str] = None,\n commit_msg_filename: Optional[str] = None,\n checkout_type: Optional[str] = None,\n) -> argparse.Namespace:\n return argparse.Namespace(\n color=color,\n hook_stage=hook_type.replace('pre-', ''),\n from_ref=from_ref,\n to_ref=to_ref,\n remote_name=remote_name,\n remote_url=remote_url,\n commit_msg_filename=commit_msg_filename,\n all_files=all_files,\n checkout_type=checkout_type,\n files=(),\n hook=None,\n verbose=False,\n show_diff_on_failure=False,\n )\n\n\ndef _rev_exists(rev: str) -> bool:\n return not subprocess.call(('git', 'rev-list', '--quiet', rev))\n\n\ndef _pre_push_ns(\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n) -> Optional[argparse.Namespace]:\n remote_name = args[0]\n remote_url = args[1]\n\n for line in stdin.decode().splitlines():\n _, local_sha, _, remote_sha = line.split()\n if local_sha == Z40:\n continue\n elif remote_sha != Z40 and _rev_exists(remote_sha):\n return _ns(\n 'pre-push', color,\n from_ref=remote_sha, to_ref=local_sha,\n remote_name=remote_name, remote_url=remote_url,\n )\n else:\n # ancestors not found in remote\n ancestors = subprocess.check_output((\n 'git', 'rev-list', local_sha, '--topo-order', '--reverse',\n '--not', f'--remotes={remote_name}',\n )).decode().strip()\n if not ancestors:\n continue\n else:\n first_ancestor = ancestors.splitlines()[0]\n cmd = ('git', 'rev-list', '--max-parents=0', local_sha)\n roots = set(subprocess.check_output(cmd).decode().splitlines())\n if first_ancestor in roots:\n # pushing the whole tree including root commit\n return _ns(\n 'pre-push', color,\n all_files=True,\n remote_name=remote_name, remote_url=remote_url,\n )\n else:\n rev_cmd = ('git', 'rev-parse', f'{first_ancestor}^')\n source = subprocess.check_output(rev_cmd).decode().strip()\n return _ns(\n 'pre-push', color,\n from_ref=source, to_ref=local_sha,\n remote_name=remote_name, remote_url=remote_url,\n )\n\n # nothing to push\n return None\n\n\n_EXPECTED_ARG_LENGTH_BY_HOOK = {\n 'commit-msg': 1,\n 'post-checkout': 3,\n 'pre-commit': 0,\n 'pre-merge-commit': 0,\n 'pre-push': 2,\n}\n\n\ndef _check_args_length(hook_type: str, args: Sequence[str]) -> None:\n if hook_type == 'prepare-commit-msg':\n if len(args) < 1 or len(args) > 3:\n raise SystemExit(\n f'hook-impl for {hook_type} expected 1, 2, or 3 arguments '\n f'but got {len(args)}: {args}',\n )\n elif hook_type in _EXPECTED_ARG_LENGTH_BY_HOOK:\n expected = _EXPECTED_ARG_LENGTH_BY_HOOK[hook_type]\n if len(args) != expected:\n arguments_s = 'argument' if expected == 1 else 'arguments'\n raise SystemExit(\n f'hook-impl for {hook_type} expected {expected} {arguments_s} '\n f'but got {len(args)}: {args}',\n )\n else:\n raise AssertionError(f'unexpected hook type: {hook_type}')\n\n\ndef _run_ns(\n hook_type: str,\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n) -> Optional[argparse.Namespace]:\n _check_args_length(hook_type, args)\n if hook_type == 'pre-push':\n return _pre_push_ns(color, args, stdin)\n 
elif hook_type in {'commit-msg', 'prepare-commit-msg'}:\n return _ns(hook_type, color, commit_msg_filename=args[0])\n elif hook_type in {'pre-merge-commit', 'pre-commit'}:\n return _ns(hook_type, color)\n elif hook_type == 'post-checkout':\n return _ns(\n hook_type, color,\n from_ref=args[0], to_ref=args[1], checkout_type=args[2],\n )\n else:\n raise AssertionError(f'unexpected hook type: {hook_type}')\n\n\ndef hook_impl(\n store: Store,\n *,\n config: str,\n color: bool,\n hook_type: str,\n hook_dir: str,\n skip_on_missing_config: bool,\n args: Sequence[str],\n) -> int:\n retv, stdin = _run_legacy(hook_type, hook_dir, args)\n _validate_config(retv, config, skip_on_missing_config)\n ns = _run_ns(hook_type, color, args, stdin)\n if ns is None:\n return retv\n else:\n return retv | run(config, store, ns)\n", "path": "pre_commit/commands/hook_impl.py"}]}
2433
479
gh_patches_debug_17814
rasdani/github-patches
git_diff
Qiskit__qiskit-7328
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Parameters needs examples in the documentation ### What is the expected enhancement? Many users land in https://qiskit.org/documentation/stubs/qiskit.circuit.Parameter.html when search on how to parametrize circuits. @Cryoris did a good jobs at explaining them in https://quantumcomputing.stackexchange.com/questions/13558/how-to-add-several-parameters-in-qiskit-circuit . I think the Parameter docstring needs a short example. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qiskit/circuit/parameter.py` Content: ``` 1 # This code is part of Qiskit. 2 # 3 # (C) Copyright IBM 2017, 2019. 4 # 5 # This code is licensed under the Apache License, Version 2.0. You may 6 # obtain a copy of this license in the LICENSE.txt file in the root directory 7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 8 # 9 # Any modifications or derivative works of this code must retain this 10 # copyright notice, and modified files need to carry a notice indicating 11 # that they have been altered from the originals. 12 """ 13 Parameter Class for variable parameters. 14 """ 15 16 from uuid import uuid4 17 18 from .parameterexpression import ParameterExpression 19 20 try: 21 import symengine 22 23 HAS_SYMENGINE = True 24 except ImportError: 25 HAS_SYMENGINE = False 26 27 28 class Parameter(ParameterExpression): 29 """Parameter Class for variable parameters.""" 30 31 def __new__(cls, name, uuid=None): # pylint: disable=unused-argument 32 # Parameter relies on self._uuid being set prior to other attributes 33 # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash 34 # or __eq__ functions. 35 obj = object.__new__(cls) 36 37 if uuid is None: 38 obj._uuid = uuid4() 39 else: 40 obj._uuid = uuid 41 42 obj._hash = hash(obj._uuid) 43 return obj 44 45 def __getnewargs__(self): 46 # Unpickling won't in general call __init__ but will always call 47 # __new__. Specify arguments to be passed to __new__ when unpickling. 48 49 return (self.name, self._uuid) 50 51 def __init__(self, name: str): 52 """Create a new named :class:`Parameter`. 53 54 Args: 55 name: name of the ``Parameter``, used for visual representation. This can 56 be any unicode string, e.g. "ϕ". 
57 """ 58 self._name = name 59 if not HAS_SYMENGINE: 60 from sympy import Symbol 61 62 symbol = Symbol(name) 63 else: 64 symbol = symengine.Symbol(name) 65 super().__init__(symbol_map={self: symbol}, expr=symbol) 66 67 def subs(self, parameter_map: dict): 68 """Substitute self with the corresponding parameter in ``parameter_map``.""" 69 return parameter_map[self] 70 71 @property 72 def name(self): 73 """Returns the name of the :class:`Parameter`.""" 74 return self._name 75 76 def __str__(self): 77 return self.name 78 79 def __copy__(self): 80 return self 81 82 def __deepcopy__(self, memo=None): 83 return self 84 85 def __repr__(self): 86 return f"{self.__class__.__name__}({self.name})" 87 88 def __eq__(self, other): 89 if isinstance(other, Parameter): 90 return self._uuid == other._uuid 91 elif isinstance(other, ParameterExpression): 92 return super().__eq__(other) 93 else: 94 return False 95 96 def __hash__(self): 97 return self._hash 98 99 def __getstate__(self): 100 return {"name": self._name} 101 102 def __setstate__(self, state): 103 self._name = state["name"] 104 if not HAS_SYMENGINE: 105 from sympy import Symbol 106 107 symbol = Symbol(self._name) 108 else: 109 symbol = symengine.Symbol(self._name) 110 super().__init__(symbol_map={self: symbol}, expr=symbol) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qiskit/circuit/parameter.py b/qiskit/circuit/parameter.py --- a/qiskit/circuit/parameter.py +++ b/qiskit/circuit/parameter.py @@ -26,7 +26,32 @@ class Parameter(ParameterExpression): - """Parameter Class for variable parameters.""" + """Parameter Class for variable parameters. + + A parameter is a variable value that is not required to be fixed + at circuit definition. + + Examples: + + Construct a variable-rotation X gate using circuit parameters. + + .. jupyter-execute:: + + from qiskit.circuit import QuantumCircuit, Parameter + + # create the parameter + phi = Parameter('phi') + qc = QuantumCircuit(1) + + # parameterize the rotation + qc.rx(phi, 0) + qc.draw() + + # bind the parameters after circuit to create a bound circuit + bc = qc.bind_parameters({phi: 3.14}) + bc.measure_all() + bc.draw() + """ def __new__(cls, name, uuid=None): # pylint: disable=unused-argument # Parameter relies on self._uuid being set prior to other attributes
{"golden_diff": "diff --git a/qiskit/circuit/parameter.py b/qiskit/circuit/parameter.py\n--- a/qiskit/circuit/parameter.py\n+++ b/qiskit/circuit/parameter.py\n@@ -26,7 +26,32 @@\n \n \n class Parameter(ParameterExpression):\n- \"\"\"Parameter Class for variable parameters.\"\"\"\n+ \"\"\"Parameter Class for variable parameters.\n+\n+ A parameter is a variable value that is not required to be fixed\n+ at circuit definition.\n+\n+ Examples:\n+\n+ Construct a variable-rotation X gate using circuit parameters.\n+\n+ .. jupyter-execute::\n+\n+ from qiskit.circuit import QuantumCircuit, Parameter\n+\n+ # create the parameter\n+ phi = Parameter('phi')\n+ qc = QuantumCircuit(1)\n+\n+ # parameterize the rotation\n+ qc.rx(phi, 0)\n+ qc.draw()\n+\n+ # bind the parameters after circuit to create a bound circuit\n+ bc = qc.bind_parameters({phi: 3.14})\n+ bc.measure_all()\n+ bc.draw()\n+ \"\"\"\n \n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n", "issue": "Parameters needs examples in the documentation\n### What is the expected enhancement?\n\nMany users land in https://qiskit.org/documentation/stubs/qiskit.circuit.Parameter.html when search on how to parametrize circuits. @Cryoris did a good jobs at explaining them in https://quantumcomputing.stackexchange.com/questions/13558/how-to-add-several-parameters-in-qiskit-circuit . I think the Parameter docstring needs a short example.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nParameter Class for variable parameters.\n\"\"\"\n\nfrom uuid import uuid4\n\nfrom .parameterexpression import ParameterExpression\n\ntry:\n import symengine\n\n HAS_SYMENGINE = True\nexcept ImportError:\n HAS_SYMENGINE = False\n\n\nclass Parameter(ParameterExpression):\n \"\"\"Parameter Class for variable parameters.\"\"\"\n\n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash\n # or __eq__ functions.\n obj = object.__new__(cls)\n\n if uuid is None:\n obj._uuid = uuid4()\n else:\n obj._uuid = uuid\n\n obj._hash = hash(obj._uuid)\n return obj\n\n def __getnewargs__(self):\n # Unpickling won't in general call __init__ but will always call\n # __new__. Specify arguments to be passed to __new__ when unpickling.\n\n return (self.name, self._uuid)\n\n def __init__(self, name: str):\n \"\"\"Create a new named :class:`Parameter`.\n\n Args:\n name: name of the ``Parameter``, used for visual representation. This can\n be any unicode string, e.g. 
\"\u03d5\".\n \"\"\"\n self._name = name\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(name)\n else:\n symbol = symengine.Symbol(name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n\n def subs(self, parameter_map: dict):\n \"\"\"Substitute self with the corresponding parameter in ``parameter_map``.\"\"\"\n return parameter_map[self]\n\n @property\n def name(self):\n \"\"\"Returns the name of the :class:`Parameter`.\"\"\"\n return self._name\n\n def __str__(self):\n return self.name\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, memo=None):\n return self\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.name})\"\n\n def __eq__(self, other):\n if isinstance(other, Parameter):\n return self._uuid == other._uuid\n elif isinstance(other, ParameterExpression):\n return super().__eq__(other)\n else:\n return False\n\n def __hash__(self):\n return self._hash\n\n def __getstate__(self):\n return {\"name\": self._name}\n\n def __setstate__(self, state):\n self._name = state[\"name\"]\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(self._name)\n else:\n symbol = symengine.Symbol(self._name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n", "path": "qiskit/circuit/parameter.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nParameter Class for variable parameters.\n\"\"\"\n\nfrom uuid import uuid4\n\nfrom .parameterexpression import ParameterExpression\n\ntry:\n import symengine\n\n HAS_SYMENGINE = True\nexcept ImportError:\n HAS_SYMENGINE = False\n\n\nclass Parameter(ParameterExpression):\n \"\"\"Parameter Class for variable parameters.\n\n A parameter is a variable value that is not required to be fixed\n at circuit definition.\n\n Examples:\n\n Construct a variable-rotation X gate using circuit parameters.\n\n .. jupyter-execute::\n\n from qiskit.circuit import QuantumCircuit, Parameter\n\n # create the parameter\n phi = Parameter('phi')\n qc = QuantumCircuit(1)\n\n # parameterize the rotation\n qc.rx(phi, 0)\n qc.draw()\n\n # bind the parameters after circuit to create a bound circuit\n bc = qc.bind_parameters({phi: 3.14})\n bc.measure_all()\n bc.draw()\n \"\"\"\n\n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash\n # or __eq__ functions.\n obj = object.__new__(cls)\n\n if uuid is None:\n obj._uuid = uuid4()\n else:\n obj._uuid = uuid\n\n obj._hash = hash(obj._uuid)\n return obj\n\n def __getnewargs__(self):\n # Unpickling won't in general call __init__ but will always call\n # __new__. Specify arguments to be passed to __new__ when unpickling.\n\n return (self.name, self._uuid)\n\n def __init__(self, name: str):\n \"\"\"Create a new named :class:`Parameter`.\n\n Args:\n name: name of the ``Parameter``, used for visual representation. This can\n be any unicode string, e.g. 
\"\u03d5\".\n \"\"\"\n self._name = name\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(name)\n else:\n symbol = symengine.Symbol(name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n\n def subs(self, parameter_map: dict):\n \"\"\"Substitute self with the corresponding parameter in ``parameter_map``.\"\"\"\n return parameter_map[self]\n\n @property\n def name(self):\n \"\"\"Returns the name of the :class:`Parameter`.\"\"\"\n return self._name\n\n def __str__(self):\n return self.name\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, memo=None):\n return self\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.name})\"\n\n def __eq__(self, other):\n if isinstance(other, Parameter):\n return self._uuid == other._uuid\n elif isinstance(other, ParameterExpression):\n return super().__eq__(other)\n else:\n return False\n\n def __hash__(self):\n return self._hash\n\n def __getstate__(self):\n return {\"name\": self._name}\n\n def __setstate__(self, state):\n self._name = state[\"name\"]\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(self._name)\n else:\n symbol = symengine.Symbol(self._name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n", "path": "qiskit/circuit/parameter.py"}]}
1,338
280
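As context for the record above: the golden diff is documentation-only — it embeds a usage example in the `Parameter` docstring without touching the class behavior. A rough standalone sketch of that same pattern (assuming Qiskit is installed; the parameter name `phi`, the `rx` rotation, and the angle `3.14` are simply the values used in the diff, and `bind_parameters` is the call the diff itself uses):

```python
from qiskit.circuit import QuantumCircuit, Parameter

phi = Parameter("phi")                   # symbolic placeholder, bound later
qc = QuantumCircuit(1)
qc.rx(phi, 0)                            # rotation angle left unbound at definition time

bound = qc.bind_parameters({phi: 3.14})  # substitute a concrete value for phi
bound.measure_all()
print(bound.draw())
```

Because the patch only changes the docstring, this snippet behaves the same before and after the fix; the fix just makes the pattern discoverable from the API reference.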
gh_patches_debug_39684
rasdani/github-patches
git_diff
certbot__certbot-3230
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Nginx parser apparently can't parse "map" I was looking at issues on the nginxparser project and found this one: https://github.com/fatiherikli/nginxparser/issues/19 It appears to me that we can't parse configurations containing this. I got something like `ParseException: Expected stringEnd (at char 100), (line:5, col:1)` when trying to `nginxparser.loads()` a file that included that map directive. cc @pde --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `certbot-nginx/certbot_nginx/nginxparser.py` Content: ``` 1 """Very low-level nginx config parser based on pyparsing.""" 2 # Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed) 3 import copy 4 import logging 5 import string 6 7 from pyparsing import ( 8 Literal, White, Word, alphanums, CharsNotIn, Combine, Forward, Group, 9 Optional, OneOrMore, Regex, ZeroOrMore) 10 from pyparsing import stringEnd 11 from pyparsing import restOfLine 12 13 logger = logging.getLogger(__name__) 14 15 class RawNginxParser(object): 16 # pylint: disable=expression-not-assigned 17 """A class that parses nginx configuration with pyparsing.""" 18 19 # constants 20 space = Optional(White()) 21 nonspace = Regex(r"\S+") 22 left_bracket = Literal("{").suppress() 23 right_bracket = space.leaveWhitespace() + Literal("}").suppress() 24 semicolon = Literal(";").suppress() 25 key = Word(alphanums + "_/+-.") 26 dollar_var = Combine(Literal('$') + nonspace) 27 condition = Regex(r"\(.+\)") 28 # Matches anything that is not a special character AND any chars in single 29 # or double quotes 30 value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+") 31 location = CharsNotIn("{};," + string.whitespace) 32 # modifier for location uri [ = | ~ | ~* | ^~ ] 33 modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~") 34 35 # rules 36 comment = space + Literal('#') + restOfLine() 37 38 assignment = space + key + Optional(space + value, default=None) + semicolon 39 location_statement = space + Optional(modifier) + Optional(space + location + space) 40 if_statement = space + Literal("if") + space + condition + space 41 map_statement = space + Literal("map") + space + nonspace + space + dollar_var + space 42 block = Forward() 43 44 block << Group( 45 # key could for instance be "server" or "http", or "location" (in which case 46 # location_statement needs to have a non-empty location) 47 (Group(space + key + location_statement) ^ Group(if_statement) ^ 48 Group(map_statement)).leaveWhitespace() + 49 left_bracket + 50 Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() + 51 right_bracket) 52 53 script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd 54 script.parseWithTabs() 55 56 def __init__(self, source): 57 self.source = source 58 59 def parse(self): 60 """Returns the parsed tree.""" 61 return self.script.parseString(self.source) 62 63 def as_list(self): 64 """Returns the parsed tree as a list.""" 65 return self.parse().asList() 66 67 class RawNginxDumper(object): 68 # pylint: disable=too-few-public-methods 69 """A class that dumps nginx configuration from the provided tree.""" 70 def __init__(self, blocks): 71 self.blocks = blocks 72 73 def __iter__(self, blocks=None): 74 """Iterates the dumped nginx content.""" 75 blocks = blocks or self.blocks 76 for b0 in blocks: 77 if isinstance(b0, str): 78 yield b0 79 
continue 80 b = copy.deepcopy(b0) 81 if spacey(b[0]): 82 yield b.pop(0) # indentation 83 if not b: 84 continue 85 key, values = b.pop(0), b.pop(0) 86 87 if isinstance(key, list): 88 yield "".join(key) + '{' 89 for parameter in values: 90 for line in self.__iter__([parameter]): # negate "for b0 in blocks" 91 yield line 92 yield '}' 93 else: 94 if isinstance(key, str) and key.strip() == '#': # comment 95 yield key + values 96 else: # assignment 97 gap = "" 98 # Sometimes the parser has stuck some gap whitespace in here; 99 # if so rotate it into gap 100 if values and spacey(values): 101 gap = values 102 values = b.pop(0) 103 yield key + gap + values + ';' 104 105 def __str__(self): 106 """Return the parsed block as a string.""" 107 return ''.join(self) 108 109 110 # Shortcut functions to respect Python's serialization interface 111 # (like pyyaml, picker or json) 112 113 def loads(source): 114 """Parses from a string. 115 116 :param str souce: The string to parse 117 :returns: The parsed tree 118 :rtype: list 119 120 """ 121 return UnspacedList(RawNginxParser(source).as_list()) 122 123 124 def load(_file): 125 """Parses from a file. 126 127 :param file _file: The file to parse 128 :returns: The parsed tree 129 :rtype: list 130 131 """ 132 return loads(_file.read()) 133 134 135 def dumps(blocks): 136 """Dump to a string. 137 138 :param UnspacedList block: The parsed tree 139 :param int indentation: The number of spaces to indent 140 :rtype: str 141 142 """ 143 return str(RawNginxDumper(blocks.spaced)) 144 145 146 def dump(blocks, _file): 147 """Dump to a file. 148 149 :param UnspacedList block: The parsed tree 150 :param file _file: The file to dump to 151 :param int indentation: The number of spaces to indent 152 :rtype: NoneType 153 154 """ 155 return _file.write(dumps(blocks)) 156 157 158 spacey = lambda x: (isinstance(x, str) and x.isspace()) or x == '' 159 160 class UnspacedList(list): 161 """Wrap a list [of lists], making any whitespace entries magically invisible""" 162 163 def __init__(self, list_source): 164 # ensure our argument is not a generator, and duplicate any sublists 165 self.spaced = copy.deepcopy(list(list_source)) 166 self.dirty = False 167 168 # Turn self into a version of the source list that has spaces removed 169 # and all sub-lists also UnspacedList()ed 170 list.__init__(self, list_source) 171 for i, entry in reversed(list(enumerate(self))): 172 if isinstance(entry, list): 173 sublist = UnspacedList(entry) 174 list.__setitem__(self, i, sublist) 175 self.spaced[i] = sublist.spaced 176 elif spacey(entry): 177 # don't delete comments 178 if "#" not in self[:i]: 179 list.__delitem__(self, i) 180 181 def _coerce(self, inbound): 182 """ 183 Coerce some inbound object to be appropriately usable in this object 184 185 :param inbound: string or None or list or UnspacedList 186 :returns: (coerced UnspacedList or string or None, spaced equivalent) 187 :rtype: tuple 188 189 """ 190 if not isinstance(inbound, list): # str or None 191 return (inbound, inbound) 192 else: 193 if not hasattr(inbound, "spaced"): 194 inbound = UnspacedList(inbound) 195 return (inbound, inbound.spaced) 196 197 198 def insert(self, i, x): 199 item, spaced_item = self._coerce(x) 200 self.spaced.insert(self._spaced_position(i), spaced_item) 201 list.insert(self, i, item) 202 self.dirty = True 203 204 def append(self, x): 205 item, spaced_item = self._coerce(x) 206 self.spaced.append(spaced_item) 207 list.append(self, item) 208 self.dirty = True 209 210 def extend(self, x): 211 item, spaced_item = 
self._coerce(x) 212 self.spaced.extend(spaced_item) 213 list.extend(self, item) 214 self.dirty = True 215 216 def __add__(self, other): 217 l = copy.deepcopy(self) 218 l.extend(other) 219 l.dirty = True 220 return l 221 222 def pop(self, _i=None): 223 raise NotImplementedError("UnspacedList.pop() not yet implemented") 224 def remove(self, _): 225 raise NotImplementedError("UnspacedList.remove() not yet implemented") 226 def reverse(self): 227 raise NotImplementedError("UnspacedList.reverse() not yet implemented") 228 def sort(self, _cmp=None, _key=None, _Rev=None): 229 raise NotImplementedError("UnspacedList.sort() not yet implemented") 230 def __setslice__(self, _i, _j, _newslice): 231 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented") 232 233 def __setitem__(self, i, value): 234 if isinstance(i, slice): 235 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented") 236 item, spaced_item = self._coerce(value) 237 self.spaced.__setitem__(self._spaced_position(i), spaced_item) 238 list.__setitem__(self, i, item) 239 self.dirty = True 240 241 def __delitem__(self, i): 242 self.spaced.__delitem__(self._spaced_position(i)) 243 list.__delitem__(self, i) 244 self.dirty = True 245 246 def __deepcopy__(self, memo): 247 l = UnspacedList(self[:]) 248 l.spaced = copy.deepcopy(self.spaced, memo=memo) 249 l.dirty = self.dirty 250 return l 251 252 def is_dirty(self): 253 """Recurse through the parse tree to figure out if any sublists are dirty""" 254 if self.dirty: 255 return True 256 return any((isinstance(x, list) and x.is_dirty() for x in self)) 257 258 def _spaced_position(self, idx): 259 "Convert from indexes in the unspaced list to positions in the spaced one" 260 pos = spaces = 0 261 # Normalize indexes like list[-1] etc, and save the result 262 if idx < 0: 263 idx = len(self) + idx 264 if not 0 <= idx < len(self): 265 raise IndexError("list index out of range") 266 idx0 = idx 267 # Count the number of spaces in the spaced list before idx in the unspaced one 268 while idx != -1: 269 if spacey(self.spaced[pos]): 270 spaces += 1 271 else: 272 idx -= 1 273 pos += 1 274 return idx0 + spaces 275 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/certbot-nginx/certbot_nginx/nginxparser.py b/certbot-nginx/certbot_nginx/nginxparser.py --- a/certbot-nginx/certbot_nginx/nginxparser.py +++ b/certbot-nginx/certbot_nginx/nginxparser.py @@ -23,10 +23,12 @@ right_bracket = space.leaveWhitespace() + Literal("}").suppress() semicolon = Literal(";").suppress() key = Word(alphanums + "_/+-.") - dollar_var = Combine(Literal('$') + nonspace) + dollar_var = Combine(Literal('$') + Regex(r"[^\{\};,\s]+")) condition = Regex(r"\(.+\)") # Matches anything that is not a special character AND any chars in single # or double quotes + # All of these COULD be upgraded to something like + # https://stackoverflow.com/a/16130746 value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+") location = CharsNotIn("{};," + string.whitespace) # modifier for location uri [ = | ~ | ~* | ^~ ] @@ -38,19 +40,35 @@ assignment = space + key + Optional(space + value, default=None) + semicolon location_statement = space + Optional(modifier) + Optional(space + location + space) if_statement = space + Literal("if") + space + condition + space + map_statement = space + Literal("map") + space + nonspace + space + dollar_var + space - block = Forward() + # This is NOT an accurate way to parse nginx map entries; it's almost + # certianly too permissive and may be wrong in other ways, but it should + # preserve things correctly in mmmmost or all cases. + # + # - I can neither prove nor disprove that it is corect wrt all escaped + # semicolon situations + # Addresses https://github.com/fatiherikli/nginxparser/issues/19 + map_pattern = Regex(r'".*"') | Regex(r"'.*'") | nonspace + map_entry = space + map_pattern + space + value + space + semicolon + map_block = Group( + # key could for instance be "server" or "http", or "location" (in which case + # location_statement needs to have a non-empty location) + Group(map_statement).leaveWhitespace() + + left_bracket + + Group(ZeroOrMore(Group(comment | map_entry)) + space).leaveWhitespace() + + right_bracket) + block = Forward() block << Group( # key could for instance be "server" or "http", or "location" (in which case # location_statement needs to have a non-empty location) - (Group(space + key + location_statement) ^ Group(if_statement) ^ - Group(map_statement)).leaveWhitespace() + + (Group(space + key + location_statement) ^ Group(if_statement)).leaveWhitespace() + left_bracket + - Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() + - right_bracket) + Group(ZeroOrMore(Group(comment | assignment) | block | map_block) + space).leaveWhitespace() + + right_bracket) - script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd + script = OneOrMore(Group(comment | assignment) ^ block ^ map_block) + space + stringEnd script.parseWithTabs() def __init__(self, source):
{"golden_diff": "diff --git a/certbot-nginx/certbot_nginx/nginxparser.py b/certbot-nginx/certbot_nginx/nginxparser.py\n--- a/certbot-nginx/certbot_nginx/nginxparser.py\n+++ b/certbot-nginx/certbot_nginx/nginxparser.py\n@@ -23,10 +23,12 @@\n right_bracket = space.leaveWhitespace() + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n key = Word(alphanums + \"_/+-.\")\n- dollar_var = Combine(Literal('$') + nonspace)\n+ dollar_var = Combine(Literal('$') + Regex(r\"[^\\{\\};,\\s]+\"))\n condition = Regex(r\"\\(.+\\)\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n+ # All of these COULD be upgraded to something like\n+ # https://stackoverflow.com/a/16130746\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n@@ -38,19 +40,35 @@\n assignment = space + key + Optional(space + value, default=None) + semicolon\n location_statement = space + Optional(modifier) + Optional(space + location + space)\n if_statement = space + Literal(\"if\") + space + condition + space\n+\n map_statement = space + Literal(\"map\") + space + nonspace + space + dollar_var + space\n- block = Forward()\n+ # This is NOT an accurate way to parse nginx map entries; it's almost\n+ # certianly too permissive and may be wrong in other ways, but it should\n+ # preserve things correctly in mmmmost or all cases.\n+ #\n+ # - I can neither prove nor disprove that it is corect wrt all escaped\n+ # semicolon situations\n+ # Addresses https://github.com/fatiherikli/nginxparser/issues/19\n+ map_pattern = Regex(r'\".*\"') | Regex(r\"'.*'\") | nonspace\n+ map_entry = space + map_pattern + space + value + space + semicolon\n+ map_block = Group(\n+ # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n+ # location_statement needs to have a non-empty location)\n+ Group(map_statement).leaveWhitespace() +\n+ left_bracket +\n+ Group(ZeroOrMore(Group(comment | map_entry)) + space).leaveWhitespace() +\n+ right_bracket)\n \n+ block = Forward()\n block << Group(\n # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n # location_statement needs to have a non-empty location)\n- (Group(space + key + location_statement) ^ Group(if_statement) ^\n- Group(map_statement)).leaveWhitespace() +\n+ (Group(space + key + location_statement) ^ Group(if_statement)).leaveWhitespace() +\n left_bracket +\n- Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() +\n- right_bracket)\n+ Group(ZeroOrMore(Group(comment | assignment) | block | map_block) + space).leaveWhitespace()\n+ + right_bracket)\n \n- script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd\n+ script = OneOrMore(Group(comment | assignment) ^ block ^ map_block) + space + stringEnd\n script.parseWithTabs()\n \n def __init__(self, source):\n", "issue": "Nginx parser apparently can't parse \"map\"\nI was looking at issues on the nginxparser project and found this one:\n\nhttps://github.com/fatiherikli/nginxparser/issues/19\n\nIt appears to me that we can't parse configurations containing this. 
I got something like\n\n`ParseException: Expected stringEnd (at char 100), (line:5, col:1)`\n\nwhen trying to `nginxparser.loads()` a file that included that map directive.\n\ncc @pde \n\n", "before_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\n# Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)\nimport copy\nimport logging\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Combine, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nlogger = logging.getLogger(__name__)\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n space = Optional(White())\n nonspace = Regex(r\"\\S+\")\n left_bracket = Literal(\"{\").suppress()\n right_bracket = space.leaveWhitespace() + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n key = Word(alphanums + \"_/+-.\")\n dollar_var = Combine(Literal('$') + nonspace)\n condition = Regex(r\"\\(.+\\)\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = space + Literal('#') + restOfLine()\n\n assignment = space + key + Optional(space + value, default=None) + semicolon\n location_statement = space + Optional(modifier) + Optional(space + location + space)\n if_statement = space + Literal(\"if\") + space + condition + space\n map_statement = space + Literal(\"map\") + space + nonspace + space + dollar_var + space\n block = Forward()\n\n block << Group(\n # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n # location_statement needs to have a non-empty location)\n (Group(space + key + location_statement) ^ Group(if_statement) ^\n Group(map_statement)).leaveWhitespace() +\n left_bracket +\n Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() +\n right_bracket)\n\n script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd\n script.parseWithTabs()\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __iter__(self, blocks=None):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for b0 in blocks:\n if isinstance(b0, str):\n yield b0\n continue\n b = copy.deepcopy(b0)\n if spacey(b[0]):\n yield b.pop(0) # indentation\n if not b:\n continue\n key, values = b.pop(0), b.pop(0)\n\n if isinstance(key, list):\n yield \"\".join(key) + '{'\n for parameter in values:\n for line in self.__iter__([parameter]): # negate \"for b0 in blocks\"\n yield line\n yield '}'\n else:\n if isinstance(key, str) and key.strip() == '#': # comment\n yield key + values\n else: # assignment\n gap = \"\"\n # Sometimes the parser has stuck some gap whitespace in 
here;\n # if so rotate it into gap\n if values and spacey(values):\n gap = values\n values = b.pop(0)\n yield key + gap + values + ';'\n\n def __str__(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return ''.join(self)\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return UnspacedList(RawNginxParser(source).as_list())\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks):\n \"\"\"Dump to a string.\n\n :param UnspacedList block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return str(RawNginxDumper(blocks.spaced))\n\n\ndef dump(blocks, _file):\n \"\"\"Dump to a file.\n\n :param UnspacedList block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks))\n\n\nspacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''\n\nclass UnspacedList(list):\n \"\"\"Wrap a list [of lists], making any whitespace entries magically invisible\"\"\"\n\n def __init__(self, list_source):\n # ensure our argument is not a generator, and duplicate any sublists\n self.spaced = copy.deepcopy(list(list_source))\n self.dirty = False\n\n # Turn self into a version of the source list that has spaces removed\n # and all sub-lists also UnspacedList()ed\n list.__init__(self, list_source)\n for i, entry in reversed(list(enumerate(self))):\n if isinstance(entry, list):\n sublist = UnspacedList(entry)\n list.__setitem__(self, i, sublist)\n self.spaced[i] = sublist.spaced\n elif spacey(entry):\n # don't delete comments\n if \"#\" not in self[:i]:\n list.__delitem__(self, i)\n\n def _coerce(self, inbound):\n \"\"\"\n Coerce some inbound object to be appropriately usable in this object\n\n :param inbound: string or None or list or UnspacedList\n :returns: (coerced UnspacedList or string or None, spaced equivalent)\n :rtype: tuple\n\n \"\"\"\n if not isinstance(inbound, list): # str or None\n return (inbound, inbound)\n else:\n if not hasattr(inbound, \"spaced\"):\n inbound = UnspacedList(inbound)\n return (inbound, inbound.spaced)\n\n\n def insert(self, i, x):\n item, spaced_item = self._coerce(x)\n self.spaced.insert(self._spaced_position(i), spaced_item)\n list.insert(self, i, item)\n self.dirty = True\n\n def append(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.append(spaced_item)\n list.append(self, item)\n self.dirty = True\n\n def extend(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.extend(spaced_item)\n list.extend(self, item)\n self.dirty = True\n\n def __add__(self, other):\n l = copy.deepcopy(self)\n l.extend(other)\n l.dirty = True\n return l\n\n def pop(self, _i=None):\n raise NotImplementedError(\"UnspacedList.pop() not yet implemented\")\n def remove(self, _):\n raise NotImplementedError(\"UnspacedList.remove() not yet implemented\")\n def reverse(self):\n raise NotImplementedError(\"UnspacedList.reverse() not yet implemented\")\n def sort(self, _cmp=None, _key=None, _Rev=None):\n raise NotImplementedError(\"UnspacedList.sort() not yet implemented\")\n def __setslice__(self, _i, _j, _newslice):\n raise NotImplementedError(\"Slice operations on 
UnspacedLists not yet implemented\")\n\n def __setitem__(self, i, value):\n if isinstance(i, slice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n item, spaced_item = self._coerce(value)\n self.spaced.__setitem__(self._spaced_position(i), spaced_item)\n list.__setitem__(self, i, item)\n self.dirty = True\n\n def __delitem__(self, i):\n self.spaced.__delitem__(self._spaced_position(i))\n list.__delitem__(self, i)\n self.dirty = True\n\n def __deepcopy__(self, memo):\n l = UnspacedList(self[:])\n l.spaced = copy.deepcopy(self.spaced, memo=memo)\n l.dirty = self.dirty\n return l\n\n def is_dirty(self):\n \"\"\"Recurse through the parse tree to figure out if any sublists are dirty\"\"\"\n if self.dirty:\n return True\n return any((isinstance(x, list) and x.is_dirty() for x in self))\n\n def _spaced_position(self, idx):\n \"Convert from indexes in the unspaced list to positions in the spaced one\"\n pos = spaces = 0\n # Normalize indexes like list[-1] etc, and save the result\n if idx < 0:\n idx = len(self) + idx\n if not 0 <= idx < len(self):\n raise IndexError(\"list index out of range\")\n idx0 = idx\n # Count the number of spaces in the spaced list before idx in the unspaced one\n while idx != -1:\n if spacey(self.spaced[pos]):\n spaces += 1\n else:\n idx -= 1\n pos += 1\n return idx0 + spaces\n", "path": "certbot-nginx/certbot_nginx/nginxparser.py"}], "after_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\n# Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)\nimport copy\nimport logging\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Combine, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nlogger = logging.getLogger(__name__)\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n space = Optional(White())\n nonspace = Regex(r\"\\S+\")\n left_bracket = Literal(\"{\").suppress()\n right_bracket = space.leaveWhitespace() + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n key = Word(alphanums + \"_/+-.\")\n dollar_var = Combine(Literal('$') + Regex(r\"[^\\{\\};,\\s]+\"))\n condition = Regex(r\"\\(.+\\)\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n # All of these COULD be upgraded to something like\n # https://stackoverflow.com/a/16130746\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = space + Literal('#') + restOfLine()\n\n assignment = space + key + Optional(space + value, default=None) + semicolon\n location_statement = space + Optional(modifier) + Optional(space + location + space)\n if_statement = space + Literal(\"if\") + space + condition + space\n\n map_statement = space + Literal(\"map\") + space + nonspace + space + dollar_var + space\n # This is NOT an accurate way to parse nginx map entries; it's almost\n # certianly too permissive and may be wrong in other ways, but it should\n # preserve things correctly in mmmmost or all cases.\n #\n # - I can neither prove nor disprove that it is corect wrt all escaped\n # semicolon situations\n # Addresses 
https://github.com/fatiherikli/nginxparser/issues/19\n map_pattern = Regex(r'\".*\"') | Regex(r\"'.*'\") | nonspace\n map_entry = space + map_pattern + space + value + space + semicolon\n map_block = Group(\n # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n # location_statement needs to have a non-empty location)\n Group(map_statement).leaveWhitespace() +\n left_bracket +\n Group(ZeroOrMore(Group(comment | map_entry)) + space).leaveWhitespace() +\n right_bracket)\n\n block = Forward()\n block << Group(\n # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n # location_statement needs to have a non-empty location)\n (Group(space + key + location_statement) ^ Group(if_statement)).leaveWhitespace() +\n left_bracket +\n Group(ZeroOrMore(Group(comment | assignment) | block | map_block) + space).leaveWhitespace()\n + right_bracket)\n\n script = OneOrMore(Group(comment | assignment) ^ block ^ map_block) + space + stringEnd\n script.parseWithTabs()\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __iter__(self, blocks=None):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for b0 in blocks:\n if isinstance(b0, str):\n yield b0\n continue\n b = copy.deepcopy(b0)\n if spacey(b[0]):\n yield b.pop(0) # indentation\n if not b:\n continue\n key, values = b.pop(0), b.pop(0)\n\n if isinstance(key, list):\n yield \"\".join(key) + '{'\n for parameter in values:\n for line in self.__iter__([parameter]): # negate \"for b0 in blocks\"\n yield line\n yield '}'\n else:\n if isinstance(key, str) and key.strip() == '#': # comment\n yield key + values\n else: # assignment\n gap = \"\"\n # Sometimes the parser has stuck some gap whitespace in here;\n # if so rotate it into gap\n if values and spacey(values):\n gap = values\n values = b.pop(0)\n yield key + gap + values + ';'\n\n def __str__(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return ''.join(self)\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return UnspacedList(RawNginxParser(source).as_list())\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks):\n \"\"\"Dump to a string.\n\n :param UnspacedList block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return str(RawNginxDumper(blocks.spaced))\n\n\ndef dump(blocks, _file):\n \"\"\"Dump to a file.\n\n :param UnspacedList block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks))\n\n\nspacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''\n\nclass UnspacedList(list):\n \"\"\"Wrap a list [of lists], making any whitespace entries magically 
invisible\"\"\"\n\n def __init__(self, list_source):\n # ensure our argument is not a generator, and duplicate any sublists\n self.spaced = copy.deepcopy(list(list_source))\n self.dirty = False\n\n # Turn self into a version of the source list that has spaces removed\n # and all sub-lists also UnspacedList()ed\n list.__init__(self, list_source)\n for i, entry in reversed(list(enumerate(self))):\n if isinstance(entry, list):\n sublist = UnspacedList(entry)\n list.__setitem__(self, i, sublist)\n self.spaced[i] = sublist.spaced\n elif spacey(entry):\n # don't delete comments\n if \"#\" not in self[:i]:\n list.__delitem__(self, i)\n\n def _coerce(self, inbound):\n \"\"\"\n Coerce some inbound object to be appropriately usable in this object\n\n :param inbound: string or None or list or UnspacedList\n :returns: (coerced UnspacedList or string or None, spaced equivalent)\n :rtype: tuple\n\n \"\"\"\n if not isinstance(inbound, list): # str or None\n return (inbound, inbound)\n else:\n if not hasattr(inbound, \"spaced\"):\n inbound = UnspacedList(inbound)\n return (inbound, inbound.spaced)\n\n\n def insert(self, i, x):\n item, spaced_item = self._coerce(x)\n self.spaced.insert(self._spaced_position(i), spaced_item)\n list.insert(self, i, item)\n self.dirty = True\n\n def append(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.append(spaced_item)\n list.append(self, item)\n self.dirty = True\n\n def extend(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.extend(spaced_item)\n list.extend(self, item)\n self.dirty = True\n\n def __add__(self, other):\n l = copy.deepcopy(self)\n l.extend(other)\n l.dirty = True\n return l\n\n def pop(self, _i=None):\n raise NotImplementedError(\"UnspacedList.pop() not yet implemented\")\n def remove(self, _):\n raise NotImplementedError(\"UnspacedList.remove() not yet implemented\")\n def reverse(self):\n raise NotImplementedError(\"UnspacedList.reverse() not yet implemented\")\n def sort(self, _cmp=None, _key=None, _Rev=None):\n raise NotImplementedError(\"UnspacedList.sort() not yet implemented\")\n def __setslice__(self, _i, _j, _newslice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n\n def __setitem__(self, i, value):\n if isinstance(i, slice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n item, spaced_item = self._coerce(value)\n self.spaced.__setitem__(self._spaced_position(i), spaced_item)\n list.__setitem__(self, i, item)\n self.dirty = True\n\n def __delitem__(self, i):\n self.spaced.__delitem__(self._spaced_position(i))\n list.__delitem__(self, i)\n self.dirty = True\n\n def __deepcopy__(self, memo):\n l = UnspacedList(self[:])\n l.spaced = copy.deepcopy(self.spaced, memo=memo)\n l.dirty = self.dirty\n return l\n\n def is_dirty(self):\n \"\"\"Recurse through the parse tree to figure out if any sublists are dirty\"\"\"\n if self.dirty:\n return True\n return any((isinstance(x, list) and x.is_dirty() for x in self))\n\n def _spaced_position(self, idx):\n \"Convert from indexes in the unspaced list to positions in the spaced one\"\n pos = spaces = 0\n # Normalize indexes like list[-1] etc, and save the result\n if idx < 0:\n idx = len(self) + idx\n if not 0 <= idx < len(self):\n raise IndexError(\"list index out of range\")\n idx0 = idx\n # Count the number of spaces in the spaced list before idx in the unspaced one\n while idx != -1:\n if spacey(self.spaced[pos]):\n spaces += 1\n else:\n idx -= 1\n pos += 1\n return idx0 + spaces\n", "path": 
"certbot-nginx/certbot_nginx/nginxparser.py"}]}
3,324
820
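For the second record: the golden diff adds dedicated `map_statement` / `map_entry` / `map_block` rules to the pyparsing grammar, so a configuration containing an nginx `map` block is intended to round-trip through `loads()` and `dumps()` instead of raising `ParseException`. A minimal smoke-test sketch (assuming the patched `certbot_nginx` package is importable; the sample configuration below is illustrative and not taken from the record):

```python
from certbot_nginx import nginxparser

CONFIG = """
map $http_host $site_id {
    default        0;
    example.com    1;
}
"""

tree = nginxparser.loads(CONFIG)   # pre-patch, this raised ParseException on "map"
print(nginxparser.dumps(tree))     # should emit an equivalent configuration
```

Whether the output matches the input byte-for-byte depends on the whitespace handling in `RawNginxDumper`, so treat this as a smoke test of the new grammar rather than a guarantee of exact round-tripping.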